rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
}
+/* Set up a power-monitor wakeup condition (rte_power_monitor) for an Rx
+ * queue: watch the status/error/len qword of the next descriptor to be
+ * processed (rx_tail) and wake when the DD (descriptor done) bit becomes
+ * set, i.e. when the hardware has written back a new packet.
+ *
+ * @rx_queue: Rx queue handle (struct iavf_rx_queue *)
+ * @pmc: monitor condition to fill in (address, expected value, mask, size)
+ *
+ * Always returns 0.
+ */
+int
+iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ volatile union iavf_rx_desc *rxdp;
+ uint16_t desc;
+
+ desc = rxq->rx_tail;
+ rxdp = &rxq->rx_ring[desc];
+ /* watch for changes in status bit */
+ pmc->addr = &rxdp->wb.qword1.status_error_len;
+
+ /*
+ * we expect the DD bit to be set to 1 if this descriptor was already
+ * written to.
+ */
+ pmc->val = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+ pmc->mask = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+
+ /* registers are 64-bit */
+ pmc->size = sizeof(uint64_t);
+
+ return 0;
+}
+
static inline int
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
{
static inline bool
check_tx_vec_allow(struct iavf_tx_queue *txq)
{
- if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
+ if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
uint8_t proto_xtr;
uint16_t len;
uint16_t rx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
nb_desc > IAVF_MAX_RING_DESC ||
nb_desc < IAVF_MIN_RING_DESC) {
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->rx_hdr_len = 0;
rxq->vsi = vsi;
+ rxq->offloads = offloads;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
rxq->port_id, rxq->queue_id, rx_id, nb_hold);
rx_id = (uint16_t)((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
rxd = *rxdp;
nb_hold++;
rxe = rxq->sw_ring[rx_id];
+ rxq->sw_ring[rx_id] = nmb;
rx_id++;
if (unlikely(rx_id == rxq->nb_rx_desc))
rx_id = 0;
rxd = *rxdp;
nb_hold++;
rxe = rxq->sw_ring[rx_id];
+ rxq->sw_ring[rx_id] = nmb;
rx_id++;
if (unlikely(rx_id == rxq->nb_rx_desc))
rx_id = 0;
rxd = *rxdp;
nb_hold++;
rxe = rxq->sw_ring[rx_id];
+ rxq->sw_ring[rx_id] = nmb;
rx_id++;
if (rx_id == rxq->nb_rx_desc)
rx_id = 0;
rxd = *rxdp;
nb_hold++;
rxe = rxq->sw_ring[rx_id];
+ rxq->sw_ring[rx_id] = nmb;
rx_id++;
if (rx_id == rxq->nb_rx_desc)
rx_id = 0;
/* Update rx tail register */
rte_wmb();
- IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+ IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
- PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
- "(port=%d queue=%d)", desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
return -1;
}
(volatile struct iavf_tx_context_desc *)
&txr[tx_id];
+ /* clear QW0 or the previous writeback value
+ * may impact next write
+ */
+ *(volatile uint64_t *)ctx_txd = 0;
+
txn = &sw_ring[txe->next_id];
RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
if (txe->mbuf) {
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
txq->port_id, txq->queue_id, tx_id, nb_tx);
- IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
#ifdef RTE_ARCH_X86
struct iavf_rx_queue *rxq;
int i;
+ int check_ret;
bool use_avx2 = false;
-#ifdef CC_AVX512_SUPPORT
bool use_avx512 = false;
-#endif
-
- if (!iavf_rx_vec_dev_check(dev) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)iavf_rxq_vec_setup(rxq);
- }
+ bool use_flex = false;
+ check_ret = iavf_rx_vec_dev_check(dev);
+ if (check_ret >= 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
use_avx2 = true;
+
#ifdef CC_AVX512_SUPPORT
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
use_avx512 = true;
#endif
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ use_flex = true;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ (void)iavf_rxq_vec_setup(rxq);
+ }
+
if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG,
- "Using %sVector Scattered Rx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (!use_avx512) {
+ PMD_DRV_LOG(DEBUG,
+ "Using %sVector Scattered Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ } else {
+ if (check_ret == IAVF_VECTOR_PATH)
+ PMD_DRV_LOG(DEBUG,
+ "Using AVX512 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ else
+ PMD_DRV_LOG(DEBUG,
+ "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ }
+ if (use_flex) {
dev->rx_pkt_burst = use_avx2 ?
iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
iavf_recv_scattered_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- dev->rx_pkt_burst =
- iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+ if (use_avx512) {
+ if (check_ret == IAVF_VECTOR_PATH)
+ dev->rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+ else
+ dev->rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+ }
#endif
} else {
dev->rx_pkt_burst = use_avx2 ?
iavf_recv_scattered_pkts_vec_avx2 :
iavf_recv_scattered_pkts_vec;
#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- dev->rx_pkt_burst =
- iavf_recv_scattered_pkts_vec_avx512;
+ if (use_avx512) {
+ if (check_ret == IAVF_VECTOR_PATH)
+ dev->rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512;
+ else
+ dev->rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_offload;
+ }
#endif
}
} else {
- PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (!use_avx512) {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ } else {
+ if (check_ret == IAVF_VECTOR_PATH)
+ PMD_DRV_LOG(DEBUG,
+ "Using AVX512 Vector Rx (port %d).",
+ dev->data->port_id);
+ else
+ PMD_DRV_LOG(DEBUG,
+ "Using AVX512 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ }
+ if (use_flex) {
dev->rx_pkt_burst = use_avx2 ?
iavf_recv_pkts_vec_avx2_flex_rxd :
iavf_recv_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- dev->rx_pkt_burst =
- iavf_recv_pkts_vec_avx512_flex_rxd;
+ if (use_avx512) {
+ if (check_ret == IAVF_VECTOR_PATH)
+ dev->rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_flex_rxd;
+ else
+ dev->rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+ }
#endif
} else {
dev->rx_pkt_burst = use_avx2 ?
iavf_recv_pkts_vec_avx2 :
iavf_recv_pkts_vec;
#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- dev->rx_pkt_burst =
- iavf_recv_pkts_vec_avx512;
+ if (use_avx512) {
+ if (check_ret == IAVF_VECTOR_PATH)
+ dev->rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512;
+ else
+ dev->rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_offload;
+ }
#endif
}
}
return;
}
-#endif
+#endif
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
#ifdef RTE_ARCH_X86
struct iavf_tx_queue *txq;
int i;
+ int check_ret;
+ bool use_sse = false;
bool use_avx2 = false;
-#ifdef CC_AVX512_SUPPORT
bool use_avx512 = false;
-#endif
- if (!iavf_tx_vec_dev_check(dev) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- use_avx2 = true;
+ check_ret = iavf_tx_vec_dev_check(dev);
+
+ if (check_ret >= 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+ /* SSE and AVX2 not support offload path yet. */
+ if (check_ret == IAVF_VECTOR_PATH) {
+ use_sse = true;
+ if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ use_avx2 = true;
+ }
#ifdef CC_AVX512_SUPPORT
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
use_avx512 = true;
#endif
- PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- dev->tx_pkt_burst = use_avx2 ?
- iavf_xmit_pkts_vec_avx2 :
- iavf_xmit_pkts_vec;
+ if (!use_sse && !use_avx2 && !use_avx512)
+ goto normal;
+
+ if (!use_avx512) {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = use_avx2 ?
+ iavf_xmit_pkts_vec_avx2 :
+ iavf_xmit_pkts_vec;
+ }
+ dev->tx_pkt_prepare = NULL;
#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+ if (use_avx512) {
+ if (check_ret == IAVF_VECTOR_PATH) {
+ dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+ PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
+ dev->data->port_id);
+ } else {
+ dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+ dev->tx_pkt_prepare = iavf_prep_pkts;
+ PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
+ dev->data->port_id);
+ }
+ }
#endif
- dev->tx_pkt_prepare = NULL;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
return;
}
-#endif
+normal:
+#endif
PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
dev->data->port_id);
dev->tx_pkt_burst = iavf_xmit_pkts;