uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
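+/*
+ * Comparison callback for rte_power_monitor(): per the
+ * rte_power_monitor_comp_t contract, return 0 to let the core enter the
+ * power-optimized state, or -1 to abort the sleep because the wake
+ * condition (here, a descriptor already written back) holds.
+ */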
+static int
+ice_monitor_callback(const uint64_t value,
+ const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+ const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ /*
+ * we expect the DD bit to be set to 1 if this descriptor was already
+ * written to.
+ */
+ return (value & m) == m ? -1 : 0;
+}
+
int
ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
/* watch for changes in status bit */
pmc->addr = &rxdp->wb.status_error0;
- /*
- * we expect the DD bit to be set to 1 if this descriptor was already
- * written to.
- */
- pmc->val = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
- pmc->mask = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ /* comparison callback */
+ pmc->fn = ice_monitor_callback;
/* register is 16-bit */
pmc->size = sizeof(uint16_t);
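+ /*
+ * Illustrative app-side flow (not part of this patch; assumes a started
+ * port, a valid queue, and an app-chosen timeout_cycles):
+ *
+ *   struct rte_power_monitor_cond pmc;
+ *   if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
+ *           rte_power_monitor(&pmc, rte_rdtsc() + timeout_cycles);
+ */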
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
- struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
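+ /*
+ * dev_data lives in shared memory and is usable from secondary
+ * processes, unlike a cached rte_eth_dev pointer, which is only valid
+ * within one process.
+ */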
+ struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
- uint16_t buf_size, len;
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint16_t buf_size;
+ struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
- rxq->max_pkt_len = RTE_MIN(len,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
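+ /*
+ * Compute in 32 bits: the old uint16_t "len" could wrap once
+ * ICE_SUPPORT_CHAIN_NUM * rx_buf_len exceeds 64K, and the cast keeps
+ * both RTE_MIN() arguments the same unsigned type.
+ */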
+ rxq->max_pkt_len = RTE_MIN((uint32_t)
+ ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ dev_data->dev_conf.rxmode.max_rx_pkt_len);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
/* Check if scattered RX needs to be used. */
if (rxq->max_pkt_len > buf_size)
- dev->data->scattered_rx = 1;
+ dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
{
struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
- struct rte_eth_dev *dev;
if (!nb_pkts)
return 0;
if (ice_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed +=
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
rxq->rx_free_thresh;
PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
"port_id=%u, queue_id=%u",
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable */
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
- dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
return ptypes;
#endif
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
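+ /*
+ * Look the device up by port ID at use time; the adapter no longer
+ * stores a per-process rte_eth_dev pointer.
+ */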
+ dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("ice fdir tx queue",
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("ice fdir rx queue",
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable */
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
- int rx_check_ret;
- bool use_avx512 = false;
- bool use_avx2 = false;
+ int rx_check_ret = -1;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
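+ /*
+ * Keep the chosen-path flags in the adapter (shared memory) so a
+ * secondary process reuses the primary's selection instead of
+ * re-deriving it from local defaults.
+ */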
+ ad->rx_use_avx512 = false;
+ ad->rx_use_avx2 = false;
rx_check_ret = ice_rx_vec_dev_check(dev);
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
#ifdef CC_AVX512_SUPPORT
- use_avx512 = true;
+ ad->rx_use_avx512 = true;
#else
PMD_DRV_LOG(NOTICE,
"AVX512 is not supported in build env");
#endif
- if (!use_avx512 &&
+ if (!ad->rx_use_avx512 &&
(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- use_avx2 = true;
+ ad->rx_use_avx2 = true;
} else {
ad->rx_vec_allowed = false;
if (ad->rx_vec_allowed) {
if (dev->data->scattered_rx) {
- if (use_avx512) {
+ if (ad->rx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
PMD_DRV_LOG(NOTICE,
ice_recv_scattered_pkts_vec_avx512;
}
#endif
+ } else if (ad->rx_use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2;
+ }
} else {
PMD_DRV_LOG(DEBUG,
- "Using %sVector Scattered Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ "Using Vector Scattered Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_scattered_pkts_vec_avx2 :
- ice_recv_scattered_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
}
} else {
- if (use_avx512) {
+ if (ad->rx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
PMD_DRV_LOG(NOTICE,
ice_recv_pkts_vec_avx512;
}
#endif
+ } else if (ad->rx_use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2;
+ }
} else {
PMD_DRV_LOG(DEBUG,
- "Using %sVector Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ "Using Vector Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_pkts_vec_avx2 :
- ice_recv_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_pkts_vec;
}
}
return;
{ ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+ { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
+ { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
{ ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
{ ice_recv_pkts_vec, "Vector SSE" },
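+ /* These names are reported to applications via rte_eth_rx_burst_mode_get(). */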
#endif
#ifdef RTE_ARCH_X86
struct ice_tx_queue *txq;
int i;
- int tx_check_ret;
- bool use_avx512 = false;
- bool use_avx2 = false;
+ int tx_check_ret = -1;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
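+ /* Same shared-flags scheme as in ice_set_rx_function() above. */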
+ ad->tx_use_avx2 = false;
+ ad->tx_use_avx512 = false;
tx_check_ret = ice_tx_vec_dev_check(dev);
if (tx_check_ret >= 0 &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->tx_vec_allowed = true;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq && ice_txq_vec_setup(txq)) {
- ad->tx_vec_allowed = false;
- break;
- }
- }
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
#ifdef CC_AVX512_SUPPORT
- use_avx512 = true;
+ ad->tx_use_avx512 = true;
#else
PMD_DRV_LOG(NOTICE,
"AVX512 is not supported in build env");
#endif
- if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
- (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- use_avx2 = true;
-
- if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+ if (!ad->tx_use_avx512 &&
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ ad->tx_use_avx2 = true;
+
+ if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
+ tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
ad->tx_vec_allowed = false;
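+ /*
+ * Run the per-queue vector setup only once the final verdict on
+ * tx_vec_allowed is known, so queues are not prepared for a path
+ * that ends up disallowed.
+ */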
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq && ice_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
} else {
ad->tx_vec_allowed = false;
}
}
if (ad->tx_vec_allowed) {
- if (use_avx512) {
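+ /*
+ * No prepare step by default; the offload paths below install
+ * ice_prep_pkts to validate offload flags before transmit.
+ */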
+ dev->tx_pkt_prepare = NULL;
+ if (ad->tx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
PMD_DRV_LOG(NOTICE,
dev->data->port_id);
dev->tx_pkt_burst =
ice_xmit_pkts_vec_avx512_offload;
+ dev->tx_pkt_prepare = ice_prep_pkts;
} else {
PMD_DRV_LOG(NOTICE,
"Using AVX512 Vector Tx (port %d).",
}
#endif
} else {
- PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- dev->tx_pkt_burst = use_avx2 ?
- ice_xmit_pkts_vec_avx2 :
- ice_xmit_pkts_vec;
+ if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst =
+ ice_xmit_pkts_vec_avx2_offload;
+ dev->tx_pkt_prepare = ice_prep_pkts;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ ad->tx_use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = ad->tx_use_avx2 ?
+ ice_xmit_pkts_vec_avx2 :
+ ice_xmit_pkts_vec;
+ }
}
- dev->tx_pkt_prepare = NULL;
return;
}