#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
-#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define I40E_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define I40E_TX_IEEE1588_TMST 0
#endif
-#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
-
-#define I40E_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_MASK | \
+#define I40E_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+
+#define I40E_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_QINQ | \
+ RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_TUNNEL_MASK | \
+ RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
I40E_TX_IEEE1588_TMST)
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
+#define I40E_TX_OFFLOAD_SIMPLE_SUP_MASK (RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV6)
+
+#define I40E_TX_OFFLOAD_SIMPLE_NOTSUP_MASK \
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_SIMPLE_SUP_MASK)
+
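The two NOTSUP masks above invert the supported set within RTE_MBUF_F_TX_OFFLOAD_MASK: any bit present in both a packet's ol_flags and the mask is an offload the given path cannot honor. A minimal sketch of the check, as the prep functions below apply it:

#include <rte_mbuf.h>

/* sketch: a packet fits the simple Tx path only if none of its offload
 * flags fall outside I40E_TX_OFFLOAD_SIMPLE_SUP_MASK */
static inline int
simple_tx_offloads_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & I40E_TX_OFFLOAD_SIMPLE_NOTSUP_MASK) == 0;
}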
+static int
+i40e_monitor_callback(const uint64_t value,
+ const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+ const uint64_t m = rte_cpu_to_le_64(1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+ /*
+ * we expect the DD bit to be set to 1 if this descriptor was already
+ * written to.
+ */
+ return (value & m) == m ? -1 : 0;
+}
+
+int
+i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile union i40e_rx_desc *rxdp;
+ uint16_t desc;
+
+ desc = rxq->rx_tail;
+ rxdp = &rxq->rx_ring[desc];
+ /* watch for changes in status bit */
+ pmc->addr = &rxdp->wb.qword1.status_error_len;
+
+ /* comparison callback */
+ pmc->fn = i40e_monitor_callback;
+
+ /* registers are 64-bit */
+ pmc->size = sizeof(uint64_t);
+
+ return 0;
+}
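For context, a hedged usage sketch: an application normally reaches this callback through rte_eth_get_monitor_addr() and then sleeps on the descriptor with the power-intrinsics API (names as in the DPDK 21.x releases; the 1 ms TSC deadline is an illustrative choice):

#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>
#include <rte_cycles.h>

static void
sleep_until_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;

	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
		return;
	/* wake when the DD bit is written back, or when the deadline passes */
	rte_power_monitor(&pmc, rte_get_tsc_cycles() + rte_get_tsc_hz() / 1000);
}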
static inline void
i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
- PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+ RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
/* Check if RSS_HASH */
flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
- I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
/* Check if FDIR Match */
flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
- PKT_RX_FDIR : 0);
+ RTE_MBUF_F_RX_FDIR : 0);
return flags;
}
#define I40E_RX_ERR_BITS 0x3f
if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
- flags |= PKT_RX_EIP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
return flags;
}
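On the receive-consumer side, the GOOD/BAD flags set above are read back from the mbuf; a minimal sketch using the generic checksum mask macros:

#include <rte_mbuf.h>

static inline int
rx_cksum_ok(const struct rte_mbuf *m)
{
	/* both the IP and L4 checksums must be reported good */
	return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			RTE_MBUF_F_RX_IP_CKSUM_GOOD &&
	       (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}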
if ((mb->packet_type & RTE_PTYPE_L2_MASK)
== RTE_PTYPE_L2_ETHER_TIMESYNC)
- pkt_flags = PKT_RX_IEEE1588_PTP;
+ pkt_flags = RTE_MBUF_F_RX_IEEE1588_PTP;
if (tsyn & 0x04) {
- pkt_flags |= PKT_RX_IEEE1588_TMST;
+ pkt_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
mb->timesync = tsyn & 0x03;
}
if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
} else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
- flags |= PKT_RX_FDIR_FLX;
+ flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
mb->hash.fdir.lo =
rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
- flags |= PKT_RX_FDIR_FLX;
+ flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
#else
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
return flags;
}
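A short sketch of how a receiver interprets the flow-director flags set above; handle_flow_id is a hypothetical application hook:

#include <rte_mbuf.h>

static inline void
check_fdir(const struct rte_mbuf *m, void (*handle_flow_id)(uint32_t))
{
	/* hash.fdir.hi carries the matched filter ID when FDIR_ID is set */
	if (m->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
		handle_flow_id(m->hash.fdir.hi);
}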
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
/* L4TUNT: L4 Tunneling Type */
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
union i40e_tx_offload tx_offload)
{
/* Set MACLEN */
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
else
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2)
<< I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
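For reference, a hedged sender-side sketch of the flags and lengths that drive the branches above, requesting IPv4 plus TCP checksum offload (the header lengths must describe the actual packet):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

static inline void
request_ipv4_tcp_cksum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}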
if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
- PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
- "(port=%d queue=%d)", desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
return -1;
}
uint16_t pkt_len;
uint64_t qword1;
uint32_t rx_status;
- int32_t s[I40E_LOOK_AHEAD], nb_dd;
+ int32_t s[I40E_LOOK_AHEAD], var, nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
I40E_RXD_QW1_STATUS_SHIFT;
}
- rte_smp_rmb();
+ /* This barrier is to order loads of different words in the descriptor */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
/* Compute how many status bits were set */
- for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
- nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+ for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
+ var = s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+#ifdef RTE_ARCH_ARM
+		/* For Arm platforms, count only contiguous DD bits; DD bits
+		 * can be observed out of order here, so a descriptor past a
+		 * gap is not yet safe to process.
+		 */
+ if (var)
+ nb_dd += 1;
+ else
+ break;
+#else
+ nb_dd += var;
+#endif
+ }
nb_rx += nb_dd;
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
#ifdef RTE_LIBRTE_IEEE1588
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update rx tail register */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
break;
}
+ /**
+ * Use acquire fence to ensure that qword1 which includes DD
+ * bit is loaded before loading of other descriptor words.
+ */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rxm->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
#ifdef RTE_LIBRTE_IEEE1588
break;
}
+ /**
+ * Use acquire fence to ensure that qword1 which includes DD
+ * bit is loaded before loading of other descriptor words.
+ */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
first_seg->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
#ifdef RTE_LIBRTE_IEEE1588
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
static inline uint16_t
i40e_calc_context_desc(uint64_t flags)
{
- static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT |
- PKT_TX_TUNNEL_MASK;
+ static uint64_t mask = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_QINQ |
+ RTE_MBUF_F_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
- mask |= PKT_TX_IEEE1588_TMST;
+ mask |= RTE_MBUF_F_TX_IEEE1588_TMST;
#endif
return (flags & mask) ? 1 : 0;
}
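A context descriptor adds one slot to a packet's descriptor budget. A simplified sketch of that accounting (the transmit path below uses i40e_calc_pkt_desc() instead for TSO packets, since a segment larger than I40E_MAX_DATA_PER_TXD spans several data descriptors):

#include <rte_mbuf.h>

static inline uint16_t
descs_needed(const struct rte_mbuf *m)
{
	/* one data descriptor per segment, plus an optional context
	 * descriptor for TSO/QinQ/tunneling/timestamping */
	return m->nb_segs + i40e_calc_context_desc(m->ol_flags);
}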
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
- hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = I40E_TX_CTX_DESC_TSO;
* the mbuf data size exceeds max data size that hw allows
* per tx desc.
*/
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
nb_ctx);
else
}
/* Descriptor based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
i40e_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
/* Enable checksum offloading */
}
/* TSO enabled means no timestamp */
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
i40e_set_tso_ctx(tx_pkt, tx_offload);
else {
#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT);
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
- if (ol_flags & PKT_TX_QINQ_PKT) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
- while ((ol_flags & PKT_TX_TCP_SEG) &&
+ while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
txd->buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
- PMD_TX_FREE_LOG(DEBUG,
- "Setting RS bit on TXD id="
- "%4u (port=%d queue=%d)",
- tx_last, txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
td_cmd |= I40E_TX_DESC_CMD_RS;
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
- uint16_t i;
+ uint16_t tx_rs_thresh = txq->tx_rs_thresh;
+ uint16_t i = 0, j = 0;
+ struct rte_mbuf *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+ const uint16_t k = RTE_ALIGN_FLOOR(tx_rs_thresh, RTE_I40E_TX_MAX_FREE_BUF_SZ);
+ const uint16_t m = tx_rs_thresh % RTE_I40E_TX_MAX_FREE_BUF_SZ;
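+	/* k: whole bulk-free chunks; m: remainder. Mbufs are returned to
+	 * their mempool in batches of up to RTE_I40E_TX_MAX_FREE_BUF_SZ. */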
if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
- txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+ txep = &txq->sw_ring[txq->tx_next_dd - (tx_rs_thresh - 1)];
- for (i = 0; i < txq->tx_rs_thresh; i++)
+ for (i = 0; i < tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
- for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
- rte_mempool_put(txep->mbuf->pool, txep->mbuf);
- txep->mbuf = NULL;
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (k) {
+ for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
+ free[i] = txep->mbuf;
+ txep->mbuf = NULL;
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free,
+ RTE_I40E_TX_MAX_FREE_BUF_SZ);
+ }
+ }
+
+ if (m) {
+ for (i = 0; i < m; ++i, ++txep) {
+ free[i] = txep->mbuf;
+ txep->mbuf = NULL;
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, m);
}
} else {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
while (nb_pkts) {
uint16_t ret, num;
+		/* a burst must not cross the tx_rs_thresh boundary */
num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
num);
return nb_tx;
}
+/*********************************************************************
+ *
+ * TX simple prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_simple_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (m->nb_segs != 1) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_SIMPLE_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+		/* check that the packet size is within device limits */
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ }
+ return i;
+}
+
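This prep hook is installed as dev->tx_pkt_prepare for the simple Tx path (see the i40e_set_tx_function() hunk below), so applications reach it through rte_eth_tx_prepare(). A usage sketch:

#include <rte_ethdev.h>

static inline uint16_t
prepare_and_send(uint16_t port, uint16_t queue,
		 struct rte_mbuf **pkts, uint16_t n)
{
	/* transmit only the validated prefix of the burst */
	uint16_t ok = rte_eth_tx_prepare(port, queue, pkts, n);

	return rte_eth_tx_burst(port, queue, pkts, ok);
}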
/*********************************************************************
*
* TX prep functions
ol_flags = m->ol_flags;
/* Check for m->nb_segs to not exceed the limits. */
- if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = EINVAL;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
}
if (rxq->rx_deferred_start)
- PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "RX queue %u is deferred start",
rx_queue_id);
err = i40e_alloc_rx_queue_mbufs(rxq);
return err;
}
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
}
if (txq->tx_deferred_start)
- PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "TX queue %u is deferred start",
tx_queue_id);
/*
dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+#ifdef CC_AVX512_SUPPORT
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
+#endif
dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
return ptypes;
PMD_DRV_LOG(ERR, "Can't use default burst.");
return -EINVAL;
}
- /* check scatterred conflict */
+ /* check scattered conflict */
if (!dev->data->scattered_rx && use_scattered_rx) {
PMD_DRV_LOG(ERR, "Scattered rx is required.");
return -EINVAL;
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_pf *pf = NULL;
- struct i40e_vf *vf = NULL;
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- vsi = &vf->vsi;
- if (!vsi)
- return -EINVAL;
- reg_idx = queue_idx;
- } else {
- pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
- if (!vsi)
- return -EINVAL;
- q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
- if (q_offset < 0)
- return -EINVAL;
- reg_idx = vsi->base_queue + q_offset;
- }
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
- i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+ /* Allocate the maximum number of RX ring hardware descriptor. */
len = I40E_MAX_RING_DESC;
/**
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
return -ENOMEM;
}
+ rxq->mz = rz;
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
return -EINVAL;
}
} else {
}
void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
{
struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
i40e_rx_queue_release_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
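The split keeps the internal void * helpers while the new wrappers match the updated ethdev release signature that takes (dev, queue_id). A hypothetical fragment of the ops wiring, assuming the standard struct eth_dev_ops field names:

#include <ethdev_driver.h>

static const struct eth_dev_ops release_ops_fragment = {
	.rx_queue_release = i40e_dev_rx_queue_release,
	.tx_queue_release = i40e_dev_tx_queue_release,
};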
uint32_t
-i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+i40e_dev_rx_queue_count(void *rx_queue)
{
#define I40E_RXQ_SCAN_INTERVAL 4
volatile union i40e_rx_desc *rxdp;
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = rx_queue;
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
return desc;
}
-int
-i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union i40e_rx_desc *rxdp;
- struct i40e_rx_queue *rxq = rx_queue;
- uint16_t desc;
- int ret;
-
- if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
- return 0;
- }
-
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &(rxq->rx_ring[desc]);
-
- ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
- I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
- (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
-
- return ret;
-}
-
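The removed ->rx_descriptor_done implementation is superseded by the descriptor-status API kept just below; an application performs the equivalent check as in this sketch:

#include <rte_ethdev.h>

static inline int
rx_desc_done(uint16_t port, uint16_t queue, uint16_t offset)
{
	return rte_eth_rx_descriptor_status(port, queue, offset) ==
			RTE_ETH_RX_DESC_DONE;
}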
int
i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
}
/* check simple tx conflict */
if (ad->tx_simple_allowed) {
- if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
PMD_DRV_LOG(ERR, "No-simple tx is required.");
return -EINVAL;
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_pf *pf = NULL;
- struct i40e_vf *vf = NULL;
struct i40e_tx_queue *txq;
const struct rte_memzone *tz;
uint32_t ring_size;
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- vsi = &vf->vsi;
- if (!vsi)
- return -EINVAL;
- reg_idx = queue_idx;
- } else {
- pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
- if (!vsi)
- return -EINVAL;
- q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
- if (q_offset < 0)
- return -EINVAL;
- reg_idx = vsi->base_queue + q_offset;
- }
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
*/
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
if (tx_conf->tx_rs_thresh > 0)
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx]) {
- i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
return -ENOMEM;
}
+ txq->mz = tz;
txq->nb_tx_desc = nb_desc;
txq->tx_rs_thresh = tx_rs_thresh;
txq->tx_free_thresh = tx_free_thresh;
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
return -EINVAL;
}
} else {
}
void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
{
struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
i40e_tx_queue_release_mbufs(q);
rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
rte_free(q);
}
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+
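+	/* free any packet left half-assembled by a previous scattered Rx
+	 * burst; rte_pktmbuf_free() accepts NULL, so no guard is needed */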
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
* vPMD tx will not set sw_ring's mbuf to NULL after free,
* so need to free remains more carefully.
*/
+#ifdef CC_AVX512_SUPPORT
+ if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx512) {
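+		/* the AVX512 Tx path stores compact sw_ring entries (mbuf
+		 * pointer only), hence the reinterpretation below */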
+ struct i40e_vec_tx_entry *swr = (void *)txq->sw_ring;
+
+ i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(swr[i].mbuf);
+ swr[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
+ rte_pktmbuf_free_seg(swr[i].mbuf);
+ swr[i].mbuf = NULL;
+ }
+ return;
+ }
+#endif
if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
}
rxq->max_pkt_len =
- RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
- rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
- rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
- return I40E_ERR_CONFIG;
- }
- } else {
- if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
- return I40E_ERR_CONFIG;
- }
+ RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
+ data->mtu + I40E_ETH_OVERHEAD);
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
}
return 0;
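With the jumbo-frame flag gone, the frame limit now derives from the configured MTU. A worked sketch of the same computation, assuming I40E_ETH_OVERHEAD is the usual 26 bytes (14-byte Ethernet header + 4-byte CRC + two 4-byte VLAN tags), so an MTU of 1500 caps frames at 1526 bytes:

#include <stdint.h>
#include <rte_common.h>

static inline uint32_t
i40e_frame_cap(uint32_t mtu, uint32_t chain_len, uint32_t buf_len)
{
	/* the hardware buffer-chaining limit still bounds the result */
	return RTE_MIN(chain_len * buf_len, mtu + 26u); /* assumed overhead */
}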
if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!dev->data->rx_queues[i])
continue;
- i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ i40e_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "rx_ring", i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (!dev->data->tx_queues[i])
continue;
- i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ i40e_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
- rte_eth_dma_zone_free(dev, "tx_ring", i);
}
}
return I40E_ERR_BAD_PTR;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->dev_data->port_id];
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
return I40E_ERR_NO_MEMORY;
}
+ txq->mz = tz;
txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
txq->queue_id = I40E_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
return I40E_ERR_BAD_PTR;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->dev_data->port_id];
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
return I40E_ERR_NO_MEMORY;
}
+ rxq->mz = rz;
rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
rxq->queue_id = I40E_FDIR_QUEUE_ID;
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
qinfo->conf.offloads = txq->offloads;
}
-static eth_rx_burst_t
-i40e_get_latest_rx_vec(bool scatter)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
- i40e_recv_pkts_vec_avx2;
+#ifdef RTE_ARCH_X86
+static inline bool
+get_avx_supported(bool request_avx512)
+{
+ if (request_avx512) {
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+ return true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX512 is not supported in build env");
+ return false;
#endif
- return scatter ? i40e_recv_scattered_pkts_vec :
- i40e_recv_pkts_vec;
-}
-
-static eth_rx_burst_t
-i40e_get_recommend_rx_vec(bool scatter)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- /*
- * since AVX frequency can be different to base frequency, limit
- * use of AVX2 version to later plaforms, not all those that could
- * theoretically run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
- i40e_recv_pkts_vec_avx2;
+ } else {
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+#ifdef CC_AVX2_SUPPORT
+ return true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX2 is not supported in build env");
+ return false;
#endif
- return scatter ? i40e_recv_scattered_pkts_vec :
- i40e_recv_pkts_vec;
+ }
+
+ return false;
}
+#endif /* RTE_ARCH_X86 */
+
void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+#ifdef RTE_ARCH_X86
+ ad->rx_use_avx512 = false;
+ ad->rx_use_avx2 = false;
+#endif
if (i40e_rx_vec_dev_conf_condition_check(dev) ||
!ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
break;
}
}
+#ifdef RTE_ARCH_X86
+ ad->rx_use_avx512 = get_avx_supported(1);
+
+ if (!ad->rx_use_avx512)
+ ad->rx_use_avx2 = get_avx_supported(0);
+#endif
}
}
if (ad->rx_vec_allowed &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- /* Vec Rx path */
- PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
- dev->data->port_id);
- if (ad->use_latest_vec)
- dev->rx_pkt_burst =
- i40e_get_latest_rx_vec(dev->data->scattered_rx);
- else
- dev->rx_pkt_burst =
- i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+#ifdef RTE_ARCH_X86
+ if (dev->data->scattered_rx) {
+ if (ad->rx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ i40e_recv_scattered_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Using %sVector Scattered Rx (port %d).",
+ ad->rx_use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ad->rx_use_avx2 ?
+ i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_scattered_pkts_vec;
+ }
+ } else {
+ if (ad->rx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ i40e_recv_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Using %sVector Rx (port %d).",
+ ad->rx_use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ad->rx_use_avx2 ?
+ i40e_recv_pkts_vec_avx2 :
+ i40e_recv_pkts_vec;
+ }
+ }
+#else /* RTE_ARCH_X86 */
+ if (dev->data->scattered_rx) {
+ PMD_INIT_LOG(DEBUG,
+ "Using Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ }
+#endif /* RTE_ARCH_X86 */
} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
rx_using_sse =
(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+#ifdef CC_AVX512_SUPPORT
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
+#endif
dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
{ i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
{ i40e_recv_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { i40e_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+ { i40e_recv_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
{ i40e_recv_pkts_vec_avx2, "Vector AVX2" },
{ i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" },
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
ad->tx_vec_allowed = (ad->tx_simple_allowed &&
txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
txq->queue_id);
}
-static eth_tx_burst_t
-i40e_get_latest_tx_vec(void)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- return i40e_xmit_pkts_vec_avx2;
-#endif
- return i40e_xmit_pkts_vec;
-}
-
-static eth_tx_burst_t
-i40e_get_recommend_tx_vec(void)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- /*
- * since AVX frequency can be different to base frequency, limit
- * use of AVX2 version to later plaforms, not all those that could
- * theoretically run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- return i40e_xmit_pkts_vec_avx2;
-#endif
- return i40e_xmit_pkts_vec;
-}
-
void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
int i;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+#ifdef RTE_ARCH_X86
+ ad->tx_use_avx2 = false;
+ ad->tx_use_avx512 = false;
+#endif
if (ad->tx_vec_allowed) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct i40e_tx_queue *txq =
break;
}
}
+#ifdef RTE_ARCH_X86
+ ad->tx_use_avx512 = get_avx_supported(1);
+
+ if (!ad->tx_use_avx512)
+ ad->tx_use_avx2 = get_avx_supported(0);
+#endif
}
}
if (ad->tx_simple_allowed) {
if (ad->tx_vec_allowed &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
- if (ad->use_latest_vec)
- dev->tx_pkt_burst =
- i40e_get_latest_tx_vec();
- else
- dev->tx_pkt_burst =
- i40e_get_recommend_tx_vec();
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+#ifdef RTE_ARCH_X86
+ if (ad->tx_use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ ad->tx_use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = ad->tx_use_avx2 ?
+ i40e_xmit_pkts_vec_avx2 :
+ i40e_xmit_pkts_vec;
+ }
+#else /* RTE_ARCH_X86 */
+ PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#endif /* RTE_ARCH_X86 */
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
- dev->tx_pkt_prepare = NULL;
+ dev->tx_pkt_prepare = i40e_simple_prep_pkts;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts;
{ i40e_xmit_pkts_simple, "Scalar Simple" },
{ i40e_xmit_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { i40e_xmit_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
{ i40e_xmit_pkts_vec, "Vector SSE" },
#elif defined(RTE_ARCH_ARM64)
}
}
-#ifndef RTE_LIBRTE_I40E_INC_VECTOR
-int
-i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
-{
- return -1;
-}
-
-uint16_t
-i40e_recv_pkts_vec(
- void __rte_unused *rx_queue,
- struct rte_mbuf __rte_unused **rx_pkts,
- uint16_t __rte_unused nb_pkts)
-{
- return 0;
-}
-
-uint16_t
-i40e_recv_scattered_pkts_vec(
- void __rte_unused *rx_queue,
- struct rte_mbuf __rte_unused **rx_pkts,
- uint16_t __rte_unused nb_pkts)
-{
- return 0;
-}
-
-int
-i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
-{
- return -1;
-}
-
-int
-i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
-{
- return -1;
-}
-
-void
-i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
-{
- return;
-}
-
-uint16_t
-i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
- struct rte_mbuf __rte_unused **tx_pkts,
- uint16_t __rte_unused nb_pkts)
-{
- return 0;
-}
-#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
-
#ifndef CC_AVX2_SUPPORT
uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,