#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
-#define I40E_MIN_RING_DESC 64
-#define I40E_MAX_RING_DESC 4096
-#define I40E_ALIGN 128
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
#define I40E_MAX_PKT_TYPE 256
#define I40E_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
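+/* mbuf ol_flags that require checksum or TSO setup on the TX descriptor */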
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
}
#endif
-/* For each value it means, datasheet of hardware can tell more details */
+/* For each ptype value below, the hardware datasheet gives the full details.
+ *
+ * @note: update i40e_dev_supported_ptypes_get() if this table changes.
+ */
static inline uint32_t
i40e_rxd_pkt_type_mapping(uint8_t ptype)
{
- static const uint32_t ptype_table[UINT8_MAX] __rte_cache_aligned = {
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
/* L2 types */
/* [0] reserved */
[1] = RTE_PTYPE_L2_ETHER,
/* All others reserved */
};
- return ptype_table[ptype];
+ return type_table[ptype];
}
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ rte_mbuf_data_dma_addr_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
return nb_rx;
}
+#else
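+/* Dummy stub: keeps references to i40e_recv_pkts_bulk_alloc buildable when
+ * bulk allocation RX is compiled out; it is not expected to run.
+ */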
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
uint16_t
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
mask |= PKT_TX_IEEE1588_TMST;
#endif
- return ((flags & mask) ? 1 : 0);
+ return (flags & mask) ? 1 : 0;
}
/* set i40e TSO context descriptor */
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
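+ /* NOREFCOUNT: no mbuf reference counting is done on this queue, so the
+  * mbufs can be returned straight to their mempool instead of going
+  * through rte_pktmbuf_free_seg().
+  */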
- if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
+ if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
{
uint64_t dma_addr;
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
- }
+ } else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
}
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
i40e_tx_queue_release_mbufs(txq);
i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
}
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to i40e_rxd_pkt_type_mapping() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
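+ /* Advertise ptypes only for the RX burst functions that translate the
+  * HW ptype field via i40e_rxd_pkt_type_mapping().
+  */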
+ if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+ return ptypes;
+ return NULL;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t base, bsf, tc_mapping;
int use_def_burst_func = 1;
- if (hw->mac.type == I40E_MAC_VF) {
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
"index exceeds the maximum");
return I40E_ERR_PARAM;
}
- if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"rx queue data structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mp = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF)
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
rxq->reg_idx = queue_idx;
else /* PF device */
rxq->reg_idx = vsi->base_queue +
/* Allocate the maximum number of RX ring hardware descriptors. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "rx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_rx_queue(rxq);
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
- if (hw->mac.type == I40E_MAC_VF) {
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
return I40E_ERR_PARAM;
}
- if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"tx queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "tx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF)
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
txq->reg_idx = queue_idx;
else /* PF device */
txq->reg_idx = vsi->base_queue +
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_tx_queue(txq);
rte_free(q);
}
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, I40E_ALIGN);
-#endif
-}
-
const struct rte_memzone *
i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
- const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
if (mz)
return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_ALIGN);
-#endif
+
+ if (rte_xen_dom0_supported())
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+ else
+ mz = rte_memzone_reserve_aligned(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN);
return mz;
}
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ /* showiv indicates whether the inner VLAN of a tunneled packet is
+  * stripped. When set to 1, the VLAN information is stripped from the
+  * inner header, but the hardware does not put it in the descriptor,
+  * so leave it at zero by default.
+  */
+ rx_ctx.showiv = 0;
rx_ctx.prefena = 1;
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "fdir_tx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "fdir_rx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
return I40E_SUCCESS;
}
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
{
return 0;
}
-