/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
/* Bit mask indicating which bits are required for building the TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return (m);
-}
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+ PKT_TX_TCP_SEG | \
+ IGB_TX_IEEE1588_TMST)
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
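+/*
+ * TX offload flags this driver cannot handle; any packet requesting one of
+ * them is rejected by eth_igb_prep_pkts().
+ */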
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of an RX queue.
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
/**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
}
txq->ctx_cache[ctx_curr].flags = ol_flags;
- txq->ctx_cache[ctx_idx].tx_offload.data =
+ txq->ctx_cache[ctx_curr].tx_offload.data =
tx_offload_mask.data & tx_offload.data;
- txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+ txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
vlan_macip_lens = (uint32_t)tx_offload.data;
}
/* Mismatch, use the previous context */
- return (IGB_CTX_NUM);
+ return IGB_CTX_NUM;
}
static inline uint32_t
ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required */
new_ctx = (ctx == IGB_CTX_NUM);
- ctx = txq->ctx_curr;
+ ctx = txq->ctx_curr + txq->ctx_start;
tx_last = (uint16_t) (tx_last + new_ctx);
}
if (tx_last >= txq->nb_tx_desc)
*/
if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
/*
* Set the Transmit Descriptor Tail (TDT).
*/
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
txq->tx_tail = tx_id;
- return (nb_tx);
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
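+/*
+ * Check that the offloads requested for each packet can be handled and
+ * prepare the checksum fields; on the first failing packet, set rte_errno
+ * and return its index.
+ */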
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
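+ /* Fill in the pseudo-header checksums expected by the hardware offloads. */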
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
}
/*********************************************************************
}
static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
{
uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
0, 0, 0, 0,
};
- pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
+ struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
+
+ /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
+ if (hw->mac.type == e1000_i210)
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
+ else
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
+#else
+ RTE_SET_USED(rxq);
#endif
return pkt_flags;
uint64_t pkt_flags;
/* Check if VLAN present */
- pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
*/
static uint64_t error_to_pkt_flags_map[4] = {
- 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
};
return error_to_pkt_flags_map[(rx_status >>
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
rxm->ol_flags = pkt_flags;
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
uint16_t
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
*/
first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
first_seg->ol_flags = pkt_flags;
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
/*
* Maximum number of Ring Descriptors.
*
* descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, IGB_ALIGN);
-#endif
-}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
igb_tx_queue_release(txq);
}
+static int
+igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+{
+ struct igb_tx_entry *sw_ring;
+ volatile union e1000_adv_tx_desc *txr;
+ uint16_t tx_first; /* First segment analyzed. */
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_last; /* Last segment in the current packet. */
+ uint16_t tx_next; /* First segment of the next packet. */
+ int count;
+
+ if (txq != NULL) {
+ count = 0;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+
+ /*
+ * tx_tail is the last sent packet on the sw_ring. Go to the end
+ * of that packet (the last segment in the packet chain) and
+ * the next segment is then the start of the oldest packet
+ * in the sw_ring. This is the first packet we will attempt
+ * to free.
+ */
+
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
+
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
+
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /*
+ * Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /*
+ * Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /*
+ * Loop through all segments in a
+ * packet.
+ */
+ do {
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ } while (tx_id != tx_next);
+
+ if (unlikely(count == (int)free_cnt))
+ break;
+ } else
+ /*
+ * mbuf still in use, nothing left to
+ * free.
+ */
+ break;
+ } else {
+ /*
+ * There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) The interface has not sent a ring's worth of
+ * packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but
+ * not all, so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely((tx_id == tx_first) && (count != 0)))
+ break;
+
+ /*
+ * Walk the list and find the next mbuf, if any.
+ */
+ do {
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ if (sw_ring[tx_id].mbuf)
+ break;
+
+ } while (tx_id != tx_first);
+
+ /*
+ * Determine why the previous loop exited. If there
+ * is no mbuf, we are done.
+ */
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+ }
+ }
+ } else
+ count = -ENODEV;
+
+ return count;
+}
+
+int
+eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ return igb_tx_done_cleanup(txq, free_cnt);
+}
+
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of IGB_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
- (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+ if (nb_desc % IGB_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return -EINVAL;
}
* driver.
*/
if (tx_conf->tx_free_thresh != 0)
- PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
+ PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
"used for the 1G driver.");
if (tx_conf->tx_rs_thresh != 0)
- PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
+ PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
"used for the 1G driver.");
- if (tx_conf->tx_thresh.wthresh == 0)
- PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
+ if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
+ PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
"consider setting the TX WTHRESH value to 4, 8, "
"or 16.");
txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
- size, socket_id);
+ size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
if (tz == NULL) {
igb_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
- txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+ txq->tx_ring_phys_addr = tz->phys_addr;
+
+ txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(struct igb_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE);
if (txq->sw_ring == NULL) {
igb_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
igb_reset_tx_queue(txq, dev);
dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
- return (0);
+ return 0;
}
static void
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of IGB_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
- (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
- return (-EINVAL);
+ if (nb_desc % IGB_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -EINVAL;
}
/* Free memory prior to re-allocation if needed */
rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
rxq->hthresh = rx_conf->rx_thresh.hthresh;
rxq->wthresh = rx_conf->rx_thresh.wthresh;
- if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
+ if (rxq->wthresh > 0 &&
+ (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
rxq->wthresh = 1;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
- rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+ size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
if (rz == NULL) {
igb_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
+ rxq->rx_ring_phys_addr = rz->phys_addr;
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
RTE_CACHE_LINE_SIZE);
if (rxq->sw_ring == NULL) {
igb_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
desc - rxq->nb_rx_desc]);
}
- return 0;
+ return desc;
}
int
return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
+int
+eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
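+ /*
+ * Offsets that reach into descriptors already processed but not yet
+ * returned to the hardware are reported as unavailable.
+ */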
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct igb_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
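+ /* A set DD bit means the hardware has finished with this descriptor. */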
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
/* Initialize software ring entries. */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union e1000_adv_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
"queue_id=%hu", rxq->queue_id);
- return (-ENOMEM);
+ return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
/* Enable both L3/L4 rx checksum offload */
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
- rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
else
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
}
}
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct igb_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct igb_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}