m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
- return (m);
+ return m;
}
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
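The two removed DMA-address macros are replaced throughout this file by inline helpers from rte_mbuf.h. A minimal sketch of what those helpers presumably look like, assuming the 2.2-era struct rte_mbuf layout with buf_physaddr and data_off fields:

static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
        /* physical address of the first byte of packet data */
        return mb->buf_physaddr + mb->data_off;
}

static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
        /* physical address of the data area with the default headroom */
        return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
}

Using inline functions instead of macros lets the compiler type-check the mbuf argument and removes the explicit (uint64_t) casts from every call site below.
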
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
if (likely (txq->ctx_cache.flags == flags &&
((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
txq->ctx_cache.cmp_mask) == 0))
- return (EM_CTX_0);
+ return EM_CTX_0;
/* Mismatch */
- return (EM_CTX_NUM);
+ return EM_CTX_NUM;
}
/* Reset transmit descriptors after they have been used */
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
/* No Error */
- return (0);
+ return 0;
}
static inline uint32_t
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
- return (tmp);
+ return tmp;
}
uint16_t
if (em_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
}
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
- return (nb_tx);
+ return nb_tx;
}
/*********************************************************************
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
if (rx_error & E1000_RXD_ERR_TCPE)
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
- return (pkt_flags);
+ return pkt_flags;
}
uint16_t
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
uint16_t
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
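The driver-local EM_ALIGN and EM_MIN/MAX_RING_DESC constants removed above give way to shared definitions. A hedged sketch of how the replacements used below (E1000_ALIGN, EM_RXD_ALIGN, EM_TXD_ALIGN, E1000_MIN/MAX_RING_DESC) are presumably declared in e1000_ethdev.h, with values mirroring the removed ones:

/* RDLEN/TDLEN must be a multiple of 128 bytes, so express the
 * alignment in descriptors rather than bytes. */
#define E1000_ALIGN             128

#define EM_RXD_ALIGN            (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN            (E1000_ALIGN / sizeof(struct e1000_data_desc))

#define E1000_MIN_RING_DESC     32
#define E1000_MAX_RING_DESC     4096

With the alignment expressed in descriptors, the queue setup code can check nb_desc % EM_TXD_ALIGN (or EM_RXD_ALIGN) directly instead of first multiplying by the descriptor size.
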
#define EM_MAX_BUF_SIZE 16384
#define EM_RCTL_FLXBUF_STEP 1024
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- const struct rte_memzone *mz;
- char z_name[RTE_MEMZONE_NAMESIZE];
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name, dev->data->port_id,
- queue_id);
-
- if ((mz = rte_memzone_lookup(z_name)) != 0)
- return (mz);
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
-#endif
-}
-
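The locally defined ring_dma_zone_reserve() is dropped in favour of the generic ethdev helper called below. A hedged sketch of that helper's prototype, as assumed from the rte_ethdev.h of this era:

/* Builds a per-driver/port/queue memzone name, returns the zone if it
 * already exists, and otherwise reserves size bytes with the requested
 * alignment on socket_id, so each PMD no longer carries its own copy
 * of this boilerplate. */
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *name,
                         uint16_t queue_id, size_t size, unsigned int align,
                         int socket_id);

The Xen DOM0 special case that lived inside the removed helper is presumably handled by the generic function as well, which is why the callers below no longer need the #ifdef.
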
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
+ if (nb_desc % EM_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return -(EINVAL);
}
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
- if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
- socket_id)) == NULL)
- return (-ENOMEM);
+ tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (tz == NULL)
+ return -ENOMEM;
/* Allocate the tx queue data structure. */
if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
RTE_CACHE_LINE_SIZE)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate software ring */
if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(txq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
- return (0);
+ return 0;
}
static void
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
- return (-EINVAL);
+ if (nb_desc % EM_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
+ return -EINVAL;
}
/*
if (rx_conf->rx_drop_en) {
PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
"device");
- return (-EINVAL);
+ return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
}
/* Allocate RX ring for max possible number of hardware descriptors. */
- rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
- if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
- socket_id)) == NULL)
- return (-ENOMEM);
+ rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rz == NULL)
+ return -ENOMEM;
/* Allocate the RX queue data structure. */
if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
RTE_CACHE_LINE_SIZE)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate software ring. */
if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
sizeof (rxq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mb_pool = mp;
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
- return (0);
+ return 0;
}
uint32_t
i++) {
if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
*bufsz = bufsz_to_rctl[i].bufsz;
- return (bufsz_to_rctl[i].rctl);
+ return bufsz_to_rctl[i].rctl;
}
}
/* Should never happen. */
- return (-EINVAL);
+ return -EINVAL;
}
static int
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
"queue_id=%hu", rxq->queue_id);
- return (-ENOMEM);
+ return -ENOMEM;
}
- dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct em_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct em_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}