return (nb_rx);
}
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
#define EM_MAX_BUF_SIZE 16384
#define EM_RCTL_FLXBUF_STEP 1024
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- const struct rte_memzone *mz;
- char z_name[RTE_MEMZONE_NAMESIZE];
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name, dev->data->port_id,
- queue_id);
-
- if ((mz = rte_memzone_lookup(z_name)) != 0)
- return (mz);
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
-#endif
-}
-
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
+ if (nb_desc % EM_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return -(EINVAL);
}
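The new check drops the byte-size arithmetic because the alignment rule is folded into EM_TXD_ALIGN. A minimal sketch of the constants assumed here, for illustration only; the exact definitions live in the driver headers introduced by this series, and the values follow from the 128-byte TDLEN/RDLEN rule stated in the comment removed above:

/* Illustration only: descriptor rings must keep TDLEN/RDLEN a multiple of
 * E1000_ALIGN (128 bytes). Legacy e1000 descriptors are 16 bytes, so the
 * descriptor count itself must be a multiple of 128 / 16 = 8. */
#define E1000_ALIGN		128
#define E1000_MIN_RING_DESC	32
#define E1000_MAX_RING_DESC	4096
#define EM_RXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_rx_desc))	/* 8 */
#define EM_TXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_data_desc))	/* 8 */

The same reasoning applies to the RX descriptor check further below, which uses EM_RXD_ALIGN.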
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
- if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
- socket_id)) == NULL)
+ tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (tz == NULL)
return (-ENOMEM);
/* Allocate the tx queue data structure. */
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
+ if (nb_desc % EM_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return (-EINVAL);
}
}
	/* Allocate RX ring for max possible number of hardware descriptors. */
- rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
- if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
- socket_id)) == NULL)
+ rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rz == NULL)
return (-ENOMEM);
/* Allocate the RX queue data structure. */
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct em_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct em_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
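These two callbacks back the generic queue-introspection API in librte_ether, rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get(). A minimal usage sketch from an application's point of view; port/queue numbers are placeholders, and the port_id type is uint8_t in this DPDK generation (uint16_t in later releases):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query descriptor counts and thresholds for one queue pair.
 * The ethdev wrappers dispatch to the rxq_info_get/txq_info_get
 * dev_ops implemented above for the em PMD. */
static void
show_queue_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: %u descriptors, rx_free_thresh=%u\n",
		       queue_id, rx_qinfo.nb_desc,
		       rx_qinfo.conf.rx_free_thresh);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: %u descriptors, tx_free_thresh=%u\n",
		       queue_id, tx_qinfo.nb_desc,
		       tx_qinfo.conf.tx_free_thresh);
}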