#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
+#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
+static void fm10k_close_mbx_service(struct fm10k_hw *hw);
+
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device.
+ * return 0 on success
+ * return -ENOMEM if buffers cannot be allocated
+ * return -EINVAL if buffers do not satisfy the alignment constraint
+ */
+static inline int
+rx_queue_reset(struct fm10k_rx_queue *q)
+{
+ uint64_t dma_addr;
+ int i, diag;
+ PMD_INIT_FUNC_TRACE();
+
+ diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
+ if (diag != 0)
+ return -ENOMEM;
+
+ for (i = 0; i < q->nb_desc; ++i) {
+ fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
+ if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
+ rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
+ q->nb_desc);
+ return -EINVAL;
+ }
+ dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
+ q->hw_ring[i].q.pkt_addr = dma_addr;
+ q->hw_ring[i].q.hdr_addr = dma_addr;
+ }
+
+ q->next_dd = 0;
+ q->next_alloc = 0;
+ q->next_trigger = q->alloc_thresh - 1;
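+ /* hand the ring to hardware: head starts at 0, so placing the tail on
+ * the last slot leaves one descriptor as the full/empty gap
+ */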
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
+ return 0;
+}
+
/*
* clean queue, descriptor rings, free software buffers used when stopping
* device.
}
}
+/*
+ * disable RX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
+ reg & ~FM10K_RXQCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ if (!(reg & FM10K_RXQCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * reset queue to initial state, used when starting device; unlike the RX
+ * path, no software buffers need to be allocated here
+ */
+static inline void
+tx_queue_reset(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ q->last_free = 0;
+ q->next_free = 0;
+ q->nb_used = 0;
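+ /* keep one descriptor unused so a full ring is distinguishable
+ * from an empty one
+ */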
+ q->nb_free = q->nb_desc - 1;
+ q->free_trigger = q->nb_free - q->free_thresh;
+ fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
+ FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
+}
+
/*
* clean queue, descriptor rings, free software buffers used when stopping
* device
}
}
+/*
+ * disable TX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
+ reg & ~FM10K_TXDCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ if (!(reg & FM10K_TXDCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
return 0;
}
+static int
+fm10k_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_tx_queue *txq;
+ uint64_t base_addr;
+ uint32_t size;
+
+ /* Disable TXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXINT(i),
+ 3 << FM10K_TXINT_TIMER_SHIFT);
+
+ /* Setup TX queues */
+ for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ txq = dev->data->tx_queues[i];
+ base_addr = txq->hw_ring_phys_addr;
+ size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = tx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* set location and size for descriptor ring */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+ }
+ return 0;
+}
+
+static int
+fm10k_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_rx_queue *rxq;
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ uint16_t buf_size;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ /* Disable RXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ rxq = dev->data->rx_queues[i];
+ base_addr = rxq->hw_ring_phys_addr;
+ size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = rx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
+
+ /* Configure the Rx buffer size for one buff without split */
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
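+ /* SRRCTL takes the buffer size in units of
+ * (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) bytes, hence the right shift
+ */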
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
+ buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* Enable drop on empty; the bit is read-only for the VF */
+ if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
+ rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
+
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
+ FM10K_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = -1;
+ uint32_t reg;
+ struct fm10k_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ }
+
+ return err;
+}
+
+static int
+fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
+
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /** @todo - this should be defined in the shared code */
+#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
+ uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
+
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ } else
+ err = -1;
+
+ return err;
+}
+
+static int
+fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ }
+
+ return 0;
+}
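+
+/*
+ * Illustrative (not part of this driver): how an application reaches the
+ * per-queue hooks above through the standard ethdev API when a queue is
+ * configured with deferred start:
+ *
+ * rxconf.rx_deferred_start = 1;
+ * rte_eth_rx_queue_setup(port, qid, nb_desc, socket_id, &rxconf, mp);
+ * rte_eth_dev_start(port); // deferred queue stays disabled
+ * rte_eth_dev_rx_queue_start(port, qid); // -> fm10k_dev_rx_queue_start()
+ */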
+
+/* fls = find last set bit = 32 minus the number of leading zeros */
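+/* e.g. fls(0) = 0, fls(1) = 1, fls(7) = 3, fls(8) = 4 */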
+#ifndef fls
+#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
+#endif
+#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
+static int
+fm10k_dev_start(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, diag;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* stop, init, then start the hw */
+ diag = fm10k_stop_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_start_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_dev_tx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
+ return diag;
+ }
+
+ diag = fm10k_dev_rx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
+ return diag;
+ }
+
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* Establish only VSI 0 as valid */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
+
+ /* Configure RSS bits used in RETA table */
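+ /* fls(n - 1) is the number of queue-index bits needed for n
+ * queues, e.g. fls(3) = 2 bits when nb_rx_queues is 4
+ */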
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
+ fls(dev->data->nb_rx_queues - 1) <<
+ FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
+ FM10K_DGLORTMAP_NONE);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq;
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->rx_deferred_start)
+ continue;
+ diag = fm10k_dev_rx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq;
+ txq = dev->data->tx_queues[i];
+
+ if (txq->tx_deferred_start)
+ continue;
+ diag = fm10k_dev_tx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ tx_queue_clean(dev->data->tx_queues[j]);
+ for (j = 0; j < dev->data->nb_rx_queues; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ return 0;
+}
+
+static void
+fm10k_dev_stop(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ fm10k_dev_tx_queue_stop(dev, i);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_dev_rx_queue_stop(dev, i);
+}
+
+static void
+fm10k_dev_close(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Stop mailbox service first */
+ fm10k_close_mbx_service(hw);
+ fm10k_dev_stop(dev);
+ fm10k_stop_hw(hw);
+}
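+
+/*
+ * Illustrative device lifecycle from the application side (a sketch of the
+ * standard ethdev flow; the arrows name the ops implemented in this file):
+ *
+ * rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf); // -> fm10k_dev_configure()
+ * ... rte_eth_rx_queue_setup() / rte_eth_tx_queue_setup() per queue ...
+ * rte_eth_dev_start(port); // -> fm10k_dev_start()
+ * rte_eth_dev_stop(port); // -> fm10k_dev_stop()
+ * rte_eth_dev_close(port); // -> fm10k_dev_close()
+ */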
+
static int
fm10k_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
return hw->mbx.ops.connect(hw, &hw->mbx);
}
+static void
+fm10k_close_mbx_service(struct fm10k_hw *hw)
+{
+ /* Disconnect from SM for PF device or PF for VF device */
+ hw->mbx.ops.disconnect(hw, &hw->mbx);
+}
+
static struct eth_dev_ops fm10k_eth_dev_ops = {
.dev_configure = fm10k_dev_configure,
+ .dev_start = fm10k_dev_start,
+ .dev_stop = fm10k_dev_stop,
+ .dev_close = fm10k_dev_close,
.stats_get = fm10k_stats_get,
.stats_reset = fm10k_stats_reset,
.link_update = fm10k_link_update,
.dev_infos_get = fm10k_dev_infos_get,
+ .rx_queue_start = fm10k_dev_rx_queue_start,
+ .rx_queue_stop = fm10k_dev_rx_queue_stop,
+ .tx_queue_start = fm10k_dev_tx_queue_start,
+ .tx_queue_stop = fm10k_dev_tx_queue_stop,
.rx_queue_setup = fm10k_rx_queue_setup,
.rx_queue_release = fm10k_rx_queue_release,
.tx_queue_setup = fm10k_tx_queue_setup,