uint16_t *endp;
};
+struct fm10k_txq_ops;
+
+/* Per-queue Tx bookkeeping for the fm10k PMD.
+ * NOTE(review): this is diff context -- additional fields (nb_desc,
+ * rs_thresh, next_dd, next_rs, nb_used, tail_ptr, ...) are referenced
+ * elsewhere in this patch but are not visible in this hunk; do not
+ * assume the struct is complete as shown.
+ */
struct fm10k_tx_queue {
struct rte_mbuf **sw_ring;
struct fm10k_tx_desc *hw_ring;
uint64_t hw_ring_phys_addr;
struct fifo rs_tracker;
+	/* teardown/reset hooks; scalar vs vector path chosen at queue setup */
+	const struct fm10k_txq_ops *ops; /* txq ops */
uint16_t last_free;
uint16_t next_free;
uint16_t nb_free;
uint16_t queue_id;
};
+/* Per-queue Tx function table: lets the scalar and vector Tx paths
+ * install their own teardown/reset routines (see def_txq_ops and
+ * vec_txq_ops).
+ */
+struct fm10k_txq_ops {
+	/* free any mbufs still held by the queue's sw_ring */
+	void (*release_mbufs)(struct fm10k_tx_queue *txq);
+	/* return HW/SW ring state to post-init defaults */
+	void (*reset)(struct fm10k_tx_queue *txq);
+};
+
#define MBUF_DMA_ADDR(mb) \
((uint64_t) ((mb)->buf_physaddr + (mb)->data_off))
uint16_t);
uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
#endif
return 0;
}
+/* Default (scalar-path) Tx queue ops; tx_queue_free and tx_queue_reset
+ * are the pre-existing non-vector implementations earlier in this file.
+ */
+static const struct fm10k_txq_ops def_txq_ops = {
+	.release_mbufs = tx_queue_free,
+	.reset = tx_queue_reset,
+};
+
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+
+ q->ops->reset(q);
/* reset head and tail pointers */
FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
PMD_INIT_FUNC_TRACE();
if (dev->data->tx_queues) {
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		fm10k_tx_queue_release(dev->data->tx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
+
+		/* A slot may be NULL (queue never set up, or already
+		 * released); the replaced fm10k_tx_queue_release() path
+		 * tolerated that, so guard before dereferencing txq->ops.
+		 */
+		if (txq == NULL)
+			continue;
+		txq->ops->release_mbufs(txq);
+	}
}
if (dev->data->rx_queues) {
* different socket than was previously used.
*/
if (dev->data->tx_queues[queue_id] != NULL) {
-	tx_queue_free(dev->data->tx_queues[queue_id]);
+	struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+	/* NOTE(review): release_mbufs() frees the mbufs (and, in the
+	 * vector variant, the sw_ring) but not the queue struct itself;
+	 * if the replaced tx_queue_free() also freed the struct, this
+	 * path now leaks it -- confirm against tx_queue_free().
+	 */
+	txq->ops->release_mbufs(txq);
	dev->data->tx_queues[queue_id] = NULL;
}
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
+ q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
if (handle_txconf(q, conf))
static void
fm10k_tx_queue_release(void *queue)
{
+	struct fm10k_tx_queue *q = queue;
+
	PMD_INIT_FUNC_TRACE();
-	tx_queue_free(queue);
+	/* The replaced tx_queue_free() presumably accepted NULL (TODO:
+	 * confirm); keep that tolerance so a NULL slot handed over by
+	 * ethdev does not fault on the q->ops dereference.
+	 */
+	if (q != NULL)
+		q->ops->release_mbufs(q);
}
static int
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
+/* Forward declarations: vec_txq_ops below references these handlers,
+ * which are defined near the end of this file.
+ */
+static void
+fm10k_tx_queue_release_mbufs_vec(struct fm10k_tx_queue *txq);
+static void
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
+
/* Handling the offload flags (olflags) field takes computation
* time when receiving packets. Therefore we provide a flag to disable
* the processing of the olflags field when they are not needed. This
&split_flags[i]);
}
+/* Tx queue ops installed when the vector Tx path is selected */
+static const struct fm10k_txq_ops vec_txq_ops = {
+	.release_mbufs = fm10k_tx_queue_release_mbufs_vec,
+	.reset = fm10k_reset_tx_queue,
+};
+
+/* Switch @txq's teardown/reset hooks to the vector implementations.
+ * Called from queue setup when the vector Tx path is chosen; cold
+ * because it runs only at configuration time.
+ */
+void __attribute__((cold))
+fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
+{
+	txq->ops = &vec_txq_ops;
+}
+
static inline void
vtx1(volatile struct fm10k_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
return nb_pkts;
}
+
+/* Vector-path teardown: free every mbuf still referenced by sw_ring,
+ * then free the sw_ring itself and NULL it out.
+ *
+ * NOTE(review): the "& max_desc" wrap assumes nb_desc is a power of
+ * two, and the loop start assumes next_dd >= rs_thresh - 1 (otherwise
+ * the unsigned subtraction underflows and the first sw_ring[i] access
+ * is out of bounds). Both appear to be invariants of the vector Tx
+ * path (see fm10k_reset_tx_queue's next_dd init) -- confirm.
+ */
+static void __attribute__((cold))
+fm10k_tx_queue_release_mbufs_vec(struct fm10k_tx_queue *txq)
+{
+	unsigned i;
+	const uint16_t max_desc = (uint16_t)(txq->nb_desc - 1);
+
+	/* nothing to do: ring never allocated, or every descriptor free */
+	if (txq->sw_ring == NULL || txq->nb_free == max_desc)
+		return;
+
+	/* release the used mbufs in sw_ring */
+	for (i = txq->next_dd - (txq->rs_thresh - 1);
+		i != txq->next_free;
+		i = (i + 1) & max_desc)
+		rte_pktmbuf_free_seg(txq->sw_ring[i]);
+
+	txq->nb_free = max_desc;
+
+	/* reset tx_entry */
+	for (i = 0; i < txq->nb_desc; i++)
+		txq->sw_ring[i] = NULL;
+
+	rte_free(txq->sw_ring);
+	txq->sw_ring = NULL;
+}
+
+/* Vector-path reset: return the queue to its just-initialized state --
+ * zeroed HW descriptors, empty sw_ring, ring indices at their defaults
+ * -- and reset the HW tail pointer to 0 via txq->tail_ptr.
+ */
+static void __attribute__((cold))
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
+{
+	static const struct fm10k_tx_desc zeroed_desc = {0};
+	struct rte_mbuf **txe = txq->sw_ring;
+	uint16_t i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_desc; i++)
+		txq->hw_ring[i] = zeroed_desc;
+
+	/* Initialize SW ring entries */
+	for (i = 0; i < txq->nb_desc; i++)
+		txe[i] = NULL;
+
+	/* first RS/DD checkpoint is reached after rs_thresh descriptors;
+	 * this also establishes next_dd >= rs_thresh - 1, relied upon by
+	 * fm10k_tx_queue_release_mbufs_vec()
+	 */
+	txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+	txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+	txq->next_free = 0;
+	txq->nb_used = 0;
+	/* Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->nb_free = (uint16_t)(txq->nb_desc - 1);
+	FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
+}