/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_logs.h"
#include "base/ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/
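
/* Free any mbufs still attached to the Tx software ring entries. */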
static void
ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
{
	uint16_t i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}
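
/* Release the Tx software ring. */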
static void
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
	if (txq != NULL)
		rte_free(txq->sw_ring);
}
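
/* Free the mbufs, the software ring and the queue structure itself. */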
static void
ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
{
	if (txq != NULL && txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->free_swring(txq);
		rte_free(txq);
	}
}
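
/* ethdev tx_queue_release callback */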
void
ngbe_dev_tx_queue_release(void *txq)
{
	ngbe_tx_queue_release(txq);
}

/* (Re)set dynamic ngbe_tx_queue fields to defaults */
static void
ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
{
	static const struct ngbe_tx_desc zeroed_desc = {0};
	struct ngbe_tx_entry *txe = txq->sw_ring;
	uint16_t prev, i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		/* the ring can also be modified by hardware */
		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
	txq->tx_tail = 0;

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
	       NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
}
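
/* Default Tx queue operations used by ngbe_dev_tx_queue_setup() */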
static const struct ngbe_txq_ops def_txq_ops = {
	.release_mbufs = ngbe_tx_queue_release_mbufs,
	.free_swring = ngbe_tx_free_swring,
	.reset = ngbe_reset_tx_queue,
};
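
/*
 * Set up a Tx queue: validate tx_free_thresh, allocate the queue structure,
 * the descriptor ring and the software ring, then reset the queue.
 * tx_free_thresh must divide nb_desc evenly and stay below nb_desc - 3,
 * e.g. nb_desc = 512 with tx_free_thresh = 32 satisfies both checks.
 */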
int
ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct ngbe_tx_queue *txq;
	struct ngbe_hw *hw;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/*
	 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free Tx
	 * descriptors.
	 * One descriptor in the Tx ring is used as a sentinel to avoid a
	 * H/W race condition, hence the maximum threshold constraints.
	 * When set to zero use default values.
	 */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
			(unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	if (nb_desc % tx_free_thresh != 0) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
			(unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc_socket("ethdev Tx queue",
				 sizeof(struct ngbe_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		return -ENOMEM;

	/*
	 * Allocate Tx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
			NGBE_ALIGN, socket_id);
	if (tz == NULL) {
		ngbe_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->reg_idx = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->ops = &def_txq_ops;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	/* Tx queue tail (TXWP) and configuration (TXCFG) register addresses */
	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));

	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);

	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
				sizeof(struct ngbe_tx_entry) * nb_desc,
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		ngbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	PMD_INIT_LOG(DEBUG,
		"sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	/* Set dynamic fields to defaults */
	txq->ops->reset(txq);

	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

/**
 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
 * in the sw_sc_ring is not set to NULL but rather points to the next
 * mbuf of this RSC aggregation (that has not been completed yet and still
 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
 * will just free first "nb_segs" segments of the cluster explicitly by calling
 * an rte_pktmbuf_free_seg().
 *
 * @m scattered cluster head
 */
static void
ngbe_free_sc_cluster(struct rte_mbuf *m)
{
	uint16_t i, nb_segs = m->nb_segs;
	struct rte_mbuf *next_seg;

	for (i = 0; i < nb_segs; i++) {
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
}
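
/* Free all mbufs referenced by the Rx software rings. */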
static void
ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}

		for (i = 0; i < rxq->rx_nb_avail; ++i) {
			struct rte_mbuf *mb;

			mb = rxq->rx_stage[rxq->rx_next_avail + i];
			rte_pktmbuf_free_seg(mb);
		}
		rxq->rx_nb_avail = 0;
	}

	if (rxq->sw_sc_ring != NULL)
		for (i = 0; i < rxq->nb_rx_desc; i++)
			if (rxq->sw_sc_ring[i].fbuf != NULL) {
				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
				rxq->sw_sc_ring[i].fbuf = NULL;
			}
}
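
/* Free the mbufs, both software rings and the queue structure itself. */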
static void
ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
{
	if (rxq != NULL) {
		ngbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq->sw_sc_ring);
		rte_free(rxq);
	}
}
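
/* ethdev rx_queue_release callback */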
void
ngbe_dev_rx_queue_release(void *rxq)
{
	ngbe_rx_queue_release(rxq);
}

/*
 * Check if Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
{
	int ret = 0;

	/*
	 * Make sure the following pre-conditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 * Scattered packets are not supported. This should be checked
	 * outside of this function.
	 */
	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
		PMD_INIT_LOG(DEBUG,
			"Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
			rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
		PMD_INIT_LOG(DEBUG,
			"Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
			rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
		PMD_INIT_LOG(DEBUG,
			"Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
			rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}

/* Reset dynamic ngbe_rx_queue fields back to defaults */
static void
ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
{
	static const struct ngbe_rx_desc zeroed_desc = {
						{{0}, {0} }, {{0}, {0} } };
	unsigned int i;
	uint16_t len = rxq->nb_rx_desc;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zero'd out.
	 */
	if (adapter->rx_bulk_alloc_allowed)
		/* zero out extra memory */
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	/*
	 * Zero out HW ring memory. Zero out extra memory at the end of
	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
	 * reads extra memory as zeros.
	 */
	for (i = 0; i < len; i++)
		rxq->rx_ring[i] = zeroed_desc;

	/*
	 * initialize extra software ring entries. Space for these extra
	 * entries is always allocated
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = rxq->nb_rx_desc; i < len; ++i)
		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
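
/*
 * Set up an Rx queue: allocate the queue structure, the descriptor ring
 * and the software rings, and check the Rx Burst Bulk Alloc preconditions
 * (rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST, rx_free_thresh < nb_desc,
 * nb_desc % rx_free_thresh == 0) for the whole port.
 */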
int
ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct ngbe_rx_queue *rxq;
	struct ngbe_hw *hw;
	uint16_t len;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct ngbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	/*
	 * Allocate Rx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      RX_RING_SZ, NGBE_ALIGN, socket_id);
	if (rz == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Zero init all the descriptors in the ring.
	 */
	memset(rz->addr, 0, RX_RING_SZ);

	/* Rx queue tail (RXWP) and head (RXRP) register addresses */
	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));

	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);

	/*
	 * Certain constraints must be met in order to use the bulk buffer
	 * allocation Rx burst function. If any of Rx queues doesn't meet them
	 * the feature should be disabled for the whole port.
	 */
	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
		PMD_INIT_LOG(DEBUG,
			"queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
			rxq->queue_id, rxq->port_id);
		adapter->rx_bulk_alloc_allowed = false;
	}

	/*
	 * Allocate software ring. Allow for space at the end of the
	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
	 * function does not access an invalid memory region.
	 */
	len = nb_desc;
	if (adapter->rx_bulk_alloc_allowed)
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
					  sizeof(struct ngbe_rx_entry) * len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Always allocate even if it's not going to be needed in order to
	 * simplify the code.
	 *
	 * This ring is used in Scattered Rx cases and Scattered Rx may
	 * be requested in ngbe_dev_rx_init(), which is called later from
	 * dev_start() flow.
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
				   sizeof(struct ngbe_scattered_rx_entry) * len,
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_sc_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_INIT_LOG(DEBUG,
		"sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
		rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	/* Set dynamic fields to defaults */
	ngbe_reset_rx_queue(adapter, rxq);

	return 0;
}