/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_logs.h"
#include "base/ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

/**
 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
 * in the sw_sc_ring is not set to NULL but rather points to the next
 * mbuf of this RSC aggregation (which has not been completed yet and still
 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
 * just free the first "nb_segs" segments of the cluster explicitly by
 * calling rte_pktmbuf_free_seg() on each of them.
 *
 * @m scattered cluster head
 */
static void __rte_cold
ngbe_free_sc_cluster(struct rte_mbuf *m)
{
	uint16_t i, nb_segs = m->nb_segs;
	struct rte_mbuf *next_seg;

	for (i = 0; i < nb_segs; i++) {
		/* Remember the next segment before freeing the current one */
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
}
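
/*
 * Illustration (hypothetical mbuf names): for an incomplete cluster with
 * nb_segs == 3 whose tail still points into the HW ring,
 *
 *	m -> seg2 -> seg3 -> hw_mbuf -> ...
 *
 * rte_pktmbuf_free() would follow "next" until NULL and so would also free
 * hw_mbuf, which the hardware still owns. Freeing exactly nb_segs segments
 * with rte_pktmbuf_free_seg() stops after seg3.
 */
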
static void __rte_cold
ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		for (i = 0; i < rxq->rx_nb_avail; ++i) {
			struct rte_mbuf *mb;

			mb = rxq->rx_stage[rxq->rx_next_avail + i];
			rte_pktmbuf_free_seg(mb);
		}
		rxq->rx_nb_avail = 0;
	}

	if (rxq->sw_sc_ring != NULL)
		for (i = 0; i < rxq->nb_rx_desc; i++)
			if (rxq->sw_sc_ring[i].fbuf != NULL) {
				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
				rxq->sw_sc_ring[i].fbuf = NULL;
			}
}
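
/*
 * Release one Rx queue: give back every mbuf the queue still holds
 * (descriptor ring entries, staged mbufs in rx_stage[] and any incomplete
 * scattered clusters), then free the software rings and the queue
 * structure itself.
 */
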
static void __rte_cold
ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
{
	if (rxq != NULL) {
		ngbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq->sw_sc_ring);
		rte_free(rxq);
	}
}
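
/*
 * eth_dev_ops .rx_queue_release callback: ethdev hands back the opaque
 * queue pointer that was stored in dev->data->rx_queues[] at setup time.
 */
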
void __rte_cold
ngbe_dev_rx_queue_release(void *rxq)
{
	ngbe_rx_queue_release(rxq);
}

/*
 * Check if the Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
{
	int ret = 0;

	/*
	 * Make sure the following preconditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 * Scattered packets are not supported. This should be checked
	 * outside of this function.
	 */
	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}
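
/*
 * Worked example (illustrative values; RTE_PMD_NGBE_RX_MAX_BURST is
 * assumed to be 32 here): a queue with nb_rx_desc = 512 and
 * rx_free_thresh = 64 satisfies all three checks (64 >= 32, 64 < 512,
 * 512 % 64 == 0), so the bulk allocation path may be used. With
 * rx_free_thresh = 24 the first check fails and the default Rx burst
 * function must be used instead.
 */
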
/* Reset dynamic ngbe_rx_queue fields back to defaults */
static void __rte_cold
ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
{
	static const struct ngbe_rx_desc zeroed_desc = {
					{{0}, {0} }, {{0}, {0} } };
	unsigned int i;
	uint16_t len = rxq->nb_rx_desc;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zero'd out.
	 */
	if (adapter->rx_bulk_alloc_allowed)
		/* zero out extra memory */
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	/*
	 * Zero out HW ring memory. Also zero out the extra memory at the end
	 * of the H/W ring so the look-ahead logic in the Rx Burst bulk alloc
	 * function reads it as zeros.
	 */
	for (i = 0; i < len; i++)
		rxq->rx_ring[i] = zeroed_desc;

	/*
	 * Initialize extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = rxq->nb_rx_desc; i < len; ++i)
		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
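
/*
 * Set up an Rx queue: allocate the queue structure, reserve the DMA
 * memzone for the hardware descriptor ring, allocate the software
 * rings, and reset the queue state to defaults.
 */
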
int __rte_cold
ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct ngbe_rx_queue *rxq;
	struct ngbe_hw *hw;
	uint16_t len;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct ngbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	/*
	 * Allocate Rx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      RX_RING_SZ, NGBE_ALIGN, socket_id);
	if (rz == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Zero init all the descriptors in the ring */
	memset(rz->addr, 0, RX_RING_SZ);

	/* Rx queue tail (write) and head (read) pointer registers */
	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));

	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);

	/*
	 * Certain constraints must be met in order to use the bulk buffer
	 * allocation Rx burst function. If any of the Rx queues doesn't meet
	 * them, the feature should be disabled for the whole port.
	 */
	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
		PMD_INIT_LOG(DEBUG,
			     "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
			     rxq->queue_id, rxq->port_id);
		adapter->rx_bulk_alloc_allowed = false;
	}

	/*
	 * Allocate software ring. Allow for space at the end of the
	 * S/W ring to make sure the look-ahead logic in the bulk alloc Rx
	 * burst function does not access an invalid memory region.
	 */
	len = nb_desc;
	if (adapter->rx_bulk_alloc_allowed)
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
					  sizeof(struct ngbe_rx_entry) * len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Always allocate the scattered-Rx ring, even if it is not going to
	 * be needed, in order to simplify the code.
	 *
	 * This ring is used in Scattered Rx cases and Scattered Rx may
	 * be requested in ngbe_dev_rx_init(), which is called later from
	 * the dev_start() flow.
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
				   sizeof(struct ngbe_scattered_rx_entry) * len,
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_sc_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_INIT_LOG(DEBUG,
		     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
		     rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	ngbe_reset_rx_queue(adapter, rxq);

	return 0;
}
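
/*
 * Usage sketch (illustrative only; the pool size, queue size and the
 * "rx_pool" name are arbitrary): an application reaches this callback
 * through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_rxconf rxconf;
 *	struct rte_mempool *mb_pool;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	rxconf = dev_info.default_rxconf;
 *	rxconf.rx_free_thresh = 64;	(64 divides 512 evenly)
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			&rxconf, mb_pool);
 *
 * rte_eth_rx_queue_setup() validates nb_desc against the driver's
 * descriptor limits and then invokes this function through
 * dev_ops->rx_queue_setup.
 */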