/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>

#include <stdint.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_logs.h"
#include "base/ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/

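/* Free any mbufs still held by the Tx software ring entries. */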
static void
ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
{
        unsigned int i;

        if (txq->sw_ring != NULL) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_ring[i].mbuf != NULL) {
                                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                                txq->sw_ring[i].mbuf = NULL;
                        }
                }
        }
}

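/* Free the memory of the Tx software ring itself. */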
static void
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
        if (txq != NULL)
                rte_free(txq->sw_ring);
}

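/* Release all resources owned by a Tx queue: queued mbufs, the software ring and the queue structure. */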
static void
ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
{
        if (txq != NULL) {
                if (txq->ops != NULL) {
                        txq->ops->release_mbufs(txq);
                        txq->ops->free_swring(txq);
                }
                rte_free(txq);
        }
}

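/* ethdev Tx queue release callback. */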
void
ngbe_dev_tx_queue_release(void *txq)
{
        ngbe_tx_queue_release(txq);
}

/* (Re)set dynamic ngbe_tx_queue fields to defaults */
static void
ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
{
        static const struct ngbe_tx_desc zeroed_desc = {0};
        struct ngbe_tx_entry *txe = txq->sw_ring;
        uint16_t prev, i;

        /* Zero out HW ring memory */
        for (i = 0; i < txq->nb_tx_desc; i++)
                txq->tx_ring[i] = zeroed_desc;

        /* Initialize SW ring entries */
        prev = (uint16_t)(txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                /* the ring can also be modified by hardware */
                volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

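                /* Mark the descriptor as done (DD) so it starts out as free */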
                txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
                prev = i;
        }

        txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
        txq->tx_tail = 0;

        /*
         * Always allow 1 descriptor to be un-allocated to avoid
         * a H/W race condition
         */
        txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
        txq->ctx_curr = 0;
        memset((void *)&txq->ctx_cache, 0,
                NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
}

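/* Default Tx queue operations used by ngbe_dev_tx_queue_setup(). */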
static const struct ngbe_txq_ops def_txq_ops = {
        .release_mbufs = ngbe_tx_queue_release_mbufs,
        .free_swring = ngbe_tx_free_swring,
        .reset = ngbe_reset_tx_queue,
};

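/*
 * Set up a Tx queue: validate the free threshold, allocate the descriptor
 * ring and software ring, and reset the queue to its default state.
 */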
int
ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
        const struct rte_memzone *tz;
        struct ngbe_tx_queue *txq;
        struct ngbe_hw     *hw;
        uint16_t tx_free_thresh;

        PMD_INIT_FUNC_TRACE();
        hw = ngbe_dev_hw(dev);

        /*
         * The Tx descriptor ring will be cleaned once txq->tx_free_thresh
         * descriptors are in use, or when the number of descriptors needed
         * to transmit a packet exceeds the number of free Tx descriptors.
         * One descriptor in the Tx ring is used as a sentinel to avoid an
         * H/W race condition, hence the upper bound on the threshold below.
         * When tx_free_thresh is set to zero, the default value is used.
         */
        tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
                        tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
        if (tx_free_thresh >= (nb_desc - 3)) {
                PMD_INIT_LOG(ERR,
                             "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (int)dev->data->port_id, (int)queue_idx);
                return -(EINVAL);
        }

        if (nb_desc % tx_free_thresh != 0) {
                PMD_INIT_LOG(ERR,
                             "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (int)dev->data->port_id, (int)queue_idx);
                return -(EINVAL);
        }

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->tx_queues[queue_idx] != NULL) {
                ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }

        /* First allocate the Tx queue data structure */
        txq = rte_zmalloc_socket("ethdev Tx queue",
                                 sizeof(struct ngbe_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                return -ENOMEM;

        /*
         * Allocate Tx ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                        sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
                        NGBE_ALIGN, socket_id);
        if (tz == NULL) {
                ngbe_tx_queue_release(txq);
                return -ENOMEM;
        }

        txq->nb_tx_desc = nb_desc;
        txq->tx_free_thresh = tx_free_thresh;
        txq->pthresh = tx_conf->tx_thresh.pthresh;
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
        txq->reg_idx = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->ops = &def_txq_ops;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;

        txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
        txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));

        txq->tx_ring_phys_addr = TMZ_PADDR(tz);
        txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);

        /* Allocate software ring */
        txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
                                sizeof(struct ngbe_tx_entry) * nb_desc,
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ngbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        PMD_INIT_LOG(DEBUG,
                     "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

        txq->ops->reset(txq);

        dev->data->tx_queues[queue_idx] = txq;

        return 0;
}

/**
 * ngbe_free_sc_cluster - free a not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of a not-yet-completed scattered
 * cluster in the sw_sc_ring is not NULL; it points to the next mbuf of the
 * aggregation, which has not been completed yet and still resides on the
 * HW ring. Therefore, instead of calling rte_pktmbuf_free(), free only the
 * first "nb_segs" segments of the cluster explicitly with
 * rte_pktmbuf_free_seg().
 *
 * @m scattered cluster head
 */
static void
ngbe_free_sc_cluster(struct rte_mbuf *m)
{
        uint16_t i, nb_segs = m->nb_segs;
        struct rte_mbuf *next_seg;

        for (i = 0; i < nb_segs; i++) {
                next_seg = m->next;
                rte_pktmbuf_free_seg(m);
                m = next_seg;
        }
}

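/* Free mbufs still held by the Rx software ring, the rx_stage[] array and the scattered-cluster ring. */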
static void
ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
{
        unsigned int i;

        if (rxq->sw_ring != NULL) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_ring[i].mbuf != NULL) {
                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                                rxq->sw_ring[i].mbuf = NULL;
                        }
                }
                for (i = 0; i < rxq->rx_nb_avail; ++i) {
                        struct rte_mbuf *mb;

                        mb = rxq->rx_stage[rxq->rx_next_avail + i];
                        rte_pktmbuf_free_seg(mb);
                }
                rxq->rx_nb_avail = 0;
        }

        if (rxq->sw_sc_ring != NULL)
                for (i = 0; i < rxq->nb_rx_desc; i++)
                        if (rxq->sw_sc_ring[i].fbuf != NULL) {
                                ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
                                rxq->sw_sc_ring[i].fbuf = NULL;
                        }
}

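/* Release all resources owned by an Rx queue: queued mbufs, both software rings and the queue structure. */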
static void
ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
{
        if (rxq != NULL) {
                ngbe_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
                rte_free(rxq->sw_sc_ring);
                rte_free(rxq);
        }
}

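/* ethdev Rx queue release callback. */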
void
ngbe_dev_rx_queue_release(void *rxq)
{
        ngbe_rx_queue_release(rxq);
}

/*
 * Check whether the Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
{
        int ret = 0;

        /*
         * Make sure the following pre-conditions are satisfied:
         *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
         *   rxq->rx_free_thresh < rxq->nb_rx_desc
         *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
         * Scattered packets are not supported.  This should be checked
         * outside of this function.
         */
        if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
                PMD_INIT_LOG(DEBUG,
                             "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
                             rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
                ret = -EINVAL;
        } else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
                PMD_INIT_LOG(DEBUG,
                             "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
                             rxq->rx_free_thresh, rxq->nb_rx_desc);
                ret = -EINVAL;
        } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
                PMD_INIT_LOG(DEBUG,
                             "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
                             rxq->nb_rx_desc, rxq->rx_free_thresh);
                ret = -EINVAL;
        }

        return ret;
}

/* Reset dynamic ngbe_rx_queue fields back to defaults */
static void
ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
{
        static const struct ngbe_rx_desc zeroed_desc = {
                                                {{0}, {0} }, {{0}, {0} } };
        unsigned int i;
        uint16_t len = rxq->nb_rx_desc;

        /*
         * By default, the Rx queue setup function allocates enough memory for
         * NGBE_RING_DESC_MAX. The Rx Burst Bulk Alloc function requires the
         * extra memory at the end of the descriptor ring to be zeroed out.
         */
        if (adapter->rx_bulk_alloc_allowed)
                /* zero out extra memory */
                len += RTE_PMD_NGBE_RX_MAX_BURST;

        /*
         * Zero out HW ring memory. Zero out extra memory at the end of
         * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
         * reads extra memory as zeros.
         */
        for (i = 0; i < len; i++)
                rxq->rx_ring[i] = zeroed_desc;

        /*
         * initialize extra software ring entries. Space for these extra
         * entries is always allocated
         */
        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
        for (i = rxq->nb_rx_desc; i < len; ++i)
                rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

        rxq->rx_nb_avail = 0;
        rxq->rx_next_avail = 0;
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
}

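/*
 * Set up an Rx queue: allocate the queue structure, the descriptor ring and
 * both software rings, and check the Rx Burst Bulk Alloc preconditions.
 */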
int
ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mp)
{
        const struct rte_memzone *rz;
        struct ngbe_rx_queue *rxq;
        struct ngbe_hw     *hw;
        uint16_t len;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();
        hw = ngbe_dev_hw(dev);

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->rx_queues[queue_idx] != NULL) {
                ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }

        /* First allocate the Rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue",
                                 sizeof(struct ngbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                return -ENOMEM;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
        rxq->reg_idx = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;

        /*
         * Allocate Rx ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                      RX_RING_SZ, NGBE_ALIGN, socket_id);
        if (rz == NULL) {
                ngbe_rx_queue_release(rxq);
                return -ENOMEM;
        }

        /*
         * Zero init all the descriptors in the ring.
         */
        memset(rz->addr, 0, RX_RING_SZ);

        rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
        rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));

        rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
        rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);

        /*
         * Certain constraints must be met in order to use the bulk buffer
         * allocation Rx burst function. If any Rx queue does not meet them,
         * the feature is disabled for the whole port.
         */
        if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
                PMD_INIT_LOG(DEBUG,
                             "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
                             rxq->queue_id, rxq->port_id);
                adapter->rx_bulk_alloc_allowed = false;
        }

        /*
         * Allocate software ring. Allow for space at the end of the
         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
         * function does not access an invalid memory region.
         */
        len = nb_desc;
        if (adapter->rx_bulk_alloc_allowed)
                len += RTE_PMD_NGBE_RX_MAX_BURST;

        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
                                          sizeof(struct ngbe_rx_entry) * len,
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL) {
                ngbe_rx_queue_release(rxq);
                return -ENOMEM;
        }

        /*
         * Always allocate even if it's not going to be needed in order to
         * simplify the code.
         *
         * This ring is used in Scattered Rx cases and Scattered Rx may
         * be requested in ngbe_dev_rx_init(), which is called later from
         * dev_start() flow.
         */
        rxq->sw_sc_ring =
                rte_zmalloc_socket("rxq->sw_sc_ring",
                                  sizeof(struct ngbe_scattered_rx_entry) * len,
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_sc_ring == NULL) {
                ngbe_rx_queue_release(rxq);
                return -ENOMEM;
        }

        PMD_INIT_LOG(DEBUG,
                     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
                     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
                     rxq->rx_ring_phys_addr);

        dev->data->rx_queues[queue_idx] = rxq;

        ngbe_reset_rx_queue(adapter, rxq);

        return 0;
}

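/* Release the mbufs of every Rx/Tx queue and reset the queues to their default state. */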
void
ngbe_dev_clear_queues(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct ngbe_tx_queue *txq = dev->data->tx_queues[i];

                if (txq != NULL) {
                        txq->ops->release_mbufs(txq);
                        txq->ops->reset(txq);
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];

                if (rxq != NULL) {
                        ngbe_rx_queue_release_mbufs(rxq);
                        ngbe_reset_rx_queue(adapter, rxq);
                }
        }
}

/*
 * Initializes Receive Unit (placeholder, not implemented yet).
 */
int
ngbe_dev_rx_init(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);

        return -EINVAL;
}

/*
 * Initializes Transmit Unit (placeholder, not implemented yet).
 */
void
ngbe_dev_tx_init(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);
}

/*
 * Start Transmit and Receive Units (placeholder, not implemented yet).
 */
int
ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);

        return -EINVAL;
}