drivers/net/avf/avf_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
17 #include <rte_mbuf.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_udp.h>
24 #include <rte_ip.h>
25 #include <rte_net.h>
26
27 #include "avf_log.h"
28 #include "base/avf_prototype.h"
29 #include "base/avf_type.h"
30 #include "avf.h"
31 #include "avf_rxtx.h"
32
33 static inline int
34 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
35 {
36         /* The following constraints must be satisfied:
37          *   thresh < rxq->nb_rx_desc
38          */
39         if (thresh >= nb_desc) {
40                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
41                              thresh, nb_desc);
42                 return -EINVAL;
43         }
44         return 0;
45 }
46
47 static inline int
48 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
49                 uint16_t tx_free_thresh)
50 {
51         /* TX descriptors will have their RS bit set after tx_rs_thresh
52          * descriptors have been used. The TX descriptor ring will be cleaned
53          * after tx_free_thresh descriptors are used or if the number of
54          * descriptors required to transmit a packet is greater than the
55          * number of free TX descriptors.
56          *
57          * The following constraints must be satisfied:
58          *  - tx_rs_thresh must be less than the size of the ring minus 2.
59          *  - tx_free_thresh must be less than the size of the ring minus 3.
60          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
61          *  - tx_rs_thresh must be a divisor of the ring size.
62          *
63          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
64          * race condition, hence the maximum threshold constraints. When set
65          * to zero use default values.
66          */
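        /* Example (illustrative values): with nb_desc = 512, thresholds of
         * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfy all of the above:
         * 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.
         */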
67         if (tx_rs_thresh >= (nb_desc - 2)) {
68                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
69                              "number of TX descriptors (%u) minus 2",
70                              tx_rs_thresh, nb_desc);
71                 return -EINVAL;
72         }
73         if (tx_free_thresh >= (nb_desc - 3)) {
74                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
75                              "number of TX descriptors (%u) minus 3.",
76                              tx_free_thresh, nb_desc);
77                 return -EINVAL;
78         }
79         if (tx_rs_thresh > tx_free_thresh) {
80                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
81                              "equal to tx_free_thresh (%u).",
82                              tx_rs_thresh, tx_free_thresh);
83                 return -EINVAL;
84         }
85         if ((nb_desc % tx_rs_thresh) != 0) {
86                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
87                              "number of TX descriptors (%u).",
88                              tx_rs_thresh, nb_desc);
89                 return -EINVAL;
90         }
91
92         return 0;
93 }
94
95 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
96 static inline bool
97 check_rx_vec_allow(struct avf_rx_queue *rxq)
98 {
99         if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST &&
100             rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
101                 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
102                 return TRUE;
103         }
104
105         PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
106         return FALSE;
107 }
108
109 static inline bool
110 check_tx_vec_allow(struct avf_tx_queue *txq)
111 {
112         if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
113             txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
114             txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
115                 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
116                 return TRUE;
117         }
118         PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
119         return FALSE;
120 }
121 #endif
122
123 static inline void
124 reset_rx_queue(struct avf_rx_queue *rxq)
125 {
126         uint16_t len;
            uint32_t i;
127
128         if (!rxq)
129                 return;
130
131         len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;
132
133         for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
134                 ((volatile char *)rxq->rx_ring)[i] = 0;
135
136         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
137
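        /* Point the extra sw_ring entries past the real ring at the local
         * fake_mbuf, so code that peeks beyond the ring end (e.g. burst-mode
         * receive) always reads a valid mbuf pointer.
         */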
138         for (i = 0; i < AVF_RX_MAX_BURST; i++)
139                 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
140
141         rxq->rx_tail = 0;
142         rxq->nb_rx_hold = 0;
143         rxq->pkt_first_seg = NULL;
144         rxq->pkt_last_seg = NULL;
145 }
146
147 static inline void
148 reset_tx_queue(struct avf_tx_queue *txq)
149 {
150         struct avf_tx_entry *txe;
151         uint16_t prev;
            uint32_t i, size;
152
153         if (!txq) {
154                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
155                 return;
156         }
157
158         txe = txq->sw_ring;
159         size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
160         for (i = 0; i < size; i++)
161                 ((volatile char *)txq->tx_ring)[i] = 0;
162
163         prev = (uint16_t)(txq->nb_tx_desc - 1);
164         for (i = 0; i < txq->nb_tx_desc; i++) {
165                 txq->tx_ring[i].cmd_type_offset_bsz =
166                         rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
167                 txe[i].mbuf =  NULL;
168                 txe[i].last_id = i;
169                 txe[prev].next_id = i;
170                 prev = i;
171         }
172
173         txq->tx_tail = 0;
174         txq->nb_used = 0;
175
176         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
177         txq->nb_free = txq->nb_tx_desc - 1;
178
179         txq->next_dd = txq->rs_thresh - 1;
180         txq->next_rs = txq->rs_thresh - 1;
181 }
182
183 static int
184 alloc_rxq_mbufs(struct avf_rx_queue *rxq)
185 {
186         volatile union avf_rx_desc *rxd;
187         struct rte_mbuf *mbuf = NULL;
188         uint64_t dma_addr;
189         uint16_t i;
190
191         for (i = 0; i < rxq->nb_rx_desc; i++) {
192                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
193                 if (unlikely(!mbuf)) {
194                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
195                         return -ENOMEM;
196                 }
197
198                 rte_mbuf_refcnt_set(mbuf, 1);
199                 mbuf->next = NULL;
200                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
201                 mbuf->nb_segs = 1;
202                 mbuf->port = rxq->port_id;
203
204                 dma_addr =
205                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
206
207                 rxd = &rxq->rx_ring[i];
208                 rxd->read.pkt_addr = dma_addr;
209                 rxd->read.hdr_addr = 0;
210 #ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
211                 rxd->read.rsvd1 = 0;
212                 rxd->read.rsvd2 = 0;
213 #endif
214
215                 rxq->sw_ring[i] = mbuf;
216         }
217
218         return 0;
219 }
220
221 static inline void
222 release_rxq_mbufs(struct avf_rx_queue *rxq)
223 {
224         struct rte_mbuf *mbuf;
225         uint16_t i;
226
227         if (!rxq->sw_ring)
228                 return;
229
230         for (i = 0; i < rxq->nb_rx_desc; i++) {
231                 if (rxq->sw_ring[i]) {
232                         rte_pktmbuf_free_seg(rxq->sw_ring[i]);
233                         rxq->sw_ring[i] = NULL;
234                 }
235         }
236 }
237
238 static inline void
239 release_txq_mbufs(struct avf_tx_queue *txq)
240 {
241         uint16_t i;
242
243         if (!txq || !txq->sw_ring) {
244                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
245                 return;
246         }
247
248         for (i = 0; i < txq->nb_tx_desc; i++) {
249                 if (txq->sw_ring[i].mbuf) {
250                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
251                         txq->sw_ring[i].mbuf = NULL;
252                 }
253         }
254 }
255
256 static const struct avf_rxq_ops def_rxq_ops = {
257         .release_mbufs = release_rxq_mbufs,
258 };
259
260 static const struct avf_txq_ops def_txq_ops = {
261         .release_mbufs = release_txq_mbufs,
262 };
263
264 int
265 avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
266                        uint16_t nb_desc, unsigned int socket_id,
267                        const struct rte_eth_rxconf *rx_conf,
268                        struct rte_mempool *mp)
269 {
270         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
271         struct avf_adapter *ad =
272                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
273         struct avf_rx_queue *rxq;
274         const struct rte_memzone *mz;
275         uint32_t ring_size;
276         uint16_t len;
277         uint16_t rx_free_thresh;
279
280         PMD_INIT_FUNC_TRACE();
281
282         if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
283             nb_desc > AVF_MAX_RING_DESC ||
284             nb_desc < AVF_MIN_RING_DESC) {
285                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
286                              "invalid", nb_desc);
287                 return -EINVAL;
288         }
289
290         /* Check free threshold */
291         rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
292                          AVF_DEFAULT_RX_FREE_THRESH :
293                          rx_conf->rx_free_thresh;
294         if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
295                 return -EINVAL;
296
297         /* Free memory if needed */
298         if (dev->data->rx_queues[queue_idx]) {
299                 avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
300                 dev->data->rx_queues[queue_idx] = NULL;
301         }
302
303         /* Allocate the rx queue data structure */
304         rxq = rte_zmalloc_socket("avf rxq",
305                                  sizeof(struct avf_rx_queue),
306                                  RTE_CACHE_LINE_SIZE,
307                                  socket_id);
308         if (!rxq) {
309                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
310                              "rx queue data structure");
311                 return -ENOMEM;
312         }
313
314         rxq->mp = mp;
315         rxq->nb_rx_desc = nb_desc;
316         rxq->rx_free_thresh = rx_free_thresh;
317         rxq->queue_id = queue_idx;
318         rxq->port_id = dev->data->port_id;
319         rxq->crc_len = 0; /* crc stripping by default */
320         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
321         rxq->rx_hdr_len = 0;
322
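        /* Rx buffer length: mbuf data room minus headroom, aligned to the
         * granularity implied by AVF_RXQ_CTX_DBUFF_SHIFT.
         */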
323         len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
324         rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
325
326         /* Allocate the software ring. */
327         len = nb_desc + AVF_RX_MAX_BURST;
328         rxq->sw_ring =
329                 rte_zmalloc_socket("avf rx sw ring",
330                                    sizeof(struct rte_mbuf *) * len,
331                                    RTE_CACHE_LINE_SIZE,
332                                    socket_id);
333         if (!rxq->sw_ring) {
334                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
335                 rte_free(rxq);
336                 return -ENOMEM;
337         }
338
339         /* Allocate the maximum number of RX ring hardware descriptors,
340          * with a little extra to support bulk allocation.
341          */
342         len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
343         ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
344                               AVF_DMA_MEM_ALIGN);
345         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
346                                       ring_size, AVF_RING_BASE_ALIGN,
347                                       socket_id);
348         if (!mz) {
349                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
350                 rte_free(rxq->sw_ring);
351                 rte_free(rxq);
352                 return -ENOMEM;
353         }
354         /* Zero all the descriptors in the ring. */
355         memset(mz->addr, 0, ring_size);
356         rxq->rx_ring_phys_addr = mz->iova;
357         rxq->rx_ring = (union avf_rx_desc *)mz->addr;
358
359         rxq->mz = mz;
360         reset_rx_queue(rxq);
361         rxq->q_set = TRUE;
362         dev->data->rx_queues[queue_idx] = rxq;
363         rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
364         rxq->ops = &def_rxq_ops;
365
366 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
367         if (check_rx_vec_allow(rxq) == FALSE)
368                 ad->rx_vec_allowed = false;
369 #endif
370         return 0;
371 }
372
373 int
374 avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
375                        uint16_t queue_idx,
376                        uint16_t nb_desc,
377                        unsigned int socket_id,
378                        const struct rte_eth_txconf *tx_conf)
379 {
380         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
381         struct avf_adapter *ad =
382                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
383         struct avf_tx_queue *txq;
384         const struct rte_memzone *mz;
385         uint32_t ring_size;
386         uint16_t tx_rs_thresh, tx_free_thresh;
388
389         PMD_INIT_FUNC_TRACE();
390
391         if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
392             nb_desc > AVF_MAX_RING_DESC ||
393             nb_desc < AVF_MIN_RING_DESC) {
394                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
395                             "invalid", nb_desc);
396                 return -EINVAL;
397         }
398
399         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
400                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
401         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
402                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
403         if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                    return -EINVAL;
404
405         /* Free memory if needed. */
406         if (dev->data->tx_queues[queue_idx]) {
407                 avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
408                 dev->data->tx_queues[queue_idx] = NULL;
409         }
410
411         /* Allocate the TX queue data structure. */
412         txq = rte_zmalloc_socket("avf txq",
413                                  sizeof(struct avf_tx_queue),
414                                  RTE_CACHE_LINE_SIZE,
415                                  socket_id);
416         if (!txq) {
417                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
418                              "tx queue structure");
419                 return -ENOMEM;
420         }
421
422         txq->nb_tx_desc = nb_desc;
423         txq->rs_thresh = tx_rs_thresh;
424         txq->free_thresh = tx_free_thresh;
425         txq->queue_id = queue_idx;
426         txq->port_id = dev->data->port_id;
427         txq->txq_flags = tx_conf->txq_flags;
428         txq->tx_deferred_start = tx_conf->tx_deferred_start;
429
430         /* Allocate software ring */
431         txq->sw_ring =
432                 rte_zmalloc_socket("avf tx sw ring",
433                                    sizeof(struct avf_tx_entry) * nb_desc,
434                                    RTE_CACHE_LINE_SIZE,
435                                    socket_id);
436         if (!txq->sw_ring) {
437                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
438                 rte_free(txq);
439                 return -ENOMEM;
440         }
441
442         /* Allocate TX hardware ring descriptors. */
443         ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
444         ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
445         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
446                                       ring_size, AVF_RING_BASE_ALIGN,
447                                       socket_id);
448         if (!mz) {
449                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
450                 rte_free(txq->sw_ring);
451                 rte_free(txq);
452                 return -ENOMEM;
453         }
454         txq->tx_ring_phys_addr = mz->iova;
455         txq->tx_ring = (struct avf_tx_desc *)mz->addr;
456
457         txq->mz = mz;
458         reset_tx_queue(txq);
459         txq->q_set = TRUE;
460         dev->data->tx_queues[queue_idx] = txq;
461         txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
462         txq->ops = &def_txq_ops;
463
464 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
465         if (check_tx_vec_allow(txq) == FALSE)
466                 ad->tx_vec_allowed = false;
467 #endif
468
469         return 0;
470 }
471
472 int
473 avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
474 {
475         struct avf_adapter *adapter =
476                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
477         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
478         struct avf_rx_queue *rxq;
479         int err = 0;
480
481         PMD_DRV_FUNC_TRACE();
482
483         if (rx_queue_id >= dev->data->nb_rx_queues)
484                 return -EINVAL;
485
486         rxq = dev->data->rx_queues[rx_queue_id];
487
488         err = alloc_rxq_mbufs(rxq);
489         if (err) {
490                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
491                 return err;
492         }
493
494         rte_wmb();
495
496         /* Init the RX tail register. */
497         AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
498         AVF_WRITE_FLUSH(hw);
499
500         /* Ready to switch the queue on */
501         err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
502         if (err)
503                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
504                             rx_queue_id);
505         else
506                 dev->data->rx_queue_state[rx_queue_id] =
507                         RTE_ETH_QUEUE_STATE_STARTED;
508
509         return err;
510 }
511
512 int
513 avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
514 {
515         struct avf_adapter *adapter =
516                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
517         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518         struct avf_tx_queue *txq;
519         int err = 0;
520
521         PMD_DRV_FUNC_TRACE();
522
523         if (tx_queue_id >= dev->data->nb_tx_queues)
524                 return -EINVAL;
525
526         txq = dev->data->tx_queues[tx_queue_id];
527
528         /* Init the TX tail register. */
529         AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
530         AVF_WRITE_FLUSH(hw);
531
532         /* Ready to switch the queue on */
533         err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
534
535         if (err)
536                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
537                             tx_queue_id);
538         else
539                 dev->data->tx_queue_state[tx_queue_id] =
540                         RTE_ETH_QUEUE_STATE_STARTED;
541
542         return err;
543 }
544
545 int
546 avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
547 {
548         struct avf_adapter *adapter =
549                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
550         struct avf_rx_queue *rxq;
551         int err;
552
553         PMD_DRV_FUNC_TRACE();
554
555         if (rx_queue_id >= dev->data->nb_rx_queues)
556                 return -EINVAL;
557
558         err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
559         if (err) {
560                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
561                             rx_queue_id);
562                 return err;
563         }
564
565         rxq = dev->data->rx_queues[rx_queue_id];
566         rxq->ops->release_mbufs(rxq);
567         reset_rx_queue(rxq);
568         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
569
570         return 0;
571 }
572
573 int
574 avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
575 {
576         struct avf_adapter *adapter =
577                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
578         struct avf_tx_queue *txq;
579         int err;
580
581         PMD_DRV_FUNC_TRACE();
582
583         if (tx_queue_id >= dev->data->nb_tx_queues)
584                 return -EINVAL;
585
586         err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
587         if (err) {
588                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
589                             tx_queue_id);
590                 return err;
591         }
592
593         txq = dev->data->tx_queues[tx_queue_id];
594         txq->ops->release_mbufs(txq);
595         reset_tx_queue(txq);
596         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
597
598         return 0;
599 }
600
601 void
602 avf_dev_rx_queue_release(void *rxq)
603 {
604         struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
605
606         if (!q)
607                 return;
608
609         q->ops->release_mbufs(q);
610         rte_free(q->sw_ring);
611         rte_memzone_free(q->mz);
612         rte_free(q);
613 }
614
615 void
616 avf_dev_tx_queue_release(void *txq)
617 {
618         struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
619
620         if (!q)
621                 return;
622
623         q->ops->release_mbufs(q);
624         rte_free(q->sw_ring);
625         rte_memzone_free(q->mz);
626         rte_free(q);
627 }
628
629 void
630 avf_stop_queues(struct rte_eth_dev *dev)
631 {
632         struct avf_adapter *adapter =
633                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
634         struct avf_rx_queue *rxq;
635         struct avf_tx_queue *txq;
636         int ret, i;
637
638         /* Stop All queues */
639         ret = avf_disable_queues(adapter);
640         if (ret)
641                 PMD_DRV_LOG(WARNING, "Fail to stop queues");
642
643         for (i = 0; i < dev->data->nb_tx_queues; i++) {
644                 txq = dev->data->tx_queues[i];
645                 if (!txq)
646                         continue;
647                 txq->ops->release_mbufs(txq);
648                 reset_tx_queue(txq);
649                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
650         }
651         for (i = 0; i < dev->data->nb_rx_queues; i++) {
652                 rxq = dev->data->rx_queues[i];
653                 if (!rxq)
654                         continue;
655                 rxq->ops->release_mbufs(rxq);
656                 reset_rx_queue(rxq);
657                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
658         }
659 }
660
661 static inline void
662 avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
663 {
664         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
665                 (1 << AVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
666                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
667                 mb->vlan_tci =
668                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
669         } else {
670                 mb->vlan_tci = 0;
671         }
672 }
673
674 /* Translate the rx descriptor status and error fields to pkt flags */
675 static inline uint64_t
676 avf_rxd_to_pkt_flags(uint64_t qword)
677 {
678         uint64_t flags;
679         uint64_t error_bits = (qword >> AVF_RXD_QW1_ERROR_SHIFT);
680
681 #define AVF_RX_ERR_BITS 0x3f
682
683         /* Check if RSS_HASH */
684         flags = (((qword >> AVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
685                                         AVF_RX_DESC_FLTSTAT_RSS_HASH) ==
686                         AVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
687
688         if (likely((error_bits & AVF_RX_ERR_BITS) == 0)) {
689                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
690                 return flags;
691         }
692
693         if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_IPE_SHIFT)))
694                 flags |= PKT_RX_IP_CKSUM_BAD;
695         else
696                 flags |= PKT_RX_IP_CKSUM_GOOD;
697
698         if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_L4E_SHIFT)))
699                 flags |= PKT_RX_L4_CKSUM_BAD;
700         else
701                 flags |= PKT_RX_L4_CKSUM_GOOD;
702
703         /* TODO: Oversize error bit is not processed here */
704
705         return flags;
706 }
707
708 /* implement recv_pkts */
709 uint16_t
710 avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
711 {
712         volatile union avf_rx_desc *rx_ring;
713         volatile union avf_rx_desc *rxdp;
714         struct avf_rx_queue *rxq;
715         union avf_rx_desc rxd;
716         struct rte_mbuf *rxe;
717         struct rte_eth_dev *dev;
718         struct rte_mbuf *rxm;
719         struct rte_mbuf *nmb;
720         uint16_t nb_rx;
721         uint32_t rx_status;
722         uint64_t qword1;
723         uint16_t rx_packet_len;
724         uint16_t rx_id, nb_hold;
725         uint64_t dma_addr;
726         uint64_t pkt_flags;
727         static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
728                 /* [0] reserved */
729                 [1] = RTE_PTYPE_L2_ETHER,
730                 /* [2] - [21] reserved */
731                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
732                         RTE_PTYPE_L4_FRAG,
733                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
734                         RTE_PTYPE_L4_NONFRAG,
735                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
736                         RTE_PTYPE_L4_UDP,
737                 /* [25] reserved */
738                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
739                         RTE_PTYPE_L4_TCP,
740                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
741                         RTE_PTYPE_L4_SCTP,
742                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
743                         RTE_PTYPE_L4_ICMP,
744                 /* All others reserved */
745         };
746
747         nb_rx = 0;
748         nb_hold = 0;
749         rxq = rx_queue;
750         rx_id = rxq->rx_tail;
751         rx_ring = rxq->rx_ring;
752
753         while (nb_rx < nb_pkts) {
754                 rxdp = &rx_ring[rx_id];
755                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
756                 rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
757                             AVF_RXD_QW1_STATUS_SHIFT;
758
759                 /* Check the DD bit first */
760                 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
761                         break;
762                 AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
763
764                 nmb = rte_mbuf_raw_alloc(rxq->mp);
765                 if (unlikely(!nmb)) {
766                         dev = &rte_eth_devices[rxq->port_id];
767                         dev->data->rx_mbuf_alloc_failed++;
768                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
769                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
770                         break;
771                 }
772
773                 rxd = *rxdp;
774                 nb_hold++;
775                 rxe = rxq->sw_ring[rx_id];
                    /* Refill the SW ring with the newly allocated mbuf */
                    rxq->sw_ring[rx_id] = nmb;
776                 rx_id++;
777                 if (unlikely(rx_id == rxq->nb_rx_desc))
778                         rx_id = 0;
779
780                 /* Prefetch next mbuf */
781                 rte_prefetch0(rxq->sw_ring[rx_id]);
782
783                 /* When next RX descriptor is on a cache line boundary,
784                  * prefetch the next 4 RX descriptors and next 8 pointers
785                  * to mbufs.
786                  */
787                 if ((rx_id & 0x3) == 0) {
788                         rte_prefetch0(&rx_ring[rx_id]);
789                         rte_prefetch0(rxq->sw_ring[rx_id]);
790                 }
791                 rxm = rxe;
793                 dma_addr =
794                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
795                 rxdp->read.hdr_addr = 0;
796                 rxdp->read.pkt_addr = dma_addr;
797
798                 rx_packet_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
799                                 AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
800
801                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
802                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
803                 rxm->nb_segs = 1;
804                 rxm->next = NULL;
805                 rxm->pkt_len = rx_packet_len;
806                 rxm->data_len = rx_packet_len;
807                 rxm->port = rxq->port_id;
808                 rxm->ol_flags = 0;
809                 avf_rxd_to_vlan_tci(rxm, &rxd);
810                 pkt_flags = avf_rxd_to_pkt_flags(qword1);
811                 rxm->packet_type =
812                         ptype_tbl[(uint8_t)((qword1 &
813                         AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
814
815                 if (pkt_flags & PKT_RX_RSS_HASH)
816                         rxm->hash.rss =
817                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
818
819                 rxm->ol_flags |= pkt_flags;
820
821                 rx_pkts[nb_rx++] = rxm;
822         }
823         rxq->rx_tail = rx_id;
824
825         /* If the number of free RX descriptors is greater than the RX free
826          * threshold of the queue, advance the receive tail register of queue.
827          * Update that register with the value of the last processed RX
828          * descriptor minus 1.
829          */
830         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
831         if (nb_hold > rxq->rx_free_thresh) {
832                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
833                            "nb_hold=%u nb_rx=%u",
834                            rxq->port_id, rxq->queue_id,
835                            rx_id, nb_hold, nb_rx);
836                 rx_id = (uint16_t)((rx_id == 0) ?
837                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
838                 AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
839                 nb_hold = 0;
840         }
841         rxq->nb_rx_hold = nb_hold;
842
843         return nb_rx;
844 }
845
846 /* implement recv_scattered_pkts  */
847 uint16_t
848 avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
849                         uint16_t nb_pkts)
850 {
851         struct avf_rx_queue *rxq = rx_queue;
852         union avf_rx_desc rxd;
853         struct rte_mbuf *rxe;
854         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
855         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
856         struct rte_mbuf *nmb, *rxm;
857         uint16_t rx_id = rxq->rx_tail;
858         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
859         struct rte_eth_dev *dev;
860         uint32_t rx_status;
861         uint64_t qword1;
862         uint64_t dma_addr;
863         uint64_t pkt_flags;
864
865         volatile union avf_rx_desc *rx_ring = rxq->rx_ring;
866         volatile union avf_rx_desc *rxdp;
867         static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
868                 /* [0] reserved */
869                 [1] = RTE_PTYPE_L2_ETHER,
870                 /* [2] - [21] reserved */
871                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
872                         RTE_PTYPE_L4_FRAG,
873                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
874                         RTE_PTYPE_L4_NONFRAG,
875                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
876                         RTE_PTYPE_L4_UDP,
877                 /* [25] reserved */
878                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
879                         RTE_PTYPE_L4_TCP,
880                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
881                         RTE_PTYPE_L4_SCTP,
882                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
883                         RTE_PTYPE_L4_ICMP,
884                 /* All others reserved */
885         };
886
887         while (nb_rx < nb_pkts) {
888                 rxdp = &rx_ring[rx_id];
889                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
890                 rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
891                             AVF_RXD_QW1_STATUS_SHIFT;
892
893                 /* Check the DD bit */
894                 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
895                         break;
896                 AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
897
898                 nmb = rte_mbuf_raw_alloc(rxq->mp);
899                 if (unlikely(!nmb)) {
900                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
901                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
902                         dev = &rte_eth_devices[rxq->port_id];
903                         dev->data->rx_mbuf_alloc_failed++;
904                         break;
905                 }
906
907                 rxd = *rxdp;
908                 nb_hold++;
909                 rxe = rxq->sw_ring[rx_id];
                    /* Refill the SW ring with the newly allocated mbuf */
                    rxq->sw_ring[rx_id] = nmb;
910                 rx_id++;
911                 if (rx_id == rxq->nb_rx_desc)
912                         rx_id = 0;
913
914                 /* Prefetch next mbuf */
915                 rte_prefetch0(rxq->sw_ring[rx_id]);
916
917                 /* When next RX descriptor is on a cache line boundary,
918                  * prefetch the next 4 RX descriptors and next 8 pointers
919                  * to mbufs.
920                  */
921                 if ((rx_id & 0x3) == 0) {
922                         rte_prefetch0(&rx_ring[rx_id]);
923                         rte_prefetch0(rxq->sw_ring[rx_id]);
924                 }
925
926                 rxm = rxe;
928                 dma_addr =
929                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
930
931                 /* Set data buffer address and data length of the mbuf */
932                 rxdp->read.hdr_addr = 0;
933                 rxdp->read.pkt_addr = dma_addr;
934                 rx_packet_len = (qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
935                                  AVF_RXD_QW1_LENGTH_PBUF_SHIFT;
936                 rxm->data_len = rx_packet_len;
937                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
938
939                 /* If this is the first buffer of the received packet, set the
940                  * pointer to the first mbuf of the packet and initialize its
941                  * context. Otherwise, update the total length and the number
942                  * of segments of the current scattered packet, and update the
943                  * pointer to the last mbuf of the current packet.
944                  */
945                 if (!first_seg) {
946                         first_seg = rxm;
947                         first_seg->nb_segs = 1;
948                         first_seg->pkt_len = rx_packet_len;
949                 } else {
950                         first_seg->pkt_len =
951                                 (uint16_t)(first_seg->pkt_len +
952                                                 rx_packet_len);
953                         first_seg->nb_segs++;
954                         last_seg->next = rxm;
955                 }
956
957                 /* If this is not the last buffer of the received packet,
958                  * update the pointer to the last mbuf of the current scattered
959                  * packet and continue to parse the RX ring.
960                  */
961                 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_EOF_SHIFT))) {
962                         last_seg = rxm;
963                         continue;
964                 }
965
966                 /* This is the last buffer of the received packet. If the CRC
967                  * is not stripped by the hardware:
968                  *  - Subtract the CRC length from the total packet length.
969                  *  - If the last buffer only contains the whole CRC or a part
970                  *  of it, free the mbuf associated to the last buffer. If part
971                  *  of the CRC is also contained in the previous mbuf, subtract
972                  *  the length of that CRC part from the data length of the
973                  *  previous mbuf.
974                  */
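                /* Example (illustrative): if the last segment holds only 2 of
                 * the 4 CRC bytes, that segment is freed and the previous
                 * segment's data_len is reduced by the remaining 2 bytes.
                 */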
975                 rxm->next = NULL;
976                 if (unlikely(rxq->crc_len > 0)) {
977                         first_seg->pkt_len -= ETHER_CRC_LEN;
978                         if (rx_packet_len <= ETHER_CRC_LEN) {
979                                 rte_pktmbuf_free_seg(rxm);
980                                 first_seg->nb_segs--;
981                                 last_seg->data_len =
982                                         (uint16_t)(last_seg->data_len -
983                                         (ETHER_CRC_LEN - rx_packet_len));
984                                 last_seg->next = NULL;
985                         } else
986                                 rxm->data_len = (uint16_t)(rx_packet_len -
987                                                                 ETHER_CRC_LEN);
988                 }
989
990                 first_seg->port = rxq->port_id;
991                 first_seg->ol_flags = 0;
992                 avf_rxd_to_vlan_tci(first_seg, &rxd);
993                 pkt_flags = avf_rxd_to_pkt_flags(qword1);
994                 first_seg->packet_type =
995                         ptype_tbl[(uint8_t)((qword1 &
996                         AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
997
998                 if (pkt_flags & PKT_RX_RSS_HASH)
999                         first_seg->hash.rss =
1000                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1001
1002                 first_seg->ol_flags |= pkt_flags;
1003
1004                 /* Prefetch data of first segment, if configured to do so. */
1005                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1006                                           first_seg->data_off));
1007                 rx_pkts[nb_rx++] = first_seg;
1008                 first_seg = NULL;
1009         }
1010
1011         /* Record index of the next RX descriptor to probe. */
1012         rxq->rx_tail = rx_id;
1013         rxq->pkt_first_seg = first_seg;
1014         rxq->pkt_last_seg = last_seg;
1015
1016         /* If the number of free RX descriptors is greater than the RX free
1017          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1018          * register. Update the RDT with the value of the last processed RX
1019          * descriptor minus 1, to guarantee that the RDT register is never
1020                  * equal to the RDH register, which creates a "full" ring situation
1021          * from the hardware point of view.
1022          */
1023         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1024         if (nb_hold > rxq->rx_free_thresh) {
1025                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1026                            "nb_hold=%u nb_rx=%u",
1027                            rxq->port_id, rxq->queue_id,
1028                            rx_id, nb_hold, nb_rx);
1029                 rx_id = (uint16_t)(rx_id == 0 ?
1030                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1031                 AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1032                 nb_hold = 0;
1033         }
1034         rxq->nb_rx_hold = nb_hold;
1035
1036         return nb_rx;
1037 }
1038
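/* Clean up to the next RS boundary: advance last_desc_cleaned by rs_thresh
 * and, if hardware has marked that descriptor done, add the freed entries to
 * nb_free. Returns 0 on success, -1 if the descriptor is not yet done.
 */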
1039 static inline int
1040 avf_xmit_cleanup(struct avf_tx_queue *txq)
1041 {
1042         struct avf_tx_entry *sw_ring = txq->sw_ring;
1043         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1044         uint16_t nb_tx_desc = txq->nb_tx_desc;
1045         uint16_t desc_to_clean_to;
1046         uint16_t nb_tx_to_clean;
1047
1048         volatile struct avf_tx_desc *txd = txq->tx_ring;
1049
1050         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1051         if (desc_to_clean_to >= nb_tx_desc)
1052                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1053
1054         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1055         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1056                         rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
1057                         rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE)) {
1058                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1059                                 "(port=%d queue=%d)", desc_to_clean_to,
1060                                 txq->port_id, txq->queue_id);
1061                 return -1;
1062         }
1063
1064         if (last_desc_cleaned > desc_to_clean_to)
1065                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1066                                                         desc_to_clean_to);
1067         else
1068                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1069                                         last_desc_cleaned);
1070
1071         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1072
1073         txq->last_desc_cleaned = desc_to_clean_to;
1074         txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1075
1076         return 0;
1077 }
1078
1079 /* Check if the context descriptor is needed for TX offloading */
1080 static inline uint16_t
1081 avf_calc_context_desc(uint64_t flags)
1082 {
1083         static uint64_t mask = PKT_TX_TCP_SEG;
1084
1085         return (flags & mask) ? 1 : 0;
1086 }
1087
1088 static inline void
1089 avf_txd_enable_checksum(uint64_t ol_flags,
1090                         uint32_t *td_cmd,
1091                         uint32_t *td_offset,
1092                         union avf_tx_offload tx_offload)
1093 {
1094         /* Set MACLEN */
1095         *td_offset |= (tx_offload.l2_len >> 1) <<
1096                       AVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1097
1098         /* Enable L3 checksum offloads */
1099         if (ol_flags & PKT_TX_IP_CKSUM) {
1100                 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1101                 *td_offset |= (tx_offload.l3_len >> 2) <<
1102                               AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1103         } else if (ol_flags & PKT_TX_IPV4) {
1104                 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4;
1105                 *td_offset |= (tx_offload.l3_len >> 2) <<
1106                               AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1107         } else if (ol_flags & PKT_TX_IPV6) {
1108                 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV6;
1109                 *td_offset |= (tx_offload.l3_len >> 2) <<
1110                               AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1111         }
1112
1113         if (ol_flags & PKT_TX_TCP_SEG) {
1114                 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
1115                 *td_offset |= (tx_offload.l4_len >> 2) <<
1116                               AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1117                 return;
1118         }
1119
1120         /* Enable L4 checksum offloads */
1121         switch (ol_flags & PKT_TX_L4_MASK) {
1122         case PKT_TX_TCP_CKSUM:
1123                 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
1124                 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1125                               AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1126                 break;
1127         case PKT_TX_SCTP_CKSUM:
1128                 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1129                 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1130                               AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1131                 break;
1132         case PKT_TX_UDP_CKSUM:
1133                 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_UDP;
1134                 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1135                               AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1136                 break;
1137         default:
1138                 break;
1139         }
1140 }
1141
1142 /* set TSO context descriptor
1143  * support IP -> L4 and IP -> IP -> L4
1144  */
1145 static inline uint64_t
1146 avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
1147 {
1148         uint64_t ctx_desc = 0;
1149         uint32_t cd_cmd, hdr_len, cd_tso_len;
1150
1151         if (!tx_offload.l4_len) {
1152                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1153                 return ctx_desc;
1154         }
1155
1156         /* For a non-tunneled packet, the outer_l2_len and
1157          * outer_l3_len must be 0.
1158          */
1159         hdr_len = tx_offload.l2_len +
1160                   tx_offload.l3_len +
1161                   tx_offload.l4_len;
1162
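        /* e.g. (illustrative) a TCP/IPv4 packet with 14-byte Ethernet, 20-byte
         * IP and 20-byte TCP headers gives hdr_len = 54, so the TSO payload
         * length programmed below is pkt_len - 54.
         */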
1163         cd_cmd = AVF_TX_CTX_DESC_TSO;
1164         cd_tso_len = mbuf->pkt_len - hdr_len;
1165         ctx_desc |= ((uint64_t)cd_cmd << AVF_TXD_CTX_QW1_CMD_SHIFT) |
1166                      ((uint64_t)cd_tso_len << AVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1167                      ((uint64_t)mbuf->tso_segsz << AVF_TXD_CTX_QW1_MSS_SHIFT);
1168
1169         return ctx_desc;
1170 }
1171
1172 /* Build the cmd_type_offset_bsz word of a TX data descriptor */
1173 static inline uint64_t
1174 avf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1175                uint32_t td_tag)
1176 {
1177         return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
1178                                 ((uint64_t)td_cmd  << AVF_TXD_QW1_CMD_SHIFT) |
1179                                 ((uint64_t)td_offset <<
1180                                  AVF_TXD_QW1_OFFSET_SHIFT) |
1181                                 ((uint64_t)size  <<
1182                                  AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1183                                 ((uint64_t)td_tag  <<
1184                                  AVF_TXD_QW1_L2TAG1_SHIFT));
1185 }
1186
1187 /* TX function */
1188 uint16_t
1189 avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1190 {
1191         volatile struct avf_tx_desc *txd;
1192         volatile struct avf_tx_desc *txr;
1193         struct avf_tx_queue *txq;
1194         struct avf_tx_entry *sw_ring;
1195         struct avf_tx_entry *txe, *txn;
1196         struct rte_mbuf *tx_pkt;
1197         struct rte_mbuf *m_seg;
1198         uint16_t tx_id;
1199         uint16_t nb_tx;
1200         uint32_t td_cmd;
1201         uint32_t td_offset;
1202         uint32_t td_tag;
1203         uint64_t ol_flags;
1204         uint16_t nb_used;
1205         uint16_t nb_ctx;
1206         uint16_t tx_last;
1207         uint16_t slen;
1208         uint64_t buf_dma_addr;
1209         union avf_tx_offload tx_offload = {0};
1210
1211         txq = tx_queue;
1212         sw_ring = txq->sw_ring;
1213         txr = txq->tx_ring;
1214         tx_id = txq->tx_tail;
1215         txe = &sw_ring[tx_id];
1216
1217         /* Check if the descriptor ring needs to be cleaned. */
1218         if (txq->nb_free < txq->free_thresh)
1219                 avf_xmit_cleanup(txq);
1220
1221         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1222                 td_cmd = 0;
1223                 td_tag = 0;
1224                 td_offset = 0;
1225
1226                 tx_pkt = *tx_pkts++;
1227                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1228
1229                 ol_flags = tx_pkt->ol_flags;
1230                 tx_offload.l2_len = tx_pkt->l2_len;
1231                 tx_offload.l3_len = tx_pkt->l3_len;
1232                 tx_offload.l4_len = tx_pkt->l4_len;
1233                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1234
1235                 /* Calculate the number of context descriptors needed. */
1236                 nb_ctx = avf_calc_context_desc(ol_flags);
1237
1238                 /* The number of descriptors that must be allocated for
1239                  * a packet equals the number of segments of that packet,
1240                  * plus one context descriptor if needed.
1241                  */
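                /* e.g. (illustrative) a 3-segment TSO packet needs 3 data
                 * descriptors plus 1 context descriptor, so nb_used is 4.
                 */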
1242                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1243                 tx_last = (uint16_t)(tx_id + nb_used - 1);
1244
1245                 /* Circular ring */
1246                 if (tx_last >= txq->nb_tx_desc)
1247                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1248
1249                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1250                            " tx_first=%u tx_last=%u",
1251                            txq->port_id, txq->queue_id, tx_id, tx_last);
1252
1253                 if (nb_used > txq->nb_free) {
1254                         if (avf_xmit_cleanup(txq)) {
1255                                 if (nb_tx == 0)
1256                                         return 0;
1257                                 goto end_of_tx;
1258                         }
1259                         if (unlikely(nb_used > txq->rs_thresh)) {
1260                                 while (nb_used > txq->nb_free) {
1261                                         if (avf_xmit_cleanup(txq)) {
1262                                                 if (nb_tx == 0)
1263                                                         return 0;
1264                                                 goto end_of_tx;
1265                                         }
1266                                 }
1267                         }
1268                 }
1269
1270                 /* Descriptor based VLAN insertion */
1271                 if (ol_flags & PKT_TX_VLAN_PKT) {
1272                         td_cmd |= AVF_TX_DESC_CMD_IL2TAG1;
1273                         td_tag = tx_pkt->vlan_tci;
1274                 }
1275
1276                 /* According to the datasheet, bit 2 is reserved and must be
1277                  * set to 1.
1278                  */
1279                 td_cmd |= 0x04;
1280
1281                 /* Enable checksum offloading */
1282                 if (ol_flags & AVF_TX_CKSUM_OFFLOAD_MASK)
1283                         avf_txd_enable_checksum(ol_flags, &td_cmd,
1284                                                 &td_offset, tx_offload);
1285
1286                 if (nb_ctx) {
1287                         /* Setup TX context descriptor if required */
1288                         volatile struct avf_tx_context_desc *ctx_txd =
1289                                 (volatile struct avf_tx_context_desc *)
1290                                         &txr[tx_id];
1291                         uint16_t cd_l2tag2 = 0;
1292                         uint64_t cd_type_cmd_tso_mss =
1293                                 AVF_TX_DESC_DTYPE_CONTEXT;
1294
1295                         txn = &sw_ring[txe->next_id];
1296                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1297                         if (txe->mbuf) {
1298                                 rte_pktmbuf_free_seg(txe->mbuf);
1299                                 txe->mbuf = NULL;
1300                         }
1301
1302                         /* TSO enabled */
1303                         if (ol_flags & PKT_TX_TCP_SEG)
1304                                 cd_type_cmd_tso_mss |=
1305                                         avf_set_tso_ctx(tx_pkt, tx_offload);
1306
                             /* Store the computed context descriptor in the ring */
                             ctx_txd->type_cmd_tso_mss =
                                     rte_cpu_to_le_64(cd_type_cmd_tso_mss);
                             ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);

1307                         AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
1308                         txe->last_id = tx_last;
1309                         tx_id = txe->next_id;
1310                         txe = txn;
1311                 }
1312
1313                 m_seg = tx_pkt;
1314                 do {
1315                         txd = &txr[tx_id];
1316                         txn = &sw_ring[txe->next_id];
1317
1318                         if (txe->mbuf)
1319                                 rte_pktmbuf_free_seg(txe->mbuf);
1320                         txe->mbuf = m_seg;
1321
1322                         /* Setup TX Descriptor */
1323                         slen = m_seg->data_len;
1324                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
1325                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1326                         txd->cmd_type_offset_bsz = avf_build_ctob(td_cmd,
1327                                                                   td_offset,
1328                                                                   slen,
1329                                                                   td_tag);
1330
1331                         AVF_DUMP_TX_DESC(txq, txd, tx_id);
1332                         txe->last_id = tx_last;
1333                         tx_id = txe->next_id;
1334                         txe = txn;
1335                         m_seg = m_seg->next;
1336                 } while (m_seg);
1337
1338                 /* The last packet data descriptor needs End Of Packet (EOP) */
1339                 td_cmd |= AVF_TX_DESC_CMD_EOP;
1340                 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
1341                 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
1342
1343                 if (txq->nb_used >= txq->rs_thresh) {
1344                         PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
1345                                    "%4u (port=%d queue=%d)",
1346                                    tx_last, txq->port_id, txq->queue_id);
1347
1348                         td_cmd |= AVF_TX_DESC_CMD_RS;
1349
1350                         /* Update txq RS bit counters */
1351                         txq->nb_used = 0;
1352                 }
1353
1354                 txd->cmd_type_offset_bsz |=
1355                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1356                                          AVF_TXD_QW1_CMD_SHIFT);
1357                 AVF_DUMP_TX_DESC(txq, txd, tx_id);
1358         }
1359
1360 end_of_tx:
1361         rte_wmb();
1362
1363         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1364                    txq->port_id, txq->queue_id, tx_id, nb_tx);
1365
1366         AVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
1367         txq->tx_tail = tx_id;
1368
1369         return nb_tx;
1370 }
1371
1372 static uint16_t
1373 avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
1374                   uint16_t nb_pkts)
1375 {
1376         uint16_t nb_tx = 0;
1377         struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
1378
1379         while (nb_pkts) {
1380                 uint16_t ret, num;
1381
1382                 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
1383                 ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
1384                 nb_tx += ret;
1385                 nb_pkts -= ret;
1386                 if (ret < num)
1387                         break;
1388         }
1389
1390         return nb_tx;
1391 }
1392
1393 /* TX prep functions */
1394 uint16_t
1395 avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1396               uint16_t nb_pkts)
1397 {
1398         int i, ret;
1399         uint64_t ol_flags;
1400         struct rte_mbuf *m;
1401
1402         for (i = 0; i < nb_pkts; i++) {
1403                 m = tx_pkts[i];
1404                 ol_flags = m->ol_flags;
1405
1406                 /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
1407                 if (!(ol_flags & PKT_TX_TCP_SEG)) {
1408                         if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
1409                                 rte_errno = EINVAL;
1410                                 return i;
1411                         }
1412                 } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
1413                            (m->tso_segsz > AVF_MAX_TSO_MSS)) {
1414                         /* MSS outside the range are considered malicious */
1415                         rte_errno = -EINVAL;
1416                         return i;
1417                 }
1418
1419                 if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
1420                         rte_errno = ENOTSUP;
1421                         return i;
1422                 }
1423
1424 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1425                 ret = rte_validate_tx_offload(m);
1426                 if (ret != 0) {
1427                         rte_errno = -ret; /* ret is a negative errno */
1428                         return i;
1429                 }
1430 #endif
1431                 ret = rte_net_intel_cksum_prepare(m);
1432                 if (ret != 0) {
1433                         rte_errno = -ret; /* ret is a negative errno */
1434                         return i;
1435                 }
1436         }
1437
1438         return i;
1439 }
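
     /* Illustrative usage from the application side (not part of this driver):
      * rte_eth_tx_prepare() dispatches to the queue's tx_pkt_prepare callback,
      * which is avf_prep_pkts() on the non-vector Tx path, e.g.
      *
      *     nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
      *     if (nb_prep < nb_pkts)
      *             handle_failure(rte_errno); // hypothetical error handler
      *     nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
      */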
1440
1441 /* Select the Rx burst callback: vector (scattered or not), scattered, or basic. */
1442 void
1443 avf_set_rx_function(struct rte_eth_dev *dev)
1444 {
1445         struct avf_adapter *adapter =
1446                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1447         struct avf_rx_queue *rxq;
1448         int i;
1449
1450         if (adapter->rx_vec_allowed) {
1451                 if (dev->data->scattered_rx) {
1452                         PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
1453                                     " (port=%d).", dev->data->port_id);
1454                         dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
1455                 } else {
1456                         PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
1457                                     " (port=%d).", dev->data->port_id);
1458                         dev->rx_pkt_burst = avf_recv_pkts_vec;
1459                 }
1460                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1461                         rxq = dev->data->rx_queues[i];
1462                         if (!rxq)
1463                                 continue;
1464                         avf_rxq_vec_setup(rxq);
1465                 }
1466         } else if (dev->data->scattered_rx) {
1467                 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
1468                             dev->data->port_id);
1469                 dev->rx_pkt_burst = avf_recv_scattered_pkts;
1470         } else {
1471                 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
1472                             dev->data->port_id);
1473                 dev->rx_pkt_burst = avf_recv_pkts;
1474         }
1475 }
1476
1477 /* Select the Tx burst callback: vector (no prepare step) or basic with avf_prep_pkts. */
1478 void
1479 avf_set_tx_function(struct rte_eth_dev *dev)
1480 {
1481         struct avf_adapter *adapter =
1482                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1483         struct avf_tx_queue *txq;
1484         int i;
1485
1486         if (adapter->tx_vec_allowed) {
1487                 PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
1488                             dev->data->port_id);
1489                 dev->tx_pkt_burst = avf_xmit_pkts_vec;
1490                 dev->tx_pkt_prepare = NULL;
1491                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1492                         txq = dev->data->tx_queues[i];
1493                         if (!txq)
1494                                 continue;
1495                         avf_txq_vec_setup(txq);
1496                 }
1497         } else {
1498                 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
1499                             dev->data->port_id);
1500                 dev->tx_pkt_burst = avf_xmit_pkts;
1501                 dev->tx_pkt_prepare = avf_prep_pkts;
1502         }
1503 }
1504
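     /* Fill in rte_eth_rxq_info for rte_eth_rx_queue_info_get(); note that
      * rx_drop_en is always reported as enabled.
      */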
1505 void
1506 avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1507                      struct rte_eth_rxq_info *qinfo)
1508 {
1509         struct avf_rx_queue *rxq;
1510
1511         rxq = dev->data->rx_queues[queue_id];
1512
1513         qinfo->mp = rxq->mp;
1514         qinfo->scattered_rx = dev->data->scattered_rx;
1515         qinfo->nb_desc = rxq->nb_rx_desc;
1516
1517         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1518         qinfo->conf.rx_drop_en = TRUE;
1519         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1520 }
1521
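     /* Fill in rte_eth_txq_info for rte_eth_tx_queue_info_get(). */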
1522 void
1523 avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1524                      struct rte_eth_txq_info *qinfo)
1525 {
1526         struct avf_tx_queue *txq;
1527
1528         txq = dev->data->tx_queues[queue_id];
1529
1530         qinfo->nb_desc = txq->nb_tx_desc;
1531
1532         qinfo->conf.tx_free_thresh = txq->free_thresh;
1533         qinfo->conf.tx_rs_thresh = txq->rs_thresh;
1534         qinfo->conf.txq_flags = txq->txq_flags;
1535         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1536 }
1537
1538 /* Get the number of used descriptors of an Rx queue. */
1539 uint32_t
1540 avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
1541 {
1542 #define AVF_RXQ_SCAN_INTERVAL 4
1543         volatile union avf_rx_desc *rxdp;
1544         struct avf_rx_queue *rxq;
1545         uint16_t desc = 0;
1546
1547         rxq = dev->data->rx_queues[queue_id];
1548         rxdp = &rxq->rx_ring[rxq->rx_tail];
1549         while ((desc < rxq->nb_rx_desc) &&
1550                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1551                  AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
1552                (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
1553                 /* Check the DD bit of only every fourth Rx descriptor
1554                  * (AVF_RXQ_SCAN_INTERVAL) to avoid reading the ring too
1555                  * frequently and degrading performance.
1556                  */
1557                 desc += AVF_RXQ_SCAN_INTERVAL;
1558                 rxdp += AVF_RXQ_SCAN_INTERVAL;
1559                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1560                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1561                                         desc - rxq->nb_rx_desc]);
1562         }
1563
1564         return desc;
1565 }
1566
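     /* rx_descriptor_status callback: report the state of the descriptor
      * 'offset' entries past the current Rx tail: DONE once the device has
      * set the DD bit, UNAVAIL for the part of the ring still held back by
      * the driver.
      */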
1567 int
1568 avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
1569 {
1570         struct avf_rx_queue *rxq = rx_queue;
1571         volatile uint64_t *status;
1572         uint64_t mask;
1573         uint32_t desc;
1574
1575         if (unlikely(offset >= rxq->nb_rx_desc))
1576                 return -EINVAL;
1577
1578         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1579                 return RTE_ETH_RX_DESC_UNAVAIL;
1580
1581         desc = rxq->rx_tail + offset;
1582         if (desc >= rxq->nb_rx_desc)
1583                 desc -= rxq->nb_rx_desc;
1584
1585         status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1586         mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
1587                 << AVF_RXD_QW1_STATUS_SHIFT);
1588         if (*status & mask)
1589                 return RTE_ETH_RX_DESC_DONE;
1590
1591         return RTE_ETH_RX_DESC_AVAIL;
1592 }
1593
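     /* tx_descriptor_status callback: round 'offset' up to the next
      * descriptor carrying the RS bit (completions are reported at RS
      * boundaries) and check its DTYPE field for DESC_DONE.
      */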
1594 int
1595 avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
1596 {
1597         struct avf_tx_queue *txq = tx_queue;
1598         volatile uint64_t *status;
1599         uint64_t mask, expect;
1600         uint32_t desc;
1601
1602         if (unlikely(offset >= txq->nb_tx_desc))
1603                 return -EINVAL;
1604
1605         desc = txq->tx_tail + offset;
1606         /* go to next desc that has the RS bit */
1607         desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
1608                 txq->rs_thresh;
1609         if (desc >= txq->nb_tx_desc) {
1610                 desc -= txq->nb_tx_desc;
1611                 if (desc >= txq->nb_tx_desc)
1612                         desc -= txq->nb_tx_desc;
1613         }
1614
1615         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1616         mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
1617         expect = rte_cpu_to_le_64(
1618                  AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
1619         if ((*status & mask) == expect)
1620                 return RTE_ETH_TX_DESC_DONE;
1621
1622         return RTE_ETH_TX_DESC_FULL;
1623 }
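
     /* Illustrative sketch (not part of this driver): applications reach the
      * two status callbacks above and avf_dev_rxq_count() through the generic
      * ethdev API, e.g.
      *
      *     used = rte_eth_rx_queue_count(port_id, queue_id);
      *     rx_state = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
      *     tx_state = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
      *
      * where port_id, queue_id and offset are application-chosen values.
      */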
1624
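     /* Weak fallback stubs for the vector Rx/Tx routines.  They are overridden
      * at link time when an architecture-specific vector implementation (e.g.
      * SSE) is compiled in; otherwise the burst stubs transmit/receive nothing
      * and the setup stubs report failure.
      */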
1625 uint16_t __attribute__((weak))
1626 avf_recv_pkts_vec(__rte_unused void *rx_queue,
1627                   __rte_unused struct rte_mbuf **rx_pkts,
1628                   __rte_unused uint16_t nb_pkts)
1629 {
1630         return 0;
1631 }
1632
1633 uint16_t __attribute__((weak))
1634 avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
1635                             __rte_unused struct rte_mbuf **rx_pkts,
1636                             __rte_unused uint16_t nb_pkts)
1637 {
1638         return 0;
1639 }
1640
1641 uint16_t __attribute__((weak))
1642 avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
1643                          __rte_unused struct rte_mbuf **tx_pkts,
1644                          __rte_unused uint16_t nb_pkts)
1645 {
1646         return 0;
1647 }
1648
1649 int __attribute__((weak))
1650 avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
1651 {
1652         return -1;
1653 }
1654
1655 int __attribute__((weak))
1656 avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
1657 {
1658         return -1;
1659 }