dpdk.git: drivers/net/ixgbe/ixgbe_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   Copyright 2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <sys/queue.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <errno.h>
41 #include <stdint.h>
42 #include <stdarg.h>
43 #include <unistd.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
49 #include <rte_log.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
52 #include <rte_pci.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
68 #include <rte_udp.h>
69 #include <rte_tcp.h>
70 #include <rte_sctp.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
73 #include <rte_ip.h>
74
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
82
83 /* Bit mask of the offload flags that require building a TX context descriptor */
84 #define IXGBE_TX_OFFLOAD_MASK (                  \
85                 PKT_TX_VLAN_PKT |                \
86                 PKT_TX_IP_CKSUM |                \
87                 PKT_TX_L4_MASK |                 \
88                 PKT_TX_TCP_SEG |                 \
89                 PKT_TX_OUTER_IP_CKSUM)
90
91 static inline struct rte_mbuf *
92 rte_rxmbuf_alloc(struct rte_mempool *mp)
93 {
94         struct rte_mbuf *m;
95
96         m = __rte_mbuf_raw_alloc(mp);
97         __rte_mbuf_sanity_check_raw(m, 0);
98         return m;
99 }
100
101
102 #if 1
103 #define RTE_PMD_USE_PREFETCH
104 #endif
105
106 #ifdef RTE_PMD_USE_PREFETCH
107 /*
108  * Prefetch a cache line into all cache levels.
109  */
110 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
111 #else
112 #define rte_ixgbe_prefetch(p)   do {} while (0)
113 #endif
114
115 /*********************************************************************
116  *
117  *  TX functions
118  *
119  **********************************************************************/
120
121 /*
122  * Check for descriptors with their DD bit set and free mbufs.
123  * Return the total number of buffers freed.
124  */
125 static inline int __attribute__((always_inline))
126 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
127 {
128         struct ixgbe_tx_entry *txep;
129         uint32_t status;
130         int i, nb_free = 0;
131         struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
132
133         /* check DD bit on threshold descriptor */
134         status = txq->tx_ring[txq->tx_next_dd].wb.status;
135         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
136                 return 0;
137
138         /*
139          * first buffer to free from S/W ring is at index
140          * tx_next_dd - (tx_rs_thresh-1)
141          */
142         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
143
144         for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145                 /* free buffers one at a time */
146                 m = __rte_pktmbuf_prefree_seg(txep->mbuf);
147                 txep->mbuf = NULL;
148
149                 if (unlikely(m == NULL))
150                         continue;
151
152                 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
153                     (nb_free > 0 && m->pool != free[0]->pool)) {
154                         rte_mempool_put_bulk(free[0]->pool,
155                                              (void **)free, nb_free);
156                         nb_free = 0;
157                 }
158
159                 free[nb_free++] = m;
160         }
161
162         if (nb_free > 0)
163                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
164
165         /* buffers were freed, update counters */
166         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
167         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
168         if (txq->tx_next_dd >= txq->nb_tx_desc)
169                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
170
171         return txq->tx_rs_thresh;
172 }
173
174 /* Populate 4 descriptors with data from 4 mbufs */
175 static inline void
176 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
177 {
178         uint64_t buf_dma_addr;
179         uint32_t pkt_len;
180         int i;
181
182         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
183                 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
184                 pkt_len = (*pkts)->data_len;
185
186                 /* write data to descriptor */
187                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
188
189                 txdp->read.cmd_type_len =
190                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
191
192                 txdp->read.olinfo_status =
193                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
194
195                 rte_prefetch0(&(*pkts)->pool);
196         }
197 }
198
199 /* Populate 1 descriptor with data from 1 mbuf */
200 static inline void
201 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
202 {
203         uint64_t buf_dma_addr;
204         uint32_t pkt_len;
205
206         buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
207         pkt_len = (*pkts)->data_len;
208
209         /* write data to descriptor */
210         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
211         txdp->read.cmd_type_len =
212                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
213         txdp->read.olinfo_status =
214                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
215         rte_prefetch0(&(*pkts)->pool);
216 }
217
218 /*
219  * Fill H/W descriptor ring with mbuf data.
220  * Copy mbuf pointers to the S/W ring.
221  */
222 static inline void
223 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
224                       uint16_t nb_pkts)
225 {
226         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
227         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
228         const int N_PER_LOOP = 4;
229         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
230         int mainpart, leftover;
231         int i, j;
232
233         /*
234          * Process most of the packets in chunks of N pkts.  Any
235          * leftover packets will get processed one at a time.
236          */
237         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
238         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
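        /*
         * Example: with nb_pkts == 7, mainpart == 4 and leftover == 3,
         * so one tx4() call handles the first four packets and tx1()
         * is used for each of the remaining three.
         */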
239         for (i = 0; i < mainpart; i += N_PER_LOOP) {
240                 /* Copy N mbuf pointers to the S/W ring */
241                 for (j = 0; j < N_PER_LOOP; ++j) {
242                         (txep + i + j)->mbuf = *(pkts + i + j);
243                 }
244                 tx4(txdp + i, pkts + i);
245         }
246
247         if (unlikely(leftover > 0)) {
248                 for (i = 0; i < leftover; ++i) {
249                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
250                         tx1(txdp + mainpart + i, pkts + mainpart + i);
251                 }
252         }
253 }
254
255 static inline uint16_t
256 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
257              uint16_t nb_pkts)
258 {
259         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
260         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
261         uint16_t n = 0;
262
263         /*
264          * Begin scanning the H/W ring for done descriptors when the
265          * number of available descriptors drops below tx_free_thresh.  For
266          * each done descriptor, free the associated buffer.
267          */
268         if (txq->nb_tx_free < txq->tx_free_thresh)
269                 ixgbe_tx_free_bufs(txq);
270
271         /* Only use descriptors that are available */
272         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
273         if (unlikely(nb_pkts == 0))
274                 return 0;
275
276         /* Use exactly nb_pkts descriptors */
277         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
278
279         /*
280          * At this point, we know there are enough descriptors in the
281          * ring to transmit all the packets.  This assumes that each
282          * mbuf contains a single segment, and that no new offloads
283          * are expected, which would require a new context descriptor.
284          */
285
286         /*
287          * See if we're going to wrap-around. If so, handle the top
288          * of the descriptor ring first, then do the bottom.  If not,
289          * the processing looks just like the "bottom" part anyway...
290          */
291         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
292                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
293                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
294
295                 /*
296                  * We know that the last descriptor in the ring will need to
297                  * have its RS bit set because tx_rs_thresh has to be
298                  * a divisor of the ring size
299                  */
300                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
301                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
302                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
303
304                 txq->tx_tail = 0;
305         }
306
307         /* Fill H/W descriptor ring with mbuf data */
308         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
309         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
310
311         /*
312          * Determine if RS bit should be set
313          * This is what we actually want:
314          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
315          * but instead of subtracting 1 and doing >=, we can just do
316          * greater than without subtracting.
317          */
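        /*
         * Worked example: if tx_next_rs is 31 and tx_tail has advanced to 32,
         * descriptor 31 was filled in this call, so its RS bit is set and
         * tx_next_rs is moved forward by tx_rs_thresh.
         */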
318         if (txq->tx_tail > txq->tx_next_rs) {
319                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
320                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
321                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
322                                                 txq->tx_rs_thresh);
323                 if (txq->tx_next_rs >= txq->nb_tx_desc)
324                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
325         }
326
327         /*
328          * Check for wrap-around. This would only happen if we used
329          * up to the last descriptor in the ring, no more, no less.
330          */
331         if (txq->tx_tail >= txq->nb_tx_desc)
332                 txq->tx_tail = 0;
333
334         /* update tail pointer */
335         rte_wmb();
336         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
337
338         return nb_pkts;
339 }
340
341 uint16_t
342 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
343                        uint16_t nb_pkts)
344 {
345         uint16_t nb_tx;
346
347         /* If the burst does not exceed TX_MAX_BURST pkts, transmit it directly */
348         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
349                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
350
351         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
352         nb_tx = 0;
353         while (nb_pkts) {
354                 uint16_t ret, n;
355                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
356                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
357                 nb_tx = (uint16_t)(nb_tx + ret);
358                 nb_pkts = (uint16_t)(nb_pkts - ret);
359                 if (ret < n)
360                         break;
361         }
362
363         return nb_tx;
364 }
365
366 static inline void
367 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
368                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
369                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
370 {
371         uint32_t type_tucmd_mlhl;
372         uint32_t mss_l4len_idx = 0;
373         uint32_t ctx_idx;
374         uint32_t vlan_macip_lens;
375         union ixgbe_tx_offload tx_offload_mask;
376         uint32_t seqnum_seed = 0;
377
378         ctx_idx = txq->ctx_curr;
379         tx_offload_mask.data[0] = 0;
380         tx_offload_mask.data[1] = 0;
381         type_tucmd_mlhl = 0;
382
383         /* Specify which HW CTX to upload. */
384         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
385
386         if (ol_flags & PKT_TX_VLAN_PKT) {
387                 tx_offload_mask.vlan_tci |= ~0;
388         }
389
390         /* check if TCP segmentation required for this packet */
391         if (ol_flags & PKT_TX_TCP_SEG) {
392                 /* implies IP cksum in IPv4 */
393                 if (ol_flags & PKT_TX_IP_CKSUM)
394                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
395                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
396                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
397                 else
398                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
399                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
400                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
401
402                 tx_offload_mask.l2_len |= ~0;
403                 tx_offload_mask.l3_len |= ~0;
404                 tx_offload_mask.l4_len |= ~0;
405                 tx_offload_mask.tso_segsz |= ~0;
406                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
407                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
408         } else { /* no TSO, check if hardware checksum is needed */
409                 if (ol_flags & PKT_TX_IP_CKSUM) {
410                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
411                         tx_offload_mask.l2_len |= ~0;
412                         tx_offload_mask.l3_len |= ~0;
413                 }
414
415                 switch (ol_flags & PKT_TX_L4_MASK) {
416                 case PKT_TX_UDP_CKSUM:
417                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
418                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
419                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
420                         tx_offload_mask.l2_len |= ~0;
421                         tx_offload_mask.l3_len |= ~0;
422                         break;
423                 case PKT_TX_TCP_CKSUM:
424                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
425                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
426                         mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
427                         tx_offload_mask.l2_len |= ~0;
428                         tx_offload_mask.l3_len |= ~0;
429                         break;
430                 case PKT_TX_SCTP_CKSUM:
431                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
432                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433                         mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
434                         tx_offload_mask.l2_len |= ~0;
435                         tx_offload_mask.l3_len |= ~0;
436                         break;
437                 default:
438                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
439                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
440                         break;
441                 }
442         }
443
444         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
445                 tx_offload_mask.outer_l2_len |= ~0;
446                 tx_offload_mask.outer_l3_len |= ~0;
447                 tx_offload_mask.l2_len |= ~0;
448                 seqnum_seed |= tx_offload.outer_l3_len
449                                << IXGBE_ADVTXD_OUTER_IPLEN;
450                 seqnum_seed |= tx_offload.l2_len
451                                << IXGBE_ADVTXD_TUNNEL_LEN;
452         }
453
454         txq->ctx_cache[ctx_idx].flags = ol_flags;
455         txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
456                 tx_offload_mask.data[0] & tx_offload.data[0];
457         txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
458                 tx_offload_mask.data[1] & tx_offload.data[1];
459         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
460
461         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
462         vlan_macip_lens = tx_offload.l3_len;
463         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
464                 vlan_macip_lens |= (tx_offload.outer_l2_len <<
465                                     IXGBE_ADVTXD_MACLEN_SHIFT);
466         else
467                 vlan_macip_lens |= (tx_offload.l2_len <<
468                                     IXGBE_ADVTXD_MACLEN_SHIFT);
469         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
470         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
471         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
472         ctx_txd->seqnum_seed     = seqnum_seed;
473 }
474
475 /*
476  * Check which hardware context can be used. Use the existing match
477  * or create a new context descriptor.
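 * The queue caches two context slots (ctx_curr toggles between them with
 * the "^= 1" below). A return value of IXGBE_CTX_NUM means that neither
 * cached context matches and the caller must build a new context descriptor.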
478  */
479 static inline uint32_t
480 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
481                 union ixgbe_tx_offload tx_offload)
482 {
483         /* Check for a match with the currently used context */
484         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
485                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
486                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
487                  & tx_offload.data[0])) &&
488                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
489                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
490                  & tx_offload.data[1])))) {
491                         return txq->ctx_curr;
492         }
493
494         /* Otherwise, check for a match with the other cached context */
495         txq->ctx_curr ^= 1;
496         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
497                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
498                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
499                  & tx_offload.data[0])) &&
500                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
501                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
502                  & tx_offload.data[1])))) {
503                         return txq->ctx_curr;
504         }
505
506         /* Neither cached context matches: a new context descriptor is needed */
507         return IXGBE_CTX_NUM;
508 }
509
510 static inline uint32_t
511 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
512 {
513         uint32_t tmp = 0;
514         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
515                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
516         if (ol_flags & PKT_TX_IP_CKSUM)
517                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
518         if (ol_flags & PKT_TX_TCP_SEG)
519                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
520         return tmp;
521 }
522
523 static inline uint32_t
524 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
525 {
526         uint32_t cmdtype = 0;
527         if (ol_flags & PKT_TX_VLAN_PKT)
528                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
529         if (ol_flags & PKT_TX_TCP_SEG)
530                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
531         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
532                 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
533         return cmdtype;
534 }
535
536 /* Default RS bit threshold values */
537 #ifndef DEFAULT_TX_RS_THRESH
538 #define DEFAULT_TX_RS_THRESH   32
539 #endif
540 #ifndef DEFAULT_TX_FREE_THRESH
541 #define DEFAULT_TX_FREE_THRESH 32
542 #endif
543
544 /* Reset transmit descriptors after they have been used */
545 static inline int
546 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
547 {
548         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
549         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
550         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
551         uint16_t nb_tx_desc = txq->nb_tx_desc;
552         uint16_t desc_to_clean_to;
553         uint16_t nb_tx_to_clean;
554         uint32_t status;
555
556         /* Determine the last descriptor needing to be cleaned */
557         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
558         if (desc_to_clean_to >= nb_tx_desc)
559                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
560
561         /* Check to make sure the last descriptor to clean is done */
562         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
563         status = txr[desc_to_clean_to].wb.status;
564         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
565         {
566                 PMD_TX_FREE_LOG(DEBUG,
567                                 "TX descriptor %4u is not done "
568                                 "(port=%d queue=%d)",
569                                 desc_to_clean_to,
570                                 txq->port_id, txq->queue_id);
571                 /* Failed to clean any descriptors, better luck next time */
572                 return -(1);
573         }
574
575         /* Figure out how many descriptors will be cleaned */
576         if (last_desc_cleaned > desc_to_clean_to)
577                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
578                                                         desc_to_clean_to);
579         else
580                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
581                                                 last_desc_cleaned);
582
583         PMD_TX_FREE_LOG(DEBUG,
584                         "Cleaning %4u TX descriptors: %4u to %4u "
585                         "(port=%d queue=%d)",
586                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
587                         txq->port_id, txq->queue_id);
588
589         /*
590          * The last descriptor to clean is done, so that means all the
591          * descriptors from the last descriptor that was cleaned
592          * up to the last descriptor with the RS bit set
593          * are done. Only reset the threshold descriptor.
594          */
595         txr[desc_to_clean_to].wb.status = 0;
596
597         /* Update the txq to reflect the last descriptor that was cleaned */
598         txq->last_desc_cleaned = desc_to_clean_to;
599         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
600
601         /* No Error */
602         return 0;
603 }
604
605 uint16_t
606 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
607                 uint16_t nb_pkts)
608 {
609         struct ixgbe_tx_queue *txq;
610         struct ixgbe_tx_entry *sw_ring;
611         struct ixgbe_tx_entry *txe, *txn;
612         volatile union ixgbe_adv_tx_desc *txr;
613         volatile union ixgbe_adv_tx_desc *txd, *txp;
614         struct rte_mbuf     *tx_pkt;
615         struct rte_mbuf     *m_seg;
616         uint64_t buf_dma_addr;
617         uint32_t olinfo_status;
618         uint32_t cmd_type_len;
619         uint32_t pkt_len;
620         uint16_t slen;
621         uint64_t ol_flags;
622         uint16_t tx_id;
623         uint16_t tx_last;
624         uint16_t nb_tx;
625         uint16_t nb_used;
626         uint64_t tx_ol_req;
627         uint32_t ctx = 0;
628         uint32_t new_ctx;
629         union ixgbe_tx_offload tx_offload;
630
631         tx_offload.data[0] = 0;
632         tx_offload.data[1] = 0;
633         txq = tx_queue;
634         sw_ring = txq->sw_ring;
635         txr     = txq->tx_ring;
636         tx_id   = txq->tx_tail;
637         txe = &sw_ring[tx_id];
638         txp = NULL;
639
640         /* Determine if the descriptor ring needs to be cleaned. */
641         if (txq->nb_tx_free < txq->tx_free_thresh)
642                 ixgbe_xmit_cleanup(txq);
643
644         rte_prefetch0(&txe->mbuf->pool);
645
646         /* TX loop */
647         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
648                 new_ctx = 0;
649                 tx_pkt = *tx_pkts++;
650                 pkt_len = tx_pkt->pkt_len;
651
652                 /*
653                  * Determine how many (if any) context descriptors
654                  * are needed for offload functionality.
655                  */
656                 ol_flags = tx_pkt->ol_flags;
657
658                 /* If hardware offload required */
659                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
660                 if (tx_ol_req) {
661                         tx_offload.l2_len = tx_pkt->l2_len;
662                         tx_offload.l3_len = tx_pkt->l3_len;
663                         tx_offload.l4_len = tx_pkt->l4_len;
664                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
665                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
666                         tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
667                         tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
668
669                         /* Check whether a new context must be built or an existing one can be reused. */
670                         ctx = what_advctx_update(txq, tx_ol_req,
671                                 tx_offload);
672                         /* Only allocate a context descriptor if required */
673                         new_ctx = (ctx == IXGBE_CTX_NUM);
674                         ctx = txq->ctx_curr;
675                 }
676
677                 /*
678                  * Keep track of how many descriptors are used this loop
679                  * This will always be the number of segments + the number of
680                  * Context descriptors required to transmit the packet
681                  */
682                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
683
684                 if (txp != NULL &&
685                                 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
686                         /* set RS on the previous packet in the burst */
687                         txp->read.cmd_type_len |=
688                                 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
689
690                 /*
691                  * The number of descriptors that must be allocated for a
692                  * packet is the number of segments of that packet, plus 1
693                  * Context Descriptor for the hardware offload, if any.
694                  * Determine the last TX descriptor to allocate in the TX ring
695                  * for the packet, starting from the current position (tx_id)
696                  * in the ring.
697                  */
698                 tx_last = (uint16_t) (tx_id + nb_used - 1);
699
700                 /* Circular ring */
701                 if (tx_last >= txq->nb_tx_desc)
702                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
703
704                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
705                            " tx_first=%u tx_last=%u",
706                            (unsigned) txq->port_id,
707                            (unsigned) txq->queue_id,
708                            (unsigned) pkt_len,
709                            (unsigned) tx_id,
710                            (unsigned) tx_last);
711
712                 /*
713                  * Make sure there are enough TX descriptors available to
714                  * transmit the entire packet.
715                  * nb_used better be less than or equal to txq->tx_rs_thresh
716                  */
717                 if (nb_used > txq->nb_tx_free) {
718                         PMD_TX_FREE_LOG(DEBUG,
719                                         "Not enough free TX descriptors "
720                                         "nb_used=%4u nb_free=%4u "
721                                         "(port=%d queue=%d)",
722                                         nb_used, txq->nb_tx_free,
723                                         txq->port_id, txq->queue_id);
724
725                         if (ixgbe_xmit_cleanup(txq) != 0) {
726                                 /* Could not clean any descriptors */
727                                 if (nb_tx == 0)
728                                         return 0;
729                                 goto end_of_tx;
730                         }
731
732                         /* nb_used better be <= txq->tx_rs_thresh */
733                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
734                                 PMD_TX_FREE_LOG(DEBUG,
735                                         "The number of descriptors needed to "
736                                         "transmit the packet exceeds the "
737                                         "RS bit threshold. This will impact "
738                                         "performance. "
739                                         "nb_used=%4u nb_free=%4u "
740                                         "tx_rs_thresh=%4u. "
741                                         "(port=%d queue=%d)",
742                                         nb_used, txq->nb_tx_free,
743                                         txq->tx_rs_thresh,
744                                         txq->port_id, txq->queue_id);
745                                 /*
746                                  * Loop here until there are enough TX
747                                  * descriptors or until the ring cannot be
748                                  * cleaned.
749                                  */
750                                 while (nb_used > txq->nb_tx_free) {
751                                         if (ixgbe_xmit_cleanup(txq) != 0) {
752                                                 /*
753                                                  * Could not clean any
754                                                  * descriptors
755                                                  */
756                                                 if (nb_tx == 0)
757                                                         return 0;
758                                                 goto end_of_tx;
759                                         }
760                                 }
761                         }
762                 }
763
764                 /*
765                  * By now there are enough free TX descriptors to transmit
766                  * the packet.
767                  */
768
769                 /*
770                  * Set common flags of all TX Data Descriptors.
771                  *
772                  * The following bits must be set in all Data Descriptors:
773                  *   - IXGBE_ADVTXD_DTYP_DATA
774                  *   - IXGBE_ADVTXD_DCMD_DEXT
775                  *
776                  * The following bits must be set in the first Data Descriptor
777                  * and are ignored in the other ones:
778                  *   - IXGBE_ADVTXD_DCMD_IFCS
779                  *   - IXGBE_ADVTXD_MAC_1588
780                  *   - IXGBE_ADVTXD_DCMD_VLE
781                  *
782                  * The following bits must only be set in the last Data
783                  * Descriptor:
784                  *   - IXGBE_TXD_CMD_EOP
785                  *
786                  * The following bits can be set in any Data Descriptor, but
787                  * are only set in the last Data Descriptor:
788                  *   - IXGBE_TXD_CMD_RS
789                  */
790                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
791                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
792
793 #ifdef RTE_LIBRTE_IEEE1588
794                 if (ol_flags & PKT_TX_IEEE1588_TMST)
795                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
796 #endif
797
798                 olinfo_status = 0;
799                 if (tx_ol_req) {
800
801                         if (ol_flags & PKT_TX_TCP_SEG) {
802                                 /* when TSO is on, the paylen in the descriptor is
803                                  * not the packet length but the TCP payload length */
804                                 pkt_len -= (tx_offload.l2_len +
805                                         tx_offload.l3_len + tx_offload.l4_len);
806                         }
807
808                         /*
809                          * Setup the TX Advanced Context Descriptor if required
810                          */
811                         if (new_ctx) {
812                                 volatile struct ixgbe_adv_tx_context_desc *
813                                     ctx_txd;
814
815                                 ctx_txd = (volatile struct
816                                     ixgbe_adv_tx_context_desc *)
817                                     &txr[tx_id];
818
819                                 txn = &sw_ring[txe->next_id];
820                                 rte_prefetch0(&txn->mbuf->pool);
821
822                                 if (txe->mbuf != NULL) {
823                                         rte_pktmbuf_free_seg(txe->mbuf);
824                                         txe->mbuf = NULL;
825                                 }
826
827                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
828                                         tx_offload);
829
830                                 txe->last_id = tx_last;
831                                 tx_id = txe->next_id;
832                                 txe = txn;
833                         }
834
835                         /*
836                          * Set up the TX Advanced Data Descriptor.
837                          * This path is taken whether a new context
838                          * descriptor was built or an existing one is reused.
839                          */
840                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
841                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
842                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
843                 }
844
845                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
846
847                 m_seg = tx_pkt;
848                 do {
849                         txd = &txr[tx_id];
850                         txn = &sw_ring[txe->next_id];
851                         rte_prefetch0(&txn->mbuf->pool);
852
853                         if (txe->mbuf != NULL)
854                                 rte_pktmbuf_free_seg(txe->mbuf);
855                         txe->mbuf = m_seg;
856
857                         /*
858                          * Set up Transmit Data Descriptor.
859                          */
860                         slen = m_seg->data_len;
861                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
862                         txd->read.buffer_addr =
863                                 rte_cpu_to_le_64(buf_dma_addr);
864                         txd->read.cmd_type_len =
865                                 rte_cpu_to_le_32(cmd_type_len | slen);
866                         txd->read.olinfo_status =
867                                 rte_cpu_to_le_32(olinfo_status);
868                         txe->last_id = tx_last;
869                         tx_id = txe->next_id;
870                         txe = txn;
871                         m_seg = m_seg->next;
872                 } while (m_seg != NULL);
873
874                 /*
875                  * The last packet data descriptor needs End Of Packet (EOP)
876                  */
877                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
878                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
879                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
880
881                 /* Set RS bit only on threshold packets' last descriptor */
882                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
883                         PMD_TX_FREE_LOG(DEBUG,
884                                         "Setting RS bit on TXD id="
885                                         "%4u (port=%d queue=%d)",
886                                         tx_last, txq->port_id, txq->queue_id);
887
888                         cmd_type_len |= IXGBE_TXD_CMD_RS;
889
890                         /* Update txq RS bit counters */
891                         txq->nb_tx_used = 0;
892                         txp = NULL;
893                 } else
894                         txp = txd;
895
896                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
897         }
898
899 end_of_tx:
900         /* set RS on last packet in the burst */
901         if (txp != NULL)
902                 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
903
904         rte_wmb();
905
906         /*
907          * Set the Transmit Descriptor Tail (TDT)
908          */
909         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
910                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
911                    (unsigned) tx_id, (unsigned) nb_tx);
912         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
913         txq->tx_tail = tx_id;
914
915         return nb_tx;
916 }
917
918 /*********************************************************************
919  *
920  *  RX functions
921  *
922  **********************************************************************/
923 #define IXGBE_PACKET_TYPE_IPV4              0X01
924 #define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
925 #define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
926 #define IXGBE_PACKET_TYPE_IPV4_SCTP         0X41
927 #define IXGBE_PACKET_TYPE_IPV4_EXT          0X03
928 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP     0X43
929 #define IXGBE_PACKET_TYPE_IPV6              0X04
930 #define IXGBE_PACKET_TYPE_IPV6_TCP          0X14
931 #define IXGBE_PACKET_TYPE_IPV6_UDP          0X24
932 #define IXGBE_PACKET_TYPE_IPV6_EXT          0X0C
933 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP      0X1C
934 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP      0X2C
935 #define IXGBE_PACKET_TYPE_IPV4_IPV6         0X05
936 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP     0X15
937 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP     0X25
938 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
939 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
940 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
941 #define IXGBE_PACKET_TYPE_MAX               0X80
942 #define IXGBE_PACKET_TYPE_MASK              0X7F
943 #define IXGBE_PACKET_TYPE_SHIFT             0X04
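/*
 * Note (derived from the values above): after shifting pkt_info right by
 * IXGBE_PACKET_TYPE_SHIFT, the remaining bits appear to form a bit-field:
 * bit 0 IPv4, bit 1 IPv4 with options (EXT), bit 2 IPv6, bit 3 IPv6 with
 * extensions, bit 4 TCP, bit 5 UDP, bit 6 SCTP. For example,
 * IXGBE_PACKET_TYPE_IPV4_EXT (0x03) sets both the IPv4 and the EXT bits.
 */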
944
945 /* @note: fix ixgbe_dev_supported_ptypes_get() if anything changes here. */
946 static inline uint32_t
947 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
948 {
949         static const uint32_t
950                 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
951                 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
952                         RTE_PTYPE_L3_IPV4,
953                 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
954                         RTE_PTYPE_L3_IPV4_EXT,
955                 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
956                         RTE_PTYPE_L3_IPV6,
957                 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
958                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
959                         RTE_PTYPE_INNER_L3_IPV6,
960                 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
961                         RTE_PTYPE_L3_IPV6_EXT,
962                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
963                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
964                         RTE_PTYPE_INNER_L3_IPV6_EXT,
965                 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
966                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
967                 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
968                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
969                 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
970                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
971                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
972                 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
973                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
974                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
975                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
976                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
977                 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
978                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
979                 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
980                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
981                 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
982                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
983                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
984                 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
985                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
986                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
987                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
988                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
989                 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
990                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
991                 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
992                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
993         };
994         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
995                 return RTE_PTYPE_UNKNOWN;
996
997         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
998                                 IXGBE_PACKET_TYPE_MASK;
999
1000         return ptype_table[pkt_info];
1001 }
1002
1003 static inline uint64_t
1004 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1005 {
1006         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1007                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1008                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1009                 PKT_RX_RSS_HASH, 0, 0, 0,
1010                 0, 0, 0,  PKT_RX_FDIR,
1011         };
1012 #ifdef RTE_LIBRTE_IEEE1588
1013         static uint64_t ip_pkt_etqf_map[8] = {
1014                 0, 0, 0, PKT_RX_IEEE1588_PTP,
1015                 0, 0, 0, 0,
1016         };
1017
1018         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1019                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1020                                 ip_rss_types_map[pkt_info & 0XF];
1021         else
1022                 return ip_rss_types_map[pkt_info & 0XF];
1023 #else
1024         return ip_rss_types_map[pkt_info & 0XF];
1025 #endif
1026 }
1027
1028 static inline uint64_t
1029 rx_desc_status_to_pkt_flags(uint32_t rx_status)
1030 {
1031         uint64_t pkt_flags;
1032
1033         /*
1034          * Check only whether a VLAN tag is present.
1035          * Do not check whether the L3/L4 RX checksum was verified by the NIC;
1036          * that is indicated by the rte_eth_rxmode.hw_ip_checksum flag.
1037          */
1038         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
1039
1040 #ifdef RTE_LIBRTE_IEEE1588
1041         if (rx_status & IXGBE_RXD_STAT_TMST)
1042                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1043 #endif
1044         return pkt_flags;
1045 }
1046
1047 static inline uint64_t
1048 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1049 {
1050         uint64_t pkt_flags;
1051
1052         /*
1053          * Bit 31: IPE, IPv4 checksum error
1054          * Bit 30: L4I, L4 integrity error
1055          */
1056         static uint64_t error_to_pkt_flags_map[4] = {
1057                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
1058                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1059         };
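        /*
         * The map index is the two error bits described above shifted down
         * to bits [1:0]: bit 0 is the L4 integrity error and bit 1 the IPv4
         * checksum error, hence the ordering of the entries.
         */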
1060         pkt_flags = error_to_pkt_flags_map[(rx_status >>
1061                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1062
1063         if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1064             (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1065                 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1066         }
1067
1068         return pkt_flags;
1069 }
1070
1071 /*
1072  * LOOK_AHEAD defines how many desc statuses to check beyond the
1073  * current descriptor.
1074  * It must be a pound define for optimal performance.
1075  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1076  * function only works with LOOK_AHEAD=8.
1077  */
1078 #define LOOK_AHEAD 8
1079 #if (LOOK_AHEAD != 8)
1080 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1081 #endif
1082 static inline int
1083 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1084 {
1085         volatile union ixgbe_adv_rx_desc *rxdp;
1086         struct ixgbe_rx_entry *rxep;
1087         struct rte_mbuf *mb;
1088         uint16_t pkt_len;
1089         uint64_t pkt_flags;
1090         int nb_dd;
1091         uint32_t s[LOOK_AHEAD];
1092         uint16_t pkt_info[LOOK_AHEAD];
1093         int i, j, nb_rx = 0;
1094         uint32_t status;
1095
1096         /* get references to current descriptor and S/W ring entry */
1097         rxdp = &rxq->rx_ring[rxq->rx_tail];
1098         rxep = &rxq->sw_ring[rxq->rx_tail];
1099
1100         status = rxdp->wb.upper.status_error;
1101         /* check to make sure there is at least 1 packet to receive */
1102         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1103                 return 0;
1104
1105         /*
1106          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1107          * reference packets that are ready to be received.
1108          */
1109         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1110              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1111         {
1112                 /* Read desc statuses backwards to avoid race condition */
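                /*
                 * The NIC completes descriptors in order, so reading the
                 * statuses backwards guarantees that once a DD bit is seen
                 * set, every earlier descriptor (read afterwards) is also
                 * complete; the done descriptors thus form a prefix of s[].
                 */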
1113                 for (j = LOOK_AHEAD-1; j >= 0; --j)
1114                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1115
1116                 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1117                         pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1118                                                 hs_rss.pkt_info;
1119
1120                 /* Compute how many status bits were set */
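                /* IXGBE_RXDADV_STAT_DD is bit 0 of the status word, so each
                 * AND below contributes either 0 or 1 to the count. */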
1121                 nb_dd = 0;
1122                 for (j = 0; j < LOOK_AHEAD; ++j)
1123                         nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
1124
1125                 nb_rx += nb_dd;
1126
1127                 /* Translate descriptor info to mbuf format */
1128                 for (j = 0; j < nb_dd; ++j) {
1129                         mb = rxep[j].mbuf;
1130                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1131                                   rxq->crc_len;
1132                         mb->data_len = pkt_len;
1133                         mb->pkt_len = pkt_len;
1134                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1135
1136                         /* convert descriptor fields to rte mbuf flags */
1137                         pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1138                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1139                         pkt_flags |=
1140                                 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1141                         mb->ol_flags = pkt_flags;
1142                         mb->packet_type =
1143                                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1144
1145                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1146                                 mb->hash.rss = rte_le_to_cpu_32(
1147                                     rxdp[j].wb.lower.hi_dword.rss);
1148                         else if (pkt_flags & PKT_RX_FDIR) {
1149                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1150                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1151                                     IXGBE_ATR_HASH_MASK;
1152                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1153                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1154                         }
1155                 }
1156
1157                 /* Move mbuf pointers from the S/W ring to the stage */
1158                 for (j = 0; j < LOOK_AHEAD; ++j) {
1159                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1160                 }
1161
1162                 /* stop scanning if not all LOOK_AHEAD descriptors in this group were done */
1163                 if (nb_dd != LOOK_AHEAD)
1164                         break;
1165         }
1166
1167         /* clear software ring entries so we can cleanup correctly */
1168         for (i = 0; i < nb_rx; ++i) {
1169                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1170         }
1171
1172
1173         return nb_rx;
1174 }
1175
1176 static inline int
1177 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1178 {
1179         volatile union ixgbe_adv_rx_desc *rxdp;
1180         struct ixgbe_rx_entry *rxep;
1181         struct rte_mbuf *mb;
1182         uint16_t alloc_idx;
1183         __le64 dma_addr;
1184         int diag, i;
1185
1186         /* allocate buffers in bulk directly into the S/W ring */
1187         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1188         rxep = &rxq->sw_ring[alloc_idx];
1189         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1190                                     rxq->rx_free_thresh);
1191         if (unlikely(diag != 0))
1192                 return -ENOMEM;
1193
1194         rxdp = &rxq->rx_ring[alloc_idx];
1195         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1196                 /* populate the static rte mbuf fields */
1197                 mb = rxep[i].mbuf;
1198                 if (reset_mbuf) {
1199                         mb->next = NULL;
1200                         mb->nb_segs = 1;
1201                         mb->port = rxq->port_id;
1202                 }
1203
1204                 rte_mbuf_refcnt_set(mb, 1);
1205                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1206
1207                 /* populate the descriptors */
1208                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
1209                 rxdp[i].read.hdr_addr = 0;
1210                 rxdp[i].read.pkt_addr = dma_addr;
1211         }
1212
1213         /* update state of internal queue structure */
1214         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1215         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1216                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1217
1218         /* no errors */
1219         return 0;
1220 }
1221
1222 static inline uint16_t
1223 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1224                          uint16_t nb_pkts)
1225 {
1226         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1227         int i;
1228
1229         /* how many packets are ready to return? */
1230         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1231
1232         /* copy mbuf pointers to the application's packet list */
1233         for (i = 0; i < nb_pkts; ++i)
1234                 rx_pkts[i] = stage[i];
1235
1236         /* update internal queue state */
1237         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1238         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1239
1240         return nb_pkts;
1241 }
1242
1243 static inline uint16_t
1244 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1245              uint16_t nb_pkts)
1246 {
1247         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1248         uint16_t nb_rx = 0;
1249
1250         /* Any previously recv'd pkts will be returned from the Rx stage */
1251         if (rxq->rx_nb_avail)
1252                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1253
1254         /* Scan the H/W ring for packets to receive */
1255         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1256
1257         /* update internal queue state */
1258         rxq->rx_next_avail = 0;
1259         rxq->rx_nb_avail = nb_rx;
1260         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1261
1262         /* if required, allocate new buffers to replenish descriptors */
1263         if (rxq->rx_tail > rxq->rx_free_trigger) {
1264                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1265
1266                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1267                         int i, j;
1268                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1269                                    "queue_id=%u", (unsigned) rxq->port_id,
1270                                    (unsigned) rxq->queue_id);
1271
1272                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1273                                 rxq->rx_free_thresh;
1274
1275                         /*
1276                          * Need to rewind any previous receives if we cannot
1277                          * allocate new buffers to replenish the old ones.
1278                          */
1279                         rxq->rx_nb_avail = 0;
1280                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1281                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1282                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1283
1284                         return 0;
1285                 }
1286
1287                 /* update tail pointer */
1288                 rte_wmb();
1289                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1290         }
1291
1292         if (rxq->rx_tail >= rxq->nb_rx_desc)
1293                 rxq->rx_tail = 0;
1294
1295         /* received any packets this loop? */
1296         if (rxq->rx_nb_avail)
1297                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1298
1299         return 0;
1300 }
1301
1302 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1303 uint16_t
1304 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1305                            uint16_t nb_pkts)
1306 {
1307         uint16_t nb_rx;
1308
1309         if (unlikely(nb_pkts == 0))
1310                 return 0;
1311
1312         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1313                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1314
1315         /* request is relatively large, chunk it up */
1316         nb_rx = 0;
1317         while (nb_pkts) {
1318                 uint16_t ret, n;
1319                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1320                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1321                 nb_rx = (uint16_t)(nb_rx + ret);
1322                 nb_pkts = (uint16_t)(nb_pkts - ret);
1323                 if (ret < n)
1324                         break;
1325         }
1326
1327         return nb_rx;
1328 }
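
/*
 * Illustrative sketch (not part of the driver): this handler is normally
 * reached through rte_eth_rx_burst(), which dispatches to the rx_pkt_burst
 * callback selected for the port. A minimal application-side polling loop,
 * assuming the port and queue are already configured and started, could look
 * like this ("BURST_SIZE" and process_packet() are hypothetical and exist
 * only for the example; bursts larger than RTE_PMD_IXGBE_RX_MAX_BURST are
 * simply chunked by the code above):
 *
 *      struct rte_mbuf *pkts[BURST_SIZE];
 *      uint16_t nb, i;
 *
 *      for (;;) {
 *              nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
 *              for (i = 0; i < nb; i++) {
 *                      process_packet(pkts[i]);
 *                      rte_pktmbuf_free(pkts[i]);
 *              }
 *      }
 */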
1329
1330 uint16_t
1331 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1332                 uint16_t nb_pkts)
1333 {
1334         struct ixgbe_rx_queue *rxq;
1335         volatile union ixgbe_adv_rx_desc *rx_ring;
1336         volatile union ixgbe_adv_rx_desc *rxdp;
1337         struct ixgbe_rx_entry *sw_ring;
1338         struct ixgbe_rx_entry *rxe;
1339         struct rte_mbuf *rxm;
1340         struct rte_mbuf *nmb;
1341         union ixgbe_adv_rx_desc rxd;
1342         uint64_t dma_addr;
1343         uint32_t staterr;
1344         uint32_t pkt_info;
1345         uint16_t pkt_len;
1346         uint16_t rx_id;
1347         uint16_t nb_rx;
1348         uint16_t nb_hold;
1349         uint64_t pkt_flags;
1350
1351         nb_rx = 0;
1352         nb_hold = 0;
1353         rxq = rx_queue;
1354         rx_id = rxq->rx_tail;
1355         rx_ring = rxq->rx_ring;
1356         sw_ring = rxq->sw_ring;
1357         while (nb_rx < nb_pkts) {
1358                 /*
1359                  * The order of operations here is important as the DD status
1360                  * bit must not be read after any other descriptor fields.
1361                  * rx_ring and rxdp are pointing to volatile data so the order
1362                  * of accesses cannot be reordered by the compiler. If they were
1363                  * not volatile, they could be reordered which could lead to
1364                  * using invalid descriptor fields when read from rxd.
1365                  */
1366                 rxdp = &rx_ring[rx_id];
1367                 staterr = rxdp->wb.upper.status_error;
1368                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1369                         break;
1370                 rxd = *rxdp;
1371
1372                 /*
1373                  * End of packet.
1374                  *
1375                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1376                  * is likely to be invalid and to be dropped by the various
1377                  * validation checks performed by the network stack.
1378                  *
1379                  * Allocate a new mbuf to replenish the RX ring descriptor.
1380                  * If the allocation fails:
1381                  *    - arrange for that RX descriptor to be the first one
1382                  *      being parsed the next time the receive function is
1383                  *      invoked [on the same queue].
1384                  *
1385                  *    - Stop parsing the RX ring and return immediately.
1386                  *
1387                  * This policy does not drop the packet received in the RX
1388                  * descriptor for which the allocation of a new mbuf failed.
1389                  * Thus, it allows that packet to be retrieved later once
1390                  * mbufs have been freed in the meantime.
1391                  * As a side effect, holding RX descriptors instead of
1392                  * systematically giving them back to the NIC may lead to
1393                  * RX ring exhaustion situations.
1394                  * However, the NIC can gracefully prevent such situations
1395                  * from happening by sending specific "back-pressure" flow control
1396                  * frames to its peer(s).
1397                  */
1398                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1399                            "ext_err_stat=0x%08x pkt_len=%u",
1400                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1401                            (unsigned) rx_id, (unsigned) staterr,
1402                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1403
1404                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1405                 if (nmb == NULL) {
1406                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1407                                    "queue_id=%u", (unsigned) rxq->port_id,
1408                                    (unsigned) rxq->queue_id);
1409                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1410                         break;
1411                 }
1412
1413                 nb_hold++;
1414                 rxe = &sw_ring[rx_id];
1415                 rx_id++;
1416                 if (rx_id == rxq->nb_rx_desc)
1417                         rx_id = 0;
1418
1419                 /* Prefetch next mbuf while processing current one. */
1420                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1421
1422                 /*
1423                  * When next RX descriptor is on a cache-line boundary,
1424                  * prefetch the next 4 RX descriptors and the next 8 pointers
1425                  * to mbufs.
1426                  */
1427                 if ((rx_id & 0x3) == 0) {
1428                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1429                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1430                 }
1431
1432                 rxm = rxe->mbuf;
1433                 rxe->mbuf = nmb;
1434                 dma_addr =
1435                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1436                 rxdp->read.hdr_addr = 0;
1437                 rxdp->read.pkt_addr = dma_addr;
1438
1439                 /*
1440                  * Initialize the returned mbuf.
1441                  * 1) setup generic mbuf fields:
1442                  *    - number of segments,
1443                  *    - next segment,
1444                  *    - packet length,
1445                  *    - RX port identifier.
1446                  * 2) integrate hardware offload data, if any:
1447                  *    - RSS flag & hash,
1448                  *    - IP checksum flag,
1449                  *    - VLAN TCI, if any,
1450                  *    - error flags.
1451                  */
1452                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1453                                       rxq->crc_len);
1454                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1455                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1456                 rxm->nb_segs = 1;
1457                 rxm->next = NULL;
1458                 rxm->pkt_len = pkt_len;
1459                 rxm->data_len = pkt_len;
1460                 rxm->port = rxq->port_id;
1461
1462                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1463                                                                 pkt_info);
1464                 /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags */
1465                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1466
1467                 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1468                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1469                 pkt_flags = pkt_flags |
1470                         ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1471                 rxm->ol_flags = pkt_flags;
1472                 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1473
1474                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1475                         rxm->hash.rss = rte_le_to_cpu_32(
1476                                                 rxd.wb.lower.hi_dword.rss);
1477                 else if (pkt_flags & PKT_RX_FDIR) {
1478                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1479                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1480                                         IXGBE_ATR_HASH_MASK;
1481                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1482                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1483                 }
1484                 /*
1485                  * Store the mbuf address into the next entry of the array
1486                  * of returned packets.
1487                  */
1488                 rx_pkts[nb_rx++] = rxm;
1489         }
1490         rxq->rx_tail = rx_id;
1491
1492         /*
1493          * If the number of free RX descriptors is greater than the RX free
1494          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1495          * register.
1496          * Update the RDT with the value of the last processed RX descriptor
1497          * minus 1, to guarantee that the RDT register is never equal to the
1498          * RDH register, which creates a "full" ring situation from the
1499          * hardware point of view...
1500          */
1501         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1502         if (nb_hold > rxq->rx_free_thresh) {
1503                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1504                            "nb_hold=%u nb_rx=%u",
1505                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1506                            (unsigned) rx_id, (unsigned) nb_hold,
1507                            (unsigned) nb_rx);
1508                 rx_id = (uint16_t) ((rx_id == 0) ?
1509                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1510                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1511                 nb_hold = 0;
1512         }
1513         rxq->nb_rx_hold = nb_hold;
1514         return nb_rx;
1515 }
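
/*
 * Illustrative sketch (not part of the driver): how an application might
 * consume the offload information filled in by the receive function above.
 * "m" is assumed to be an mbuf returned by rte_eth_rx_burst() on an ixgbe
 * port; nb_workers, worker, flow_id and vlan are hypothetical application
 * variables used only for the example.
 *
 *      if (m->ol_flags & PKT_RX_RSS_HASH)
 *              worker = m->hash.rss % nb_workers;
 *      else if (m->ol_flags & PKT_RX_FDIR)
 *              flow_id = m->hash.fdir.id;
 *      if (m->ol_flags & PKT_RX_VLAN_PKT)
 *              vlan = m->vlan_tci;
 */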
1516
1517 /**
1518  * Return the RSC count of a descriptor (non-zero means an RSC descriptor).
1519  */
1520 static inline uint32_t
1521 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1522 {
1523         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1524                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1525 }
1526
1527 /**
1528  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1529  *
1530  * Fill the following info in the HEAD buffer of the Rx cluster:
1531  *    - RX port identifier
1532  *    - hardware offload data, if any:
1533  *      - RSS flag & hash
1534  *      - IP checksum flag
1535  *      - VLAN TCI, if any
1536  *      - error flags
1537  * @head HEAD of the packet cluster
1538  * @desc HW descriptor to get data from
1539  * @port_id Port ID of the Rx queue
1540  */
1541 static inline void
1542 ixgbe_fill_cluster_head_buf(
1543         struct rte_mbuf *head,
1544         union ixgbe_adv_rx_desc *desc,
1545         uint8_t port_id,
1546         uint32_t staterr)
1547 {
1548         uint16_t pkt_info;
1549         uint64_t pkt_flags;
1550
1551         head->port = port_id;
1552
1553         /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1554          * set in the pkt_flags field.
1555          */
1556         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1557         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1558         pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1559         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1560         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1561         head->ol_flags = pkt_flags;
1562         head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1563
1564         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1565                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1566         else if (pkt_flags & PKT_RX_FDIR) {
1567                 head->hash.fdir.hash =
1568                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1569                                                           & IXGBE_ATR_HASH_MASK;
1570                 head->hash.fdir.id =
1571                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1572         }
1573 }
1574
1575 /**
1576  * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1577  *
1578  * @rx_queue Rx queue handle
1579  * @rx_pkts table of received packets
1580  * @nb_pkts size of rx_pkts table
1581  * @bulk_alloc if TRUE, bulk allocation is used to refill the HW ring
1582  *
1583  * Handles the Rx HW ring completions when the RSC feature is configured.
1584  * Uses an additional ring of ixgbe_scattered_rx_entry's holding the RSC info.
1585  *
1586  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1587  * 1) When non-EOP RSC completion arrives:
1588  *    a) Update the HEAD of the current RSC aggregation cluster with the new
1589  *       segment's data length.
1590  *    b) Set the "next" pointer of the current segment to point to the segment
1591  *       at the NEXTP index.
1592  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1593  *       in the sw_sc_ring.
1594  * 2) When EOP arrives we just update the cluster's total length and offload
1595  *    flags and deliver the cluster up to the upper layers. In our case - put it
1596  *    in the rx_pkts table.
1597  *
1598  * Returns the number of received packets/clusters (according to the "bulk
1599  * receive" interface).
1600  */
1601 static inline uint16_t
1602 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1603                     bool bulk_alloc)
1604 {
1605         struct ixgbe_rx_queue *rxq = rx_queue;
1606         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1607         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1608         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1609         uint16_t rx_id = rxq->rx_tail;
1610         uint16_t nb_rx = 0;
1611         uint16_t nb_hold = rxq->nb_rx_hold;
1612         uint16_t prev_id = rxq->rx_tail;
1613
1614         while (nb_rx < nb_pkts) {
1615                 bool eop;
1616                 struct ixgbe_rx_entry *rxe;
1617                 struct ixgbe_scattered_rx_entry *sc_entry;
1618                 struct ixgbe_scattered_rx_entry *next_sc_entry;
1619                 struct ixgbe_rx_entry *next_rxe;
1620                 struct rte_mbuf *first_seg;
1621                 struct rte_mbuf *rxm;
1622                 struct rte_mbuf *nmb;
1623                 union ixgbe_adv_rx_desc rxd;
1624                 uint16_t data_len;
1625                 uint16_t next_id;
1626                 volatile union ixgbe_adv_rx_desc *rxdp;
1627                 uint32_t staterr;
1628
1629 next_desc:
1630                 /*
1631                  * The code in this whole file uses the volatile pointer to
1632                  * ensure the read ordering of the status and the rest of the
1633                  * descriptor fields (on the compiler level only!!!). This is so
1634                  * UGLY - why not just use a compiler barrier instead? DPDK
1635                  * even has the rte_compiler_barrier() for that.
1636                  *
1637                  * But most importantly this is just wrong because this doesn't
1638                  * ensure memory ordering in a general case at all. For
1639                  * instance, DPDK is supposed to work on Power CPUs where
1640                  * a compiler barrier may just not be enough!
1641                  *
1642                  * I tried to write only this function properly to have a
1643                  * starting point (as a part of an LRO/RSC series) but the
1644                  * compiler cursed at me when I tried to cast away the
1645                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1646                  * keeping it the way it is for now.
1647                  *
1648                  * The code in this file is broken in so many other places and
1649                  * will just not work on a big endian CPU anyway therefore the
1650                  * lines below will have to be revisited together with the rest
1651                  * of the ixgbe PMD.
1652                  *
1653                  * TODO:
1654                  *    - Get rid of "volatile" crap and let the compiler do its
1655                  *      job.
1656                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
1657                  *      memory ordering below.
1658                  */
1659                 rxdp = &rx_ring[rx_id];
1660                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1661
1662                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1663                         break;
1664
1665                 rxd = *rxdp;
1666
1667                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1668                                   "staterr=0x%x data_len=%u",
1669                            rxq->port_id, rxq->queue_id, rx_id, staterr,
1670                            rte_le_to_cpu_16(rxd.wb.upper.length));
1671
1672                 if (!bulk_alloc) {
1673                         nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1674                         if (nmb == NULL) {
1675                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1676                                                   "port_id=%u queue_id=%u",
1677                                            rxq->port_id, rxq->queue_id);
1678
1679                                 rte_eth_devices[rxq->port_id].data->
1680                                                         rx_mbuf_alloc_failed++;
1681                                 break;
1682                         }
1683                 }
1684                 else if (nb_hold > rxq->rx_free_thresh) {
1685                         uint16_t next_rdt = rxq->rx_free_trigger;
1686
1687                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1688                                 rte_wmb();
1689                                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1690                                                     next_rdt);
1691                                 nb_hold -= rxq->rx_free_thresh;
1692                         } else {
1693                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1694                                                   "port_id=%u queue_id=%u",
1695                                            rxq->port_id, rxq->queue_id);
1696
1697                                 rte_eth_devices[rxq->port_id].data->
1698                                                         rx_mbuf_alloc_failed++;
1699                                 break;
1700                         }
1701                 }
1702
1703                 nb_hold++;
1704                 rxe = &sw_ring[rx_id];
1705                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1706
1707                 next_id = rx_id + 1;
1708                 if (next_id == rxq->nb_rx_desc)
1709                         next_id = 0;
1710
1711                 /* Prefetch next mbuf while processing current one. */
1712                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1713
1714                 /*
1715                  * When next RX descriptor is on a cache-line boundary,
1716                  * prefetch the next 4 RX descriptors and the next 4 pointers
1717                  * to mbufs.
1718                  */
1719                 if ((next_id & 0x3) == 0) {
1720                         rte_ixgbe_prefetch(&rx_ring[next_id]);
1721                         rte_ixgbe_prefetch(&sw_ring[next_id]);
1722                 }
1723
1724                 rxm = rxe->mbuf;
1725
1726                 if (!bulk_alloc) {
1727                         __le64 dma =
1728                           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1729                         /*
1730                          * Update RX descriptor with the physical address of the
1731                          * new data buffer of the newly allocated mbuf.
1732                          */
1733                         rxe->mbuf = nmb;
1734
1735                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
1736                         rxdp->read.hdr_addr = 0;
1737                         rxdp->read.pkt_addr = dma;
1738                 } else
1739                         rxe->mbuf = NULL;
1740
1741                 /*
1742                  * Set data length & data buffer address of mbuf.
1743                  */
1744                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1745                 rxm->data_len = data_len;
1746
1747                 if (!eop) {
1748                         uint16_t nextp_id;
1749                         /*
1750                          * Get next descriptor index:
1751                          *  - For RSC it's in the NEXTP field.
1752                          *  - For a scattered packet - it's just a following
1753                          *    descriptor.
1754                          */
1755                         if (ixgbe_rsc_count(&rxd))
1756                                 nextp_id =
1757                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1758                                                        IXGBE_RXDADV_NEXTP_SHIFT;
1759                         else
1760                                 nextp_id = next_id;
1761
1762                         next_sc_entry = &sw_sc_ring[nextp_id];
1763                         next_rxe = &sw_ring[nextp_id];
1764                         rte_ixgbe_prefetch(next_rxe);
1765                 }
1766
1767                 sc_entry = &sw_sc_ring[rx_id];
1768                 first_seg = sc_entry->fbuf;
1769                 sc_entry->fbuf = NULL;
1770
1771                 /*
1772                  * If this is the first buffer of the received packet,
1773                  * set the pointer to the first mbuf of the packet and
1774                  * initialize its context.
1775                  * Otherwise, update the total length and the number of segments
1776                  * of the current scattered packet, and update the pointer to
1777                  * the last mbuf of the current packet.
1778                  */
1779                 if (first_seg == NULL) {
1780                         first_seg = rxm;
1781                         first_seg->pkt_len = data_len;
1782                         first_seg->nb_segs = 1;
1783                 } else {
1784                         first_seg->pkt_len += data_len;
1785                         first_seg->nb_segs++;
1786                 }
1787
1788                 prev_id = rx_id;
1789                 rx_id = next_id;
1790
1791                 /*
1792                  * If this is not the last buffer of the received packet, update
1793                  * the pointer to the first mbuf at the NEXTP entry in the
1794                  * sw_sc_ring and continue to parse the RX ring.
1795                  */
1796                 if (!eop) {
1797                         rxm->next = next_rxe->mbuf;
1798                         next_sc_entry->fbuf = first_seg;
1799                         goto next_desc;
1800                 }
1801
1802                 /*
1803                  * This is the last buffer of the received packet - return
1804                  * the current cluster to the user.
1805                  */
1806                 rxm->next = NULL;
1807
1808                 /* Initialize the first mbuf of the returned packet */
1809                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1810                                             staterr);
1811
1812                 /*
1813                  * Deal with the case when HW CRC stripping is disabled.
1814                  * That can't happen when LRO is enabled, but it could still
1815                  * happen in scattered RX mode.
1816                  */
1817                 first_seg->pkt_len -= rxq->crc_len;
1818                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1819                         struct rte_mbuf *lp;
1820
1821                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
1822                                 ;
1823
1824                         first_seg->nb_segs--;
1825                         lp->data_len -= rxq->crc_len - rxm->data_len;
1826                         lp->next = NULL;
1827                         rte_pktmbuf_free_seg(rxm);
1828                 } else
1829                         rxm->data_len -= rxq->crc_len;
1830
1831                 /* Prefetch data of first segment, if configured to do so. */
1832                 rte_packet_prefetch((char *)first_seg->buf_addr +
1833                         first_seg->data_off);
1834
1835                 /*
1836                  * Store the mbuf address into the next entry of the array
1837                  * of returned packets.
1838                  */
1839                 rx_pkts[nb_rx++] = first_seg;
1840         }
1841
1842         /*
1843          * Record index of the next RX descriptor to probe.
1844          */
1845         rxq->rx_tail = rx_id;
1846
1847         /*
1848          * If the number of free RX descriptors is greater than the RX free
1849          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1850          * register.
1851          * Update the RDT with the value of the last processed RX descriptor
1852          * minus 1, to guarantee that the RDT register is never equal to the
1853          * RDH register, which creates a "full" ring situation from the
1854          * hardware point of view...
1855          */
1856         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1857                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1858                            "nb_hold=%u nb_rx=%u",
1859                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1860
1861                 rte_wmb();
1862                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1863                 nb_hold = 0;
1864         }
1865
1866         rxq->nb_rx_hold = nb_hold;
1867         return nb_rx;
1868 }
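
/*
 * Illustrative sketch (not part of the driver): the clusters returned by the
 * LRO/scattered handler above are ordinary multi-segment mbufs, so an
 * application can walk them through the pkt_len/nb_segs/next fields set
 * above. "m" is assumed to be a cluster returned by rte_eth_rx_burst().
 *
 *      struct rte_mbuf *seg;
 *      uint32_t total = 0;
 *
 *      for (seg = m; seg != NULL; seg = seg->next)
 *              total += seg->data_len;
 *      // total now equals m->pkt_len and m->nb_segs segments were visited
 */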
1869
1870 uint16_t
1871 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1872                                  uint16_t nb_pkts)
1873 {
1874         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1875 }
1876
1877 uint16_t
1878 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1879                                uint16_t nb_pkts)
1880 {
1881         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1882 }
1883
1884 /*********************************************************************
1885  *
1886  *  Queue management functions
1887  *
1888  **********************************************************************/
1889
1890 static void __attribute__((cold))
1891 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1892 {
1893         unsigned i;
1894
1895         if (txq->sw_ring != NULL) {
1896                 for (i = 0; i < txq->nb_tx_desc; i++) {
1897                         if (txq->sw_ring[i].mbuf != NULL) {
1898                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1899                                 txq->sw_ring[i].mbuf = NULL;
1900                         }
1901                 }
1902         }
1903 }
1904
1905 static void __attribute__((cold))
1906 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1907 {
1908         if (txq != NULL &&
1909             txq->sw_ring != NULL)
1910                 rte_free(txq->sw_ring);
1911 }
1912
1913 static void __attribute__((cold))
1914 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1915 {
1916         if (txq != NULL && txq->ops != NULL) {
1917                 txq->ops->release_mbufs(txq);
1918                 txq->ops->free_swring(txq);
1919                 rte_free(txq);
1920         }
1921 }
1922
1923 void __attribute__((cold))
1924 ixgbe_dev_tx_queue_release(void *txq)
1925 {
1926         ixgbe_tx_queue_release(txq);
1927 }
1928
1929 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1930 static void __attribute__((cold))
1931 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1932 {
1933         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1934         struct ixgbe_tx_entry *txe = txq->sw_ring;
1935         uint16_t prev, i;
1936
1937         /* Zero out HW ring memory */
1938         for (i = 0; i < txq->nb_tx_desc; i++) {
1939                 txq->tx_ring[i] = zeroed_desc;
1940         }
1941
1942         /* Initialize SW ring entries */
1943         prev = (uint16_t) (txq->nb_tx_desc - 1);
1944         for (i = 0; i < txq->nb_tx_desc; i++) {
1945                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1946                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1947                 txe[i].mbuf = NULL;
1948                 txe[i].last_id = i;
1949                 txe[prev].next_id = i;
1950                 prev = i;
1951         }
1952
1953         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1954         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1955
1956         txq->tx_tail = 0;
1957         txq->nb_tx_used = 0;
1958         /*
1959          * Always allow 1 descriptor to be un-allocated to avoid
1960          * a H/W race condition
1961          */
1962         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1963         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1964         txq->ctx_curr = 0;
1965         memset((void*)&txq->ctx_cache, 0,
1966                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1967 }
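
/*
 * Worked example (illustrative only): for a hypothetical ring of 4 Tx
 * descriptors the loop above links the SW entries into a circle:
 *      txe[3].next_id = 0, txe[0].next_id = 1,
 *      txe[1].next_id = 2, txe[2].next_id = 3,
 * every last_id is initialized to the entry's own index, and every
 * descriptor starts with the DD bit set so it looks "done" to the Tx
 * cleanup logic until it is actually used.
 */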
1968
1969 static const struct ixgbe_txq_ops def_txq_ops = {
1970         .release_mbufs = ixgbe_tx_queue_release_mbufs,
1971         .free_swring = ixgbe_tx_free_swring,
1972         .reset = ixgbe_reset_tx_queue,
1973 };
1974
1975 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1976  * the queue parameters. Used in tx_queue_setup by primary process and then
1977  * in dev_init by secondary process when attaching to an existing ethdev.
1978  */
1979 void __attribute__((cold))
1980 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1981 {
1982         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1983         if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1984                         && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1985                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1986 #ifdef RTE_IXGBE_INC_VECTOR
1987                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1988                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1989                                         ixgbe_txq_vec_setup(txq) == 0)) {
1990                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1991                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1992                 } else
1993 #endif
1994                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1995         } else {
1996                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1997                 PMD_INIT_LOG(DEBUG,
1998                                 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1999                                 (unsigned long)txq->txq_flags,
2000                                 (unsigned long)IXGBE_SIMPLE_FLAGS);
2001                 PMD_INIT_LOG(DEBUG,
2002                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2003                                 (unsigned long)txq->tx_rs_thresh,
2004                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2005                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2006         }
2007 }
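
/*
 * Illustrative sketch (not part of the driver): to land on the simple (and
 * possibly vector) Tx path selected above, an application would typically
 * request a queue with no offloads, no multi-segment support and a large
 * enough tx_rs_thresh. The values below are an assumption that satisfies the
 * checks above, on the understanding that IXGBE_SIMPLE_FLAGS corresponds to
 * ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS:
 *
 *      struct rte_eth_txconf txconf = {
 *              .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                           ETH_TXQ_FLAGS_NOOFFLOADS,
 *              .tx_rs_thresh = 32,
 *              .tx_free_thresh = 32,
 *      };
 */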
2008
2009 int __attribute__((cold))
2010 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2011                          uint16_t queue_idx,
2012                          uint16_t nb_desc,
2013                          unsigned int socket_id,
2014                          const struct rte_eth_txconf *tx_conf)
2015 {
2016         const struct rte_memzone *tz;
2017         struct ixgbe_tx_queue *txq;
2018         struct ixgbe_hw     *hw;
2019         uint16_t tx_rs_thresh, tx_free_thresh;
2020
2021         PMD_INIT_FUNC_TRACE();
2022         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023
2024         /*
2025          * Validate number of transmit descriptors.
2026          * It must not exceed the hardware maximum and must be a multiple
2027          * of IXGBE_TXD_ALIGN.
2028          */
2029         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2030                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2031                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2032                 return -EINVAL;
2033         }
2034
2035         /*
2036          * The following two parameters control the setting of the RS bit on
2037          * transmit descriptors.
2038          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2039          * descriptors have been used.
2040          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2041          * descriptors are used or if the number of descriptors required
2042          * to transmit a packet is greater than the number of free TX
2043          * descriptors.
2044          * The following constraints must be satisfied:
2045          *  tx_rs_thresh must be greater than 0.
2046          *  tx_rs_thresh must be less than the size of the ring minus 2.
2047          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2048          *  tx_rs_thresh must be a divisor of the ring size.
2049          *  tx_free_thresh must be greater than 0.
2050          *  tx_free_thresh must be less than the size of the ring minus 3.
2051          * One descriptor in the TX ring is used as a sentinel to avoid a
2052          * H/W race condition, hence the maximum threshold constraints.
2053          * When set to zero use default values.
2054          */
2055         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2056                         tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2057         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2058                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2059         if (tx_rs_thresh >= (nb_desc - 2)) {
2060                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2061                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
2062                         "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2063                         (int)dev->data->port_id, (int)queue_idx);
2064                 return -(EINVAL);
2065         }
2066         if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2067                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2068                         "(tx_rs_thresh=%u port=%d queue=%d)",
2069                         DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2070                         (int)dev->data->port_id, (int)queue_idx);
2071                 return -(EINVAL);
2072         }
2073         if (tx_free_thresh >= (nb_desc - 3)) {
2074                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2075                              "number of TX descriptors minus 3. "
2076                              "(tx_free_thresh=%u "
2077                              "port=%d queue=%d)",
2078                              (unsigned int)tx_free_thresh,
2079                              (int)dev->data->port_id, (int)queue_idx);
2080                 return -(EINVAL);
2081         }
2082         if (tx_rs_thresh > tx_free_thresh) {
2083                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2084                              "tx_free_thresh. (tx_free_thresh=%u "
2085                              "tx_rs_thresh=%u port=%d queue=%d)",
2086                              (unsigned int)tx_free_thresh,
2087                              (unsigned int)tx_rs_thresh,
2088                              (int)dev->data->port_id,
2089                              (int)queue_idx);
2090                 return -(EINVAL);
2091         }
2092         if ((nb_desc % tx_rs_thresh) != 0) {
2093                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2094                              "number of TX descriptors. (tx_rs_thresh=%u "
2095                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2096                              (int)dev->data->port_id, (int)queue_idx);
2097                 return -(EINVAL);
2098         }
2099
2100         /*
2101          * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2102          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2103          * by the NIC and all descriptors are written back after the NIC
2104          * accumulates WTHRESH descriptors.
2105          */
2106         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2107                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2108                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2109                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2110                              (int)dev->data->port_id, (int)queue_idx);
2111                 return -(EINVAL);
2112         }
2113
2114         /* Free memory prior to re-allocation if needed... */
2115         if (dev->data->tx_queues[queue_idx] != NULL) {
2116                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2117                 dev->data->tx_queues[queue_idx] = NULL;
2118         }
2119
2120         /* First allocate the tx queue data structure */
2121         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2122                                  RTE_CACHE_LINE_SIZE, socket_id);
2123         if (txq == NULL)
2124                 return -ENOMEM;
2125
2126         /*
2127          * Allocate TX ring hardware descriptors. A memzone large enough to
2128          * handle the maximum ring size is allocated in order to allow for
2129          * resizing in later calls to the queue setup function.
2130          */
2131         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2132                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2133                         IXGBE_ALIGN, socket_id);
2134         if (tz == NULL) {
2135                 ixgbe_tx_queue_release(txq);
2136                 return -ENOMEM;
2137         }
2138
2139         txq->nb_tx_desc = nb_desc;
2140         txq->tx_rs_thresh = tx_rs_thresh;
2141         txq->tx_free_thresh = tx_free_thresh;
2142         txq->pthresh = tx_conf->tx_thresh.pthresh;
2143         txq->hthresh = tx_conf->tx_thresh.hthresh;
2144         txq->wthresh = tx_conf->tx_thresh.wthresh;
2145         txq->queue_id = queue_idx;
2146         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2147                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2148         txq->port_id = dev->data->port_id;
2149         txq->txq_flags = tx_conf->txq_flags;
2150         txq->ops = &def_txq_ops;
2151         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2152
2153         /*
2154          * Set VFTDT as the Tx tail register if a virtual function is detected
2155          */
2156         if (hw->mac.type == ixgbe_mac_82599_vf ||
2157             hw->mac.type == ixgbe_mac_X540_vf ||
2158             hw->mac.type == ixgbe_mac_X550_vf ||
2159             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2160             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2161                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2162         else
2163                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2164
2165         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2166         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2167
2168         /* Allocate software ring */
2169         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2170                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2171                                 RTE_CACHE_LINE_SIZE, socket_id);
2172         if (txq->sw_ring == NULL) {
2173                 ixgbe_tx_queue_release(txq);
2174                 return -ENOMEM;
2175         }
2176         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2177                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2178
2179         /* set up vector or scalar TX function as appropriate */
2180         ixgbe_set_tx_function(dev, txq);
2181
2182         txq->ops->reset(txq);
2183
2184         dev->data->tx_queues[queue_idx] = txq;
2185
2186
2187         return 0;
2188 }
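
/*
 * Illustrative sketch (not part of the driver): a Tx queue setup call that
 * satisfies the threshold constraints checked above, assuming port 0 has
 * already been configured with rte_eth_dev_configure(). With nb_desc = 512,
 * tx_rs_thresh = 32 and tx_free_thresh = 64: 32 < 512 - 2, 32 <= 64,
 * 64 < 512 - 3 and 512 % 32 == 0, so all checks pass (WTHRESH stays 0).
 *
 *      struct rte_eth_txconf txconf = {
 *              .tx_rs_thresh = 32,
 *              .tx_free_thresh = 64,
 *      };
 *
 *      if (rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), &txconf) != 0)
 *              rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */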
2189
2190 /**
2191  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2192  *
2193  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2194  * in the sw_sc_ring is not set to NULL but rather points to the next
2195  * mbuf of this RSC aggregation (that has not been completed yet and still
2196  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2197  * just free the first "nb_segs" segments of the cluster explicitly by calling
2198  * rte_pktmbuf_free_seg() on each of them.
2199  *
2200  * @m scattered cluster head
2201  */
2202 static void __attribute__((cold))
2203 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2204 {
2205         uint8_t i, nb_segs = m->nb_segs;
2206         struct rte_mbuf *next_seg;
2207
2208         for (i = 0; i < nb_segs; i++) {
2209                 next_seg = m->next;
2210                 rte_pktmbuf_free_seg(m);
2211                 m = next_seg;
2212         }
2213 }
2214
2215 static void __attribute__((cold))
2216 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2217 {
2218         unsigned i;
2219
2220 #ifdef RTE_IXGBE_INC_VECTOR
2221         /* SSE Vector driver has a different way of releasing mbufs. */
2222         if (rxq->rx_using_sse) {
2223                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2224                 return;
2225         }
2226 #endif
2227
2228         if (rxq->sw_ring != NULL) {
2229                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2230                         if (rxq->sw_ring[i].mbuf != NULL) {
2231                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2232                                 rxq->sw_ring[i].mbuf = NULL;
2233                         }
2234                 }
2235                 if (rxq->rx_nb_avail) {
2236                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2237                                 struct rte_mbuf *mb;
2238                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2239                                 rte_pktmbuf_free_seg(mb);
2240                         }
2241                         rxq->rx_nb_avail = 0;
2242                 }
2243         }
2244
2245         if (rxq->sw_sc_ring)
2246                 for (i = 0; i < rxq->nb_rx_desc; i++)
2247                         if (rxq->sw_sc_ring[i].fbuf) {
2248                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2249                                 rxq->sw_sc_ring[i].fbuf = NULL;
2250                         }
2251 }
2252
2253 static void __attribute__((cold))
2254 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2255 {
2256         if (rxq != NULL) {
2257                 ixgbe_rx_queue_release_mbufs(rxq);
2258                 rte_free(rxq->sw_ring);
2259                 rte_free(rxq->sw_sc_ring);
2260                 rte_free(rxq);
2261         }
2262 }
2263
2264 void __attribute__((cold))
2265 ixgbe_dev_rx_queue_release(void *rxq)
2266 {
2267         ixgbe_rx_queue_release(rxq);
2268 }
2269
2270 /*
2271  * Check if Rx Burst Bulk Alloc function can be used.
2272  * Return
2273  *        0: the preconditions are satisfied and the bulk allocation function
2274  *           can be used.
2275  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2276  *           function must be used.
2277  */
2278 static inline int __attribute__((cold))
2279 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2280 {
2281         int ret = 0;
2282
2283         /*
2284          * Make sure the following pre-conditions are satisfied:
2285          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2286          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2287          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2288          *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2289          * Scattered packets are not supported.  This should be checked
2290          * outside of this function.
2291          */
2292         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2293                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2294                              "rxq->rx_free_thresh=%d, "
2295                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2296                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2297                 ret = -EINVAL;
2298         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2299                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2300                              "rxq->rx_free_thresh=%d, "
2301                              "rxq->nb_rx_desc=%d",
2302                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2303                 ret = -EINVAL;
2304         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2305                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2306                              "rxq->nb_rx_desc=%d, "
2307                              "rxq->rx_free_thresh=%d",
2308                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2309                 ret = -EINVAL;
2310         } else if (!(rxq->nb_rx_desc <
2311                (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2312                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2313                              "rxq->nb_rx_desc=%d, "
2314                              "IXGBE_MAX_RING_DESC=%d, "
2315                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2316                              rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2317                              RTE_PMD_IXGBE_RX_MAX_BURST);
2318                 ret = -EINVAL;
2319         }
2320
2321         return ret;
2322 }
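
/*
 * Worked example (illustrative only): with nb_rx_desc = 512 and
 * rx_free_thresh = 64, and assuming the usual RTE_PMD_IXGBE_RX_MAX_BURST
 * value of 32, the preconditions above hold:
 *      64 >= 32, 64 < 512, 512 % 64 == 0, 512 < IXGBE_MAX_RING_DESC - 32,
 * so the bulk allocation receive path stays enabled for such a queue.
 */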
2323
2324 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2325 static void __attribute__((cold))
2326 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2327 {
2328         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2329         unsigned i;
2330         uint16_t len = rxq->nb_rx_desc;
2331
2332         /*
2333          * By default, the Rx queue setup function allocates enough memory for
2334          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2335          * extra memory at the end of the descriptor ring to be zero'd out. A
2336          * pre-condition for using the Rx burst bulk alloc function is that the
2337          * number of descriptors is less than or equal to
2338          * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2339          * constraints here to see if we need to zero out memory after the end
2340          * of the H/W descriptor ring.
2341          */
2342         if (adapter->rx_bulk_alloc_allowed)
2343                 /* zero out extra memory */
2344                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2345
2346         /*
2347          * Zero out HW ring memory. Zero out extra memory at the end of
2348          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2349          * reads extra memory as zeros.
2350          */
2351         for (i = 0; i < len; i++) {
2352                 rxq->rx_ring[i] = zeroed_desc;
2353         }
2354
2355         /*
2356          * initialize extra software ring entries. Space for these extra
2357          * entries is always allocated
2358          */
2359         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2360         for (i = rxq->nb_rx_desc; i < len; ++i) {
2361                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2362         }
2363
2364         rxq->rx_nb_avail = 0;
2365         rxq->rx_next_avail = 0;
2366         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2367         rxq->rx_tail = 0;
2368         rxq->nb_rx_hold = 0;
2369         rxq->pkt_first_seg = NULL;
2370         rxq->pkt_last_seg = NULL;
2371
2372 #ifdef RTE_IXGBE_INC_VECTOR
2373         rxq->rxrearm_start = 0;
2374         rxq->rxrearm_nb = 0;
2375 #endif
2376 }
2377
2378 int __attribute__((cold))
2379 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2380                          uint16_t queue_idx,
2381                          uint16_t nb_desc,
2382                          unsigned int socket_id,
2383                          const struct rte_eth_rxconf *rx_conf,
2384                          struct rte_mempool *mp)
2385 {
2386         const struct rte_memzone *rz;
2387         struct ixgbe_rx_queue *rxq;
2388         struct ixgbe_hw     *hw;
2389         uint16_t len;
2390         struct ixgbe_adapter *adapter =
2391                 (struct ixgbe_adapter *)dev->data->dev_private;
2392
2393         PMD_INIT_FUNC_TRACE();
2394         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2395
2396         /*
2397          * Validate number of receive descriptors.
2398          * It must not exceed the hardware maximum and must be a multiple
2399          * of IXGBE_RXD_ALIGN.
2400          */
2401         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2402                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2403                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2404                 return -EINVAL;
2405         }
2406
2407         /* Free memory prior to re-allocation if needed... */
2408         if (dev->data->rx_queues[queue_idx] != NULL) {
2409                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2410                 dev->data->rx_queues[queue_idx] = NULL;
2411         }
2412
2413         /* First allocate the rx queue data structure */
2414         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2415                                  RTE_CACHE_LINE_SIZE, socket_id);
2416         if (rxq == NULL)
2417                 return -ENOMEM;
2418         rxq->mb_pool = mp;
2419         rxq->nb_rx_desc = nb_desc;
2420         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2421         rxq->queue_id = queue_idx;
2422         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2423                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2424         rxq->port_id = dev->data->port_id;
2425         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2426                                                         0 : ETHER_CRC_LEN);
2427         rxq->drop_en = rx_conf->rx_drop_en;
2428         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2429
2430         /*
2431          * Allocate RX ring hardware descriptors. A memzone large enough to
2432          * handle the maximum ring size is allocated in order to allow for
2433          * resizing in later calls to the queue setup function.
2434          */
2435         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2436                                       RX_RING_SZ, IXGBE_ALIGN, socket_id);
2437         if (rz == NULL) {
2438                 ixgbe_rx_queue_release(rxq);
2439                 return -ENOMEM;
2440         }
2441
2442         /*
2443          * Zero init all the descriptors in the ring.
2444          */
2445         memset(rz->addr, 0, RX_RING_SZ);
2446
2447         /*
2448          * Set VFRDT/VFRDH as the Rx ring registers if a virtual function is detected
2449          */
2450         if (hw->mac.type == ixgbe_mac_82599_vf ||
2451             hw->mac.type == ixgbe_mac_X540_vf ||
2452             hw->mac.type == ixgbe_mac_X550_vf ||
2453             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2454             hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2455                 rxq->rdt_reg_addr =
2456                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2457                 rxq->rdh_reg_addr =
2458                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2459         }
2460         else {
2461                 rxq->rdt_reg_addr =
2462                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2463                 rxq->rdh_reg_addr =
2464                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2465         }
2466
2467         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2468         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2469
2470         /*
2471          * Certain constraints must be met in order to use the bulk buffer
2472          * allocation Rx burst function. If any Rx queue doesn't meet them,
2473          * the feature is disabled for the whole port.
2474          */
2475         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2476                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2477                                     "preconditions - canceling the feature for "
2478                                     "the whole port[%d]",
2479                              rxq->queue_id, rxq->port_id);
2480                 adapter->rx_bulk_alloc_allowed = false;
2481         }
2482
2483         /*
2484          * Allocate software ring. Allow for space at the end of the
2485          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2486          * function does not access an invalid memory region.
2487          */
2488         len = nb_desc;
2489         if (adapter->rx_bulk_alloc_allowed)
2490                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2491
2492         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2493                                           sizeof(struct ixgbe_rx_entry) * len,
2494                                           RTE_CACHE_LINE_SIZE, socket_id);
2495         if (!rxq->sw_ring) {
2496                 ixgbe_rx_queue_release(rxq);
2497                 return -ENOMEM;
2498         }
2499
2500         /*
2501          * Always allocate even if it's not going to be needed in order to
2502          * simplify the code.
2503          *
2504          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2505          * be requested in ixgbe_dev_rx_init(), which is called later from
2506          * dev_start() flow.
2507          */
2508         rxq->sw_sc_ring =
2509                 rte_zmalloc_socket("rxq->sw_sc_ring",
2510                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
2511                                    RTE_CACHE_LINE_SIZE, socket_id);
2512         if (!rxq->sw_sc_ring) {
2513                 ixgbe_rx_queue_release(rxq);
2514                 return -ENOMEM;
2515         }
2516
2517         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2518                             "dma_addr=0x%"PRIx64,
2519                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2520                      rxq->rx_ring_phys_addr);
2521
2522         if (!rte_is_power_of_2(nb_desc)) {
2523                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2524                                     "preconditions - canceling the feature for "
2525                                     "the whole port[%d]",
2526                              rxq->queue_id, rxq->port_id);
2527                 adapter->rx_vec_allowed = false;
2528         } else
2529                 ixgbe_rxq_vec_setup(rxq);
2530
2531         dev->data->rx_queues[queue_idx] = rxq;
2532
2533         ixgbe_reset_rx_queue(adapter, rxq);
2534
2535         return 0;
2536 }
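
/*
 * Illustrative sketch (not part of the driver): a matching application-side
 * Rx queue setup call. The mempool pointer and the sizes are assumptions;
 * rx_free_thresh = 64 keeps the bulk-alloc preconditions satisfied and
 * nb_desc = 512 is a power of two, so vector Rx is not disabled either.
 *
 *      struct rte_eth_rxconf rxconf = {
 *              .rx_free_thresh = 64,
 *      };
 *
 *      if (rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(),
 *                                 &rxconf, mbuf_pool) != 0)
 *              rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */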
2537
2538 uint32_t
2539 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2540 {
2541 #define IXGBE_RXQ_SCAN_INTERVAL 4
2542         volatile union ixgbe_adv_rx_desc *rxdp;
2543         struct ixgbe_rx_queue *rxq;
2544         uint32_t desc = 0;
2545
2546         if (rx_queue_id >= dev->data->nb_rx_queues) {
2547                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2548                 return 0;
2549         }
2550
2551         rxq = dev->data->rx_queues[rx_queue_id];
2552         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2553
2554         while ((desc < rxq->nb_rx_desc) &&
2555                 (rxdp->wb.upper.status_error &
2556                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2557                 desc += IXGBE_RXQ_SCAN_INTERVAL;
2558                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2559                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2560                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2561                                 desc - rxq->nb_rx_desc]);
2562         }
2563
2564         return desc;
2565 }
2566
2567 int
2568 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2569 {
2570         volatile union ixgbe_adv_rx_desc *rxdp;
2571         struct ixgbe_rx_queue *rxq = rx_queue;
2572         uint32_t desc;
2573
2574         if (unlikely(offset >= rxq->nb_rx_desc))
2575                 return 0;
2576         desc = rxq->rx_tail + offset;
2577         if (desc >= rxq->nb_rx_desc)
2578                 desc -= rxq->nb_rx_desc;
2579
2580         rxdp = &rxq->rx_ring[desc];
2581         return !!(rxdp->wb.upper.status_error &
2582                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2583 }
2584
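/*
 * Minimal usage sketch for the two query callbacks above, as seen from an
 * application through the generic ethdev API (port_id and rx_queue_id are
 * hypothetical values):
 *
 *	uint32_t nb_hold = rte_eth_rx_queue_count(port_id, rx_queue_id);
 *	int done = rte_eth_rx_descriptor_done(port_id, rx_queue_id, 0);
 *
 * rte_eth_rx_queue_count() reports how many descriptors past rx_tail have
 * their DD bit set (scanned in steps of IXGBE_RXQ_SCAN_INTERVAL), while
 * rte_eth_rx_descriptor_done() checks a single descriptor at the given
 * offset from rx_tail.
 */
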
2585 void __attribute__((cold))
2586 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2587 {
2588         unsigned i;
2589         struct ixgbe_adapter *adapter =
2590                 (struct ixgbe_adapter *)dev->data->dev_private;
2591
2592         PMD_INIT_FUNC_TRACE();
2593
2594         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2595                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2596                 if (txq != NULL) {
2597                         txq->ops->release_mbufs(txq);
2598                         txq->ops->reset(txq);
2599                 }
2600         }
2601
2602         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2603                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2604                 if (rxq != NULL) {
2605                         ixgbe_rx_queue_release_mbufs(rxq);
2606                         ixgbe_reset_rx_queue(adapter, rxq);
2607                 }
2608         }
2609 }
2610
2611 void
2612 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2613 {
2614         unsigned i;
2615
2616         PMD_INIT_FUNC_TRACE();
2617
2618         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2619                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2620                 dev->data->rx_queues[i] = NULL;
2621         }
2622         dev->data->nb_rx_queues = 0;
2623
2624         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2625                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2626                 dev->data->tx_queues[i] = NULL;
2627         }
2628         dev->data->nb_tx_queues = 0;
2629 }
2630
2631 /*********************************************************************
2632  *
2633  *  Device RX/TX init functions
2634  *
2635  **********************************************************************/
2636
2637 /**
2638  * Receive Side Scaling (RSS)
2639  * See section 7.1.2.8 in the following document:
2640  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2641  *
2642  * Principles:
2643  * The source and destination IP addresses of the IP header and the source
2644  * and destination ports of TCP/UDP headers, if any, of received packets are
2645  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2646  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2647  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
2648  * RSS output index which is used as the RX queue index where to store the
2649  * received packets.
2650  * The following output is supplied in the RX write-back descriptor:
2651  *     - 32-bit result of the Microsoft RSS hash function,
2652  *     - 4-bit RSS type field.
2653  */
2654
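/*
 * Illustrative sketch of the RETA lookup described above (assumes a software
 * copy of the 128-entry redirection table; not used by the driver itself):
 */
static inline uint8_t
ixgbe_rss_reta_lookup_sketch(uint32_t rss_hash, const uint8_t reta[128])
{
	/* The seven LSBs of the 32-bit hash select one of the 128 entries. */
	return reta[rss_hash & 0x7F];
}
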
2655 /*
2656  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2657  * Used as the default key.
2658  */
2659 static uint8_t rss_intel_key[40] = {
2660         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2661         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2662         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2663         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2664         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2665 };
2666
2667 static void
2668 ixgbe_rss_disable(struct rte_eth_dev *dev)
2669 {
2670         struct ixgbe_hw *hw;
2671         uint32_t mrqc;
2672         uint32_t mrqc_reg;
2673
2674         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2675         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2676         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2677         mrqc &= ~IXGBE_MRQC_RSSEN;
2678         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2679 }
2680
2681 static void
2682 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2683 {
2684         uint8_t  *hash_key;
2685         uint32_t mrqc;
2686         uint32_t rss_key;
2687         uint64_t rss_hf;
2688         uint16_t i;
2689         uint32_t mrqc_reg;
2690         uint32_t rssrk_reg;
2691
2692         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2693         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2694
2695         hash_key = rss_conf->rss_key;
2696         if (hash_key != NULL) {
2697                 /* Fill in RSS hash key */
2698                 for (i = 0; i < 10; i++) {
2699                         rss_key  = hash_key[(i * 4)];
2700                         rss_key |= hash_key[(i * 4) + 1] << 8;
2701                         rss_key |= hash_key[(i * 4) + 2] << 16;
2702                         rss_key |= hash_key[(i * 4) + 3] << 24;
2703                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2704                 }
2705         }
2706
2707         /* Set configured hashing protocols in MRQC register */
2708         rss_hf = rss_conf->rss_hf;
2709         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2710         if (rss_hf & ETH_RSS_IPV4)
2711                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2712         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2713                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2714         if (rss_hf & ETH_RSS_IPV6)
2715                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2716         if (rss_hf & ETH_RSS_IPV6_EX)
2717                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2718         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2719                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2720         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2721                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2722         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2723                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2724         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2725                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2726         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2727                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2728         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2729 }
2730
2731 int
2732 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2733                           struct rte_eth_rss_conf *rss_conf)
2734 {
2735         struct ixgbe_hw *hw;
2736         uint32_t mrqc;
2737         uint64_t rss_hf;
2738         uint32_t mrqc_reg;
2739
2740         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2741
2742         if (!ixgbe_rss_update_sp(hw->mac.type)) {
2743                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2744                         "NIC.");
2745                 return -ENOTSUP;
2746         }
2747         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2748
2749         /*
2750          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2751          *     "RSS enabling cannot be done dynamically while it must be
2752          *      preceded by a software reset"
2753          * Before changing anything, first check that the update RSS operation
2754          * does not attempt to disable RSS, if RSS was enabled at
2755          * initialization time, or does not attempt to enable RSS, if RSS was
2756          * disabled at initialization time.
2757          */
2758         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2759         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2760         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2761                 if (rss_hf != 0) /* Enable RSS */
2762                         return -EINVAL;
2763                 return 0; /* Nothing to do */
2764         }
2765         /* RSS enabled */
2766         if (rss_hf == 0) /* Disable RSS */
2767                 return -EINVAL;
2768         ixgbe_hw_rss_hash_set(hw, rss_conf);
2769         return 0;
2770 }
2771
2772 int
2773 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2774                             struct rte_eth_rss_conf *rss_conf)
2775 {
2776         struct ixgbe_hw *hw;
2777         uint8_t *hash_key;
2778         uint32_t mrqc;
2779         uint32_t rss_key;
2780         uint64_t rss_hf;
2781         uint16_t i;
2782         uint32_t mrqc_reg;
2783         uint32_t rssrk_reg;
2784
2785         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2786         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2787         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2788         hash_key = rss_conf->rss_key;
2789         if (hash_key != NULL) {
2790                 /* Return RSS hash key */
2791                 for (i = 0; i < 10; i++) {
2792                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2793                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2794                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2795                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2796                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2797                 }
2798         }
2799
2800         /* Get RSS functions configured in MRQC register */
2801         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2802         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2803                 rss_conf->rss_hf = 0;
2804                 return 0;
2805         }
2806         rss_hf = 0;
2807         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2808                 rss_hf |= ETH_RSS_IPV4;
2809         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2810                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2811         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2812                 rss_hf |= ETH_RSS_IPV6;
2813         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2814                 rss_hf |= ETH_RSS_IPV6_EX;
2815         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2816                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2817         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2818                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2819         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2820                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2821         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2822                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2823         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2824                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2825         rss_conf->rss_hf = rss_hf;
2826         return 0;
2827 }
2828
2829 static void
2830 ixgbe_rss_configure(struct rte_eth_dev *dev)
2831 {
2832         struct rte_eth_rss_conf rss_conf;
2833         struct ixgbe_hw *hw;
2834         uint32_t reta;
2835         uint16_t i;
2836         uint16_t j;
2837         uint16_t sp_reta_size;
2838         uint32_t reta_reg;
2839
2840         PMD_INIT_FUNC_TRACE();
2841         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2842
2843         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2844
2845         /*
2846          * Fill in redirection table
2847          * The byte-swap is needed because NIC registers are in
2848          * little-endian order.
2849          */
2850         reta = 0;
2851         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2852                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2853
2854                 if (j == dev->data->nb_rx_queues)
2855                         j = 0;
2856                 reta = (reta << 8) | j;
2857                 if ((i & 3) == 3)
2858                         IXGBE_WRITE_REG(hw, reta_reg,
2859                                         rte_bswap32(reta));
2860         }
2861
2862         /*
2863          * Configure the RSS key and the RSS protocols used to compute
2864          * the RSS hash of input packets.
2865          */
2866         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2867         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2868                 ixgbe_rss_disable(dev);
2869                 return;
2870         }
2871         if (rss_conf.rss_key == NULL)
2872                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2873         ixgbe_hw_rss_hash_set(hw, &rss_conf);
2874 }
2875
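/*
 * Minimal sketch of the RETA register packing performed in
 * ixgbe_rss_configure() above: four 8-bit queue indices are accumulated into
 * one 32-bit word and byte-swapped before being written, so that the first
 * index ends up in the least significant byte of the little-endian NIC
 * register (illustration only, not used by the driver):
 */
static inline uint32_t
ixgbe_reta_pack_sketch(uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3)
{
	uint32_t reta = 0;

	reta = (reta << 8) | q0;
	reta = (reta << 8) | q1;
	reta = (reta << 8) | q2;
	reta = (reta << 8) | q3;

	return rte_bswap32(reta);
}
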
2876 #define NUM_VFTA_REGISTERS 128
2877 #define NIC_RX_BUFFER_SIZE 0x200
2878 #define X550_RX_BUFFER_SIZE 0x180
2879
2880 static void
2881 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2882 {
2883         struct rte_eth_vmdq_dcb_conf *cfg;
2884         struct ixgbe_hw *hw;
2885         enum rte_eth_nb_pools num_pools;
2886         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2887         uint16_t pbsize;
2888         uint8_t nb_tcs; /* number of traffic classes */
2889         int i;
2890
2891         PMD_INIT_FUNC_TRACE();
2892         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2893         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2894         num_pools = cfg->nb_queue_pools;
2895         /* Check we have a valid number of pools */
2896         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2897                 ixgbe_rss_disable(dev);
2898                 return;
2899         }
2900         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2901         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2902
2903         /*
2904          * RXPBSIZE
2905          * split rx buffer up into sections, each for 1 traffic class
2906          */
2907         switch (hw->mac.type) {
2908         case ixgbe_mac_X550:
2909         case ixgbe_mac_X550EM_x:
2910         case ixgbe_mac_X550EM_a:
2911                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2912                 break;
2913         default:
2914                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2915                 break;
2916         }
2917         for (i = 0; i < nb_tcs; i++) {
2918                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2919                 /* clear the 10-bit RXPBSIZE field, then set the new value */
2920                 rxpbsize &= ~(0x3FF << IXGBE_RXPBSIZE_SHIFT);
2921                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT);
2922                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2923         }
2924         /* zero alloc all unused TCs */
2925         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2926                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2927                 /* clear the 10-bit RXPBSIZE field for this unused TC */
2928                 rxpbsize &= ~(0x3FF << IXGBE_RXPBSIZE_SHIFT);
2929                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2930         }
2931
2932         /* MRQC: enable vmdq and dcb */
2933         mrqc = (num_pools == ETH_16_POOLS) ?
2934                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
2935         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2936
2937         /* PFVTCTL: turn on virtualisation and set the default pool */
2938         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2939         if (cfg->enable_default_pool) {
2940                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2941         } else {
2942                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2943         }
2944
2945         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2946
2947         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2948         queue_mapping = 0;
2949         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2950                 /*
2951                  * mapping is done with 3 bits per priority,
2952                  * so shift by i*3 each time
2953                  */
2954                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2955
2956         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
2957
2958         /* RTRPCS: DCB related */
2959         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2960
2961         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2962         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2963         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2964         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2965
2966         /* VFTA - enable all vlan filters */
2967         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2968                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2969         }
2970
2971         /* VFRE: pool enabling for receive - 16 or 32 */
2972         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
2973                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2974
2975         /*
2976          * MPSAR - allow pools to read specific mac addresses
2977          * In this case, all pools should be able to read from mac addr 0
2978          */
2979         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2980         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2981
2982         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2983         for (i = 0; i < cfg->nb_pool_maps; i++) {
2984                 /* set vlan id in VF register and set the valid bit */
2985                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
2986                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
2987                 /*
2988                  * Put the allowed pools in VFB reg. As we only have 16 or 32
2989                  * pools, we only need to use the first half of the register
2990                  * i.e. bits 0-31
2991                  */
2992                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2), cfg->pool_map[i].pools);
2993         }
2994 }
2995
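/*
 * Minimal sketch of the RTRUP2TC packing used in ixgbe_vmdq_dcb_configure()
 * above: each of the 8 user priorities occupies a 3-bit field, so priority i
 * lives at bits [3*i+2 : 3*i] (illustration only, not used by the driver):
 */
static inline uint32_t
ixgbe_up2tc_pack_sketch(const uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES])
{
	uint32_t up2tc = 0;
	unsigned int i;

	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
		up2tc |= (uint32_t)(dcb_tc[i] & 0x07) << (i * 3);

	return up2tc;
}
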
2996 /**
2997  * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2998  * @hw: pointer to hardware structure
2999  * @dcb_config: pointer to ixgbe_dcb_config structure
3000  */
3001 static void
3002 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
3003                struct ixgbe_dcb_config *dcb_config)
3004 {
3005         uint32_t reg;
3006         uint32_t q;
3007
3008         PMD_INIT_FUNC_TRACE();
3009         if (hw->mac.type != ixgbe_mac_82598EB) {
3010                 /* Disable the Tx desc arbiter so that MTQC can be changed */
3011                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3012                 reg |= IXGBE_RTTDCS_ARBDIS;
3013                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3014
3015                 /* Enable DCB for Tx with 8 TCs */
3016                 if (dcb_config->num_tcs.pg_tcs == 8) {
3017                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3018                 }
3019                 else {
3020                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3021                 }
3022                 if (dcb_config->vt_mode)
3023                         reg |= IXGBE_MTQC_VT_ENA;
3024                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3025
3026                 /* Disable drop for all queues */
3027                 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3028                         IXGBE_WRITE_REG(hw, IXGBE_QDE,
3029                                 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3030
3031                 /* Enable the Tx desc arbiter */
3032                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3033                 reg &= ~IXGBE_RTTDCS_ARBDIS;
3034                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3035
3036                 /* Enable Security TX Buffer IFG for DCB */
3037                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3038                 reg |= IXGBE_SECTX_DCB;
3039                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3040         }
3041         return;
3042 }
3043
3044 /**
3045  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3046  * @dev: pointer to rte_eth_dev structure
3047  * @dcb_config: pointer to ixgbe_dcb_config structure
3048  */
3049 static void
3050 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3051                         struct ixgbe_dcb_config *dcb_config)
3052 {
3053         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3054                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3055         struct ixgbe_hw *hw =
3056                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3057
3058         PMD_INIT_FUNC_TRACE();
3059         if (hw->mac.type != ixgbe_mac_82598EB)
3060                 /* PF VF Transmit Enable */
3061                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3062                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3063
3064         /* Configure general DCB TX parameters */
3065         ixgbe_dcb_tx_hw_config(hw, dcb_config);
3066         return;
3067 }
3068
3069 static void
3070 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3071                         struct ixgbe_dcb_config *dcb_config)
3072 {
3073         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3074                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3075         struct ixgbe_dcb_tc_config *tc;
3076         uint8_t i, j;
3077
3078         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3079         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3080                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3081                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3082         }
3083         else {
3084                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3085                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3086         }
3087         /* User Priority to Traffic Class mapping */
3088         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3089                 j = vmdq_rx_conf->dcb_tc[i];
3090                 tc = &dcb_config->tc_config[j];
3091                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3092                                                 (uint8_t)(1 << j);
3093         }
3094 }
3095
3096 static void
3097 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3098                         struct ixgbe_dcb_config *dcb_config)
3099 {
3100         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3101                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3102         struct ixgbe_dcb_tc_config *tc;
3103         uint8_t i, j;
3104
3105         /* convert rte_eth_conf.tx_adv_conf to struct ixgbe_dcb_config */
3106         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3107                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3108                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3109         }
3110         else {
3111                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3112                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3113         }
3114
3115         /* User Priority to Traffic Class mapping */
3116         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3117                 j = vmdq_tx_conf->dcb_tc[i];
3118                 tc = &dcb_config->tc_config[j];
3119                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3120                                                 (uint8_t)(1 << j);
3121         }
3122         return;
3123 }
3124
3125 static void
3126 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3127                 struct ixgbe_dcb_config *dcb_config)
3128 {
3129         struct rte_eth_dcb_rx_conf *rx_conf =
3130                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3131         struct ixgbe_dcb_tc_config *tc;
3132         uint8_t i, j;
3133
3134         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3135         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3136
3137         /* User Priority to Traffic Class mapping */
3138         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3139                 j = rx_conf->dcb_tc[i];
3140                 tc = &dcb_config->tc_config[j];
3141                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3142                                                 (uint8_t)(1 << j);
3143         }
3144 }
3145
3146 static void
3147 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3148                 struct ixgbe_dcb_config *dcb_config)
3149 {
3150         struct rte_eth_dcb_tx_conf *tx_conf =
3151                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3152         struct ixgbe_dcb_tc_config *tc;
3153         uint8_t i, j;
3154
3155         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3156         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3157
3158         /* User Priority to Traffic Class mapping */
3159         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3160                 j = tx_conf->dcb_tc[i];
3161                 tc = &dcb_config->tc_config[j];
3162                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3163                                                 (uint8_t)(1 << j);
3164         }
3165 }
3166
3167 /**
3168  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3169  * @hw: pointer to hardware structure
3170  * @dcb_config: pointer to ixgbe_dcb_config structure
3171  */
3172 static void
3173 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3174                struct ixgbe_dcb_config *dcb_config)
3175 {
3176         uint32_t reg;
3177         uint32_t vlanctrl;
3178         uint8_t i;
3179
3180         PMD_INIT_FUNC_TRACE();
3181         /*
3182          * Disable the arbiter before changing parameters
3183          * (always enable recycle mode; WSP)
3184          */
3185         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3186         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3187
3188         if (hw->mac.type != ixgbe_mac_82598EB) {
3189                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3190                 if (dcb_config->num_tcs.pg_tcs == 4) {
3191                         if (dcb_config->vt_mode)
3192                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3193                                         IXGBE_MRQC_VMDQRT4TCEN;
3194                         else {
3195                                 /* no matter the mode is DCB or DCB_RSS, just
3196                                  * set the MRQE to RSSXTCEN. RSS is controlled
3197                                  * by RSS_FIELD
3198                                  */
3199                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3200                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3201                                         IXGBE_MRQC_RTRSS4TCEN;
3202                         }
3203                 }
3204                 if (dcb_config->num_tcs.pg_tcs == 8) {
3205                         if (dcb_config->vt_mode)
3206                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3207                                         IXGBE_MRQC_VMDQRT8TCEN;
3208                         else {
3209                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3210                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3211                                         IXGBE_MRQC_RTRSS8TCEN;
3212                         }
3213                 }
3214
3215                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3216         }
3217
3218         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3219         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3220         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3221         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3222
3223         /* VFTA - enable all vlan filters */
3224         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3225                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3226         }
3227
3228         /*
3229          * Configure Rx packet plane (recycle mode; WSP) and
3230          * enable arbiter
3231          */
3232         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3233         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3234
3235         return;
3236 }
3237
3238 static void
3239 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3240                         uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3241 {
3242         switch (hw->mac.type) {
3243         case ixgbe_mac_82598EB:
3244                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3245                 break;
3246         case ixgbe_mac_82599EB:
3247         case ixgbe_mac_X540:
3248         case ixgbe_mac_X550:
3249         case ixgbe_mac_X550EM_x:
3250         case ixgbe_mac_X550EM_a:
3251                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3252                                                   tsa, map);
3253                 break;
3254         default:
3255                 break;
3256         }
3257 }
3258
3259 static void
3260 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3261                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3262 {
3263         switch (hw->mac.type) {
3264         case ixgbe_mac_82598EB:
3265                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3266                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3267                 break;
3268         case ixgbe_mac_82599EB:
3269         case ixgbe_mac_X540:
3270         case ixgbe_mac_X550:
3271         case ixgbe_mac_X550EM_x:
3272         case ixgbe_mac_X550EM_a:
3273                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3274                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3275                 break;
3276         default:
3277                 break;
3278         }
3279 }
3280
3281 #define DCB_RX_CONFIG  1
3282 #define DCB_TX_CONFIG  1
3283 #define DCB_TX_PB      1024
3284 /**
3285  * ixgbe_dcb_hw_configure - Enable DCB and configure
3286  * general DCB in VT mode and non-VT mode parameters
3287  * @dev: pointer to rte_eth_dev structure
3288  * @dcb_config: pointer to ixgbe_dcb_config structure
3289  */
3290 static int
3291 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3292                         struct ixgbe_dcb_config *dcb_config)
3293 {
3294         int     ret = 0;
3295         uint8_t i, pfc_en, nb_tcs;
3296         uint16_t pbsize, rx_buffer_size;
3297         uint8_t config_dcb_rx = 0;
3298         uint8_t config_dcb_tx = 0;
3299         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3300         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3301         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3302         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3303         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3304         struct ixgbe_dcb_tc_config *tc;
3305         uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3306         struct ixgbe_hw *hw =
3307                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3308
3309         switch (dev->data->dev_conf.rxmode.mq_mode) {
3310         case ETH_MQ_RX_VMDQ_DCB:
3311                 dcb_config->vt_mode = true;
3312                 if (hw->mac.type != ixgbe_mac_82598EB) {
3313                         config_dcb_rx = DCB_RX_CONFIG;
3314                         /*
3315                          * get DCB and VT Rx configuration parameters
3316                          * from rte_eth_conf
3317                          */
3318                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3319                         /* Configure general VMDQ and DCB RX parameters */
3320                         ixgbe_vmdq_dcb_configure(dev);
3321                 }
3322                 break;
3323         case ETH_MQ_RX_DCB:
3324         case ETH_MQ_RX_DCB_RSS:
3325                 dcb_config->vt_mode = false;
3326                 config_dcb_rx = DCB_RX_CONFIG;
3327                 /* Get DCB RX configuration parameters from rte_eth_conf */
3328                 ixgbe_dcb_rx_config(dev, dcb_config);
3329                 /* Configure general DCB RX parameters */
3330                 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3331                 break;
3332         default:
3333                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3334                 break;
3335         }
3336         switch (dev->data->dev_conf.txmode.mq_mode) {
3337         case ETH_MQ_TX_VMDQ_DCB:
3338                 dcb_config->vt_mode = true;
3339                 config_dcb_tx = DCB_TX_CONFIG;
3340                 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3341                 ixgbe_dcb_vt_tx_config(dev, dcb_config);
3342                 /* Configure general VMDQ and DCB TX parameters */
3343                 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3344                 break;
3345
3346         case ETH_MQ_TX_DCB:
3347                 dcb_config->vt_mode = false;
3348                 config_dcb_tx = DCB_TX_CONFIG;
3349                 /* Get DCB TX configuration parameters from rte_eth_conf */
3350                 ixgbe_dcb_tx_config(dev, dcb_config);
3351                 /* Configure general DCB TX parameters */
3352                 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3353                 break;
3354         default:
3355                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3356                 break;
3357         }
3358
3359         nb_tcs = dcb_config->num_tcs.pfc_tcs;
3360         /* Unpack map */
3361         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3362         if (nb_tcs == ETH_4_TCS) {
3363                 /* Avoid un-configured priority mapping to TC0 */
3364                 uint8_t j = 4;
3365                 uint8_t mask = 0xFF;
3366                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3367                         mask = (uint8_t)(mask & ~(1 << map[i]));
3368                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3369                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3370                                 map[j++] = i;
3371                         mask >>= 1;
3372                 }
3373                 /* Re-configure 4 TCs BW */
3374                 for (i = 0; i < nb_tcs; i++) {
3375                         tc = &dcb_config->tc_config[i];
3376                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3377                                                 (uint8_t)(100 / nb_tcs);
3378                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3379                                                 (uint8_t)(100 / nb_tcs);
3380                 }
3381                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3382                         tc = &dcb_config->tc_config[i];
3383                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3384                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3385                 }
3386         }
3387
3388         switch (hw->mac.type) {
3389         case ixgbe_mac_X550:
3390         case ixgbe_mac_X550EM_x:
3391         case ixgbe_mac_X550EM_a:
3392                 rx_buffer_size = X550_RX_BUFFER_SIZE;
3393                 break;
3394         default:
3395                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3396                 break;
3397         }
3398
3399         if (config_dcb_rx) {
3400                 /* Set RX buffer size */
3401                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3402                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3403                 for (i = 0; i < nb_tcs; i++) {
3404                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3405                 }
3406                 /* zero alloc all unused TCs */
3407                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3408                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3409                 }
3410         }
3411         if (config_dcb_tx) {
3412                 /* Only support an equally distributed Tx packet buffer strategy. */
3413                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3414                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3415                 for (i = 0; i < nb_tcs; i++) {
3416                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3417                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3418                 }
3419                 /* Clear unused TCs, if any, to zero buffer size */
3420                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3421                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3422                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3423                 }
3424         }
3425
3426         /* Calculate traffic class credits */
3427         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3428                                 IXGBE_DCB_TX_CONFIG);
3429         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3430                                 IXGBE_DCB_RX_CONFIG);
3431
3432         if (config_dcb_rx) {
3433                 /* Unpack CEE standard containers */
3434                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3435                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3436                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3437                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3438                 /* Configure PG(ETS) RX */
3439                 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3440         }
3441
3442         if (config_dcb_tx) {
3443                 /* Unpack CEE standard containers */
3444                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3445                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3446                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3447                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3448                 /* Configure PG(ETS) TX */
3449                 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
3450         }
3451
3452         /* Configure queue statistics registers */
3453         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3454
3455         /* Check if the PFC is supported */
3456         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3457                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3458                 for (i = 0; i < nb_tcs; i++) {
3459                         /*
3460                          * e.g. when the TC count is 8 and pbsize is 64, the
3461                          * default high_water is 48 and low_water is 16.
3462                          */
3463                         hw->fc.high_water[i] = (pbsize * 3) / 4;
3464                         hw->fc.low_water[i] = pbsize / 4;
3465                         /* Enable pfc for this TC */
3466                         tc = &dcb_config->tc_config[i];
3467                         tc->pfc = ixgbe_dcb_pfc_enabled;
3468                 }
3469                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3470                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3471                         pfc_en &= 0x0F;
3472                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3473         }
3474
3475         return ret;
3476 }
3477
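/*
 * Minimal sketch of the per-TC flow control watermark arithmetic used in
 * ixgbe_dcb_hw_configure() above: the Rx packet buffer is split evenly
 * between the TCs and the high/low watermarks default to 3/4 and 1/4 of each
 * slice, e.g. rx_buffer_size = 0x200 with 8 TCs gives pbsize = 64,
 * high_water = 48 and low_water = 16 (illustration only, not used by the
 * driver):
 */
static inline void
ixgbe_dcb_pfc_watermarks_sketch(uint16_t rx_buffer_size, uint8_t nb_tcs,
				uint16_t *high_water, uint16_t *low_water)
{
	uint16_t pbsize = rx_buffer_size / nb_tcs;

	*high_water = (pbsize * 3) / 4;
	*low_water = pbsize / 4;
}
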
3478 /**
3479  * ixgbe_configure_dcb - Configure DCB hardware
3480  * @dev: pointer to rte_eth_dev
3481  */
3482 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3483 {
3484         struct ixgbe_dcb_config *dcb_cfg =
3485                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3486         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3487
3488         PMD_INIT_FUNC_TRACE();
3489
3490         /* check support mq_mode for DCB */
3491         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3492             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3493             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3494                 return;
3495
3496         if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3497                 return;
3498
3499         /* Configure DCB hardware */
3500         ixgbe_dcb_hw_configure(dev, dcb_cfg);
3501
3502         return;
3503 }
3504
3505 /*
3506  * VMDq is only supported on 10 GbE NICs.
3507  */
3508 static void
3509 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3510 {
3511         struct rte_eth_vmdq_rx_conf *cfg;
3512         struct ixgbe_hw *hw;
3513         enum rte_eth_nb_pools num_pools;
3514         uint32_t mrqc, vt_ctl, vlanctrl;
3515         uint32_t vmolr = 0;
3516         int i;
3517
3518         PMD_INIT_FUNC_TRACE();
3519         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3520         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3521         num_pools = cfg->nb_queue_pools;
3522
3523         ixgbe_rss_disable(dev);
3524
3525         /* MRQC: enable vmdq */
3526         mrqc = IXGBE_MRQC_VMDQEN;
3527         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3528
3529         /* PFVTCTL: turn on virtualisation and set the default pool */
3530         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3531         if (cfg->enable_default_pool)
3532                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3533         else
3534                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3535
3536         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3537
3538         for (i = 0; i < (int)num_pools; i++) {
3539                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3540                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3541         }
3542
3543         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3544         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3545         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3546         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3547
3548         /* VFTA - enable all vlan filters */
3549         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3550                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3551
3552         /* VFRE: pool enabling for receive - 64 */
3553         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3554         if (num_pools == ETH_64_POOLS)
3555                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3556
3557         /*
3558          * MPSAR - allow pools to read specific mac addresses
3559          * In this case, all pools should be able to read from mac addr 0
3560          */
3561         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3562         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3563
3564         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3565         for (i = 0; i < cfg->nb_pool_maps; i++) {
3566                 /* set vlan id in VF register and set the valid bit */
3567                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3568                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3569                 /*
3570                  * Put the allowed pools in the VLVFB register pair: pools
3571                  * 0-31 go in the low dword VLVFB(i * 2) and pools 32-63
3572                  * in the high dword VLVFB(i * 2 + 1).
3573                  */
3574                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3575                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
3576                                         (cfg->pool_map[i].pools & UINT32_MAX));
3577                 else
3578                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
3579                                         ((cfg->pool_map[i].pools >> 32)
3580                                         & UINT32_MAX));
3581
3582         }
3583
3584         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3585         if (cfg->enable_loop_back) {
3586                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3587                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3588                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3589         }
3590
3591         IXGBE_WRITE_FLUSH(hw);
3592 }
3593
3594 /*
3595  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3596  * @hw: pointer to hardware structure
3597  */
3598 static void
3599 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3600 {
3601         uint32_t reg;
3602         uint32_t q;
3603
3604         PMD_INIT_FUNC_TRACE();
3605         /*PF VF Transmit Enable*/
3606         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3607         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3608
3609         /* Disable the Tx desc arbiter so that MTQC can be changed */
3610         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3611         reg |= IXGBE_RTTDCS_ARBDIS;
3612         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3613
3614         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3615         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3616
3617         /* Disable drop for all queues */
3618         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3619                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3620                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3621
3622         /* Enable the Tx desc arbiter */
3623         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3624         reg &= ~IXGBE_RTTDCS_ARBDIS;
3625         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3626
3627         IXGBE_WRITE_FLUSH(hw);
3628
3629         return;
3630 }
3631
3632 static int __attribute__((cold))
3633 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3634 {
3635         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3636         uint64_t dma_addr;
3637         unsigned i;
3638
3639         /* Initialize software ring entries */
3640         for (i = 0; i < rxq->nb_rx_desc; i++) {
3641                 volatile union ixgbe_adv_rx_desc *rxd;
3642                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3643                 if (mbuf == NULL) {
3644                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3645                                      (unsigned) rxq->queue_id);
3646                         return -ENOMEM;
3647                 }
3648
3649                 rte_mbuf_refcnt_set(mbuf, 1);
3650                 mbuf->next = NULL;
3651                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3652                 mbuf->nb_segs = 1;
3653                 mbuf->port = rxq->port_id;
3654
3655                 dma_addr =
3656                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
3657                 rxd = &rxq->rx_ring[i];
3658                 rxd->read.hdr_addr = 0;
3659                 rxd->read.pkt_addr = dma_addr;
3660                 rxe[i].mbuf = mbuf;
3661         }
3662
3663         return 0;
3664 }
3665
3666 static int
3667 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3668 {
3669         struct ixgbe_hw *hw;
3670         uint32_t mrqc;
3671
3672         ixgbe_rss_configure(dev);
3673
3674         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3675
3676         /* MRQC: enable VF RSS */
3677         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3678         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3679         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3680         case ETH_64_POOLS:
3681                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3682                 break;
3683
3684         case ETH_32_POOLS:
3685                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3686                 break;
3687
3688         default:
3689                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3690                 return -EINVAL;
3691         }
3692
3693         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3694
3695         return 0;
3696 }
3697
3698 static int
3699 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3700 {
3701         struct ixgbe_hw *hw =
3702                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3703
3704         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3705         case ETH_64_POOLS:
3706                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3707                         IXGBE_MRQC_VMDQEN);
3708                 break;
3709
3710         case ETH_32_POOLS:
3711                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3712                         IXGBE_MRQC_VMDQRT4TCEN);
3713                 break;
3714
3715         case ETH_16_POOLS:
3716                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3717                         IXGBE_MRQC_VMDQRT8TCEN);
3718                 break;
3719         default:
3720                 PMD_INIT_LOG(ERR,
3721                         "invalid pool number in IOV mode");
3722                 break;
3723         }
3724         return 0;
3725 }
3726
3727 static int
3728 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3729 {
3730         struct ixgbe_hw *hw =
3731                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732
3733         if (hw->mac.type == ixgbe_mac_82598EB)
3734                 return 0;
3735
3736         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3737                 /*
3738                  * SRIOV inactive scheme
3739                  * any DCB/RSS w/o VMDq multi-queue setting
3740                  */
3741                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3742                 case ETH_MQ_RX_RSS:
3743                 case ETH_MQ_RX_DCB_RSS:
3744                 case ETH_MQ_RX_VMDQ_RSS:
3745                         ixgbe_rss_configure(dev);
3746                         break;
3747
3748                 case ETH_MQ_RX_VMDQ_DCB:
3749                         ixgbe_vmdq_dcb_configure(dev);
3750                         break;
3751
3752                 case ETH_MQ_RX_VMDQ_ONLY:
3753                         ixgbe_vmdq_rx_hw_configure(dev);
3754                         break;
3755
3756                 case ETH_MQ_RX_NONE:
3757                 default:
3758                         /* if mq_mode is none, disable rss mode.*/
3759                         ixgbe_rss_disable(dev);
3760                         break;
3761                 }
3762         } else {
3763                 /*
3764                  * SRIOV active scheme
3765                  * Support RSS together with VMDq & SRIOV
3766                  */
3767                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3768                 case ETH_MQ_RX_RSS:
3769                 case ETH_MQ_RX_VMDQ_RSS:
3770                         ixgbe_config_vf_rss(dev);
3771                         break;
3772
3773                 /* FIXME: DCB/RSS together with VMDq & SRIOV is not yet supported */
3774                 case ETH_MQ_RX_VMDQ_DCB:
3775                 case ETH_MQ_RX_VMDQ_DCB_RSS:
3776                         PMD_INIT_LOG(ERR,
3777                                 "DCB with VMDq & SRIOV is not supported");
3778                         return -1;
3779                 default:
3780                         ixgbe_config_vf_default(dev);
3781                         break;
3782                 }
3783         }
3784
3785         return 0;
3786 }
3787
3788 static int
3789 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3790 {
3791         struct ixgbe_hw *hw =
3792                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3793         uint32_t mtqc;
3794         uint32_t rttdcs;
3795
3796         if (hw->mac.type == ixgbe_mac_82598EB)
3797                 return 0;
3798
3799         /* disable arbiter before setting MTQC */
3800         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3801         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3802         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3803
3804         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3805                 /*
3806                  * SRIOV inactive scheme
3807                  * any DCB w/o VMDq multi-queue setting
3808                  */
3809                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3810                         ixgbe_vmdq_tx_hw_configure(hw);
3811                 else {
3812                         mtqc = IXGBE_MTQC_64Q_1PB;
3813                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3814                 }
3815         } else {
3816                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3817
3818                 /*
3819                  * SRIOV active scheme
3820                  * FIXME: DCB together with VMDq & SRIOV is not yet supported
3821                  */
3822                 case ETH_64_POOLS:
3823                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3824                         break;
3825                 case ETH_32_POOLS:
3826                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3827                         break;
3828                 case ETH_16_POOLS:
3829                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3830                                 IXGBE_MTQC_8TC_8TQ;
3831                         break;
3832                 default:
3833                         mtqc = IXGBE_MTQC_64Q_1PB;
3834                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3835                 }
3836                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3837         }
3838
3839         /* re-enable arbiter */
3840         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3841         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3842
3843         return 0;
3844 }
3845
3846 /**
3847  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3848  *
3849  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3850  * spec rev. 3.0 chapter 8.2.3.8.13.
3851  *
3852  * @pool Memory pool of the Rx queue
3853  */
3854 static inline uint32_t
3855 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3856 {
3857         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3858
3859         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3860         uint16_t maxdesc =
3861                 IPV4_MAX_PKT_LEN /
3862                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3863
3864         if (maxdesc >= 16)
3865                 return IXGBE_RSCCTL_MAXDESC_16;
3866         else if (maxdesc >= 8)
3867                 return IXGBE_RSCCTL_MAXDESC_8;
3868         else if (maxdesc >= 4)
3869                 return IXGBE_RSCCTL_MAXDESC_4;
3870         else
3871                 return IXGBE_RSCCTL_MAXDESC_1;
3872 }
3873
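/*
 * Worked example for ixgbe_get_rscctl_maxdesc() above, assuming the common
 * 2048-byte mbuf data room and the default 128-byte RTE_PKTMBUF_HEADROOM:
 * each descriptor then holds 2048 - 128 = 1920 bytes, so
 * maxdesc = IPV4_MAX_PKT_LEN / 1920 = 65535 / 1920 = 34, which is >= 16 and
 * the function returns IXGBE_RSCCTL_MAXDESC_16.
 */
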
3874 /**
3875  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3876  * interrupt
3877  *
3878  * (Taken from FreeBSD tree)
3879  * (yes this is all very magic and confusing :)
3880  *
3881  * @dev port handle
3882  * @entry the register array entry
3883  * @vector the MSIX vector for this queue
3884  * @type RX/TX/MISC
3885  */
3886 static void
3887 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3888 {
3889         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3890         u32 ivar, index;
3891
3892         vector |= IXGBE_IVAR_ALLOC_VAL;
3893
3894         switch (hw->mac.type) {
3895
3896         case ixgbe_mac_82598EB:
3897                 if (type == -1)
3898                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3899                 else
3900                         entry += (type * 64);
3901                 index = (entry >> 2) & 0x1F;
3902                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3903                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3904                 ivar |= (vector << (8 * (entry & 0x3)));
3905                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3906                 break;
3907
3908         case ixgbe_mac_82599EB:
3909         case ixgbe_mac_X540:
3910                 if (type == -1) { /* MISC IVAR */
3911                         index = (entry & 1) * 8;
3912                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3913                         ivar &= ~(0xFF << index);
3914                         ivar |= (vector << index);
3915                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3916                 } else {        /* RX/TX IVARS */
3917                         index = (16 * (entry & 1)) + (8 * type);
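                             /*
                              * Sketch of the layout implied by the index math:
                              * each IVAR register holds four 8-bit vector
                              * fields for a pair of queues (even queue Rx in
                              * bits 7:0, even Tx in 15:8, odd Rx in 23:16,
                              * odd Tx in 31:24). E.g. entry = 5, type = 0 (Rx)
                              * selects IVAR(2) and index = 16 * 1 + 8 * 0 = 16,
                              * i.e. bits 23:16.
                              */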
3918                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3919                         ivar &= ~(0xFF << index);
3920                         ivar |= (vector << index);
3921                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3922                 }
3923
3924                 break;
3925
3926         default:
3927                 break;
3928         }
3929 }
3930
3931 void __attribute__((cold))
3932 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3933 {
3934         uint16_t i, rx_using_sse;
3935         struct ixgbe_adapter *adapter =
3936                 (struct ixgbe_adapter *)dev->data->dev_private;
3937
3938         /*
3939          * In order to allow Vector Rx there are a few configuration
3940          * conditions to be met and Rx Bulk Allocation should be allowed.
3941          */
3942         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3943             !adapter->rx_bulk_alloc_allowed) {
3944                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3945                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
3946                                     "not enabled",
3947                              dev->data->port_id);
3948
3949                 adapter->rx_vec_allowed = false;
3950         }
3951
3952         /*
3953          * Initialize the appropriate LRO callback.
3954          *
3955          * If all queues satisfy the bulk allocation preconditions
3956          * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3957          * Otherwise use a single allocation version.
3958          */
3959         if (dev->data->lro) {
3960                 if (adapter->rx_bulk_alloc_allowed) {
3961                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3962                                            "allocation version");
3963                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3964                 } else {
3965                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3966                                            "allocation version");
3967                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3968                 }
3969         } else if (dev->data->scattered_rx) {
3970                 /*
3971                  * Set the non-LRO scattered callback: there are Vector and
3972                  * single allocation versions.
3973                  */
3974                 if (adapter->rx_vec_allowed) {
3975                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3976                                             "callback (port=%d).",
3977                                      dev->data->port_id);
3978
3979                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3980                 } else if (adapter->rx_bulk_alloc_allowed) {
3981                         PMD_INIT_LOG(DEBUG, "Using a Scattered Rx with bulk "
3982                                            "allocation callback (port=%d).",
3983                                      dev->data->port_id);
3984                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3985                 } else {
3986                         PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
3987                                             "single allocation) "
3988                                             "Scattered Rx callback "
3989                                             "(port=%d).",
3990                                      dev->data->port_id);
3991
3992                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3993                 }
3994         /*
3995          * Below we set "simple" callbacks according to port/queues parameters.
3996          * If parameters allow we are going to choose between the following
3997          * callbacks:
3998          *    - Vector
3999          *    - Bulk Allocation
4000          *    - Single buffer allocation (the simplest one)
4001          */
4002         } else if (adapter->rx_vec_allowed) {
4003                 PMD_INIT_LOG(DEBUG, "Vector Rx enabled, please make sure RX "
4004                                     "burst size is no less than %d (port=%d).",
4005                              RTE_IXGBE_DESCS_PER_LOOP,
4006                              dev->data->port_id);
4007
4008                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4009         } else if (adapter->rx_bulk_alloc_allowed) {
4010                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4011                                     "satisfied. Rx Burst Bulk Alloc function "
4012                                     "will be used on port=%d.",
4013                              dev->data->port_id);
4014
4015                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4016         } else {
4017                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4018                                     "satisfied, or Scattered Rx is requested "
4019                                     "(port=%d).",
4020                              dev->data->port_id);
4021
4022                 dev->rx_pkt_burst = ixgbe_recv_pkts;
4023         }
4024
4025         /* Propagate information about RX function choice through all queues. */
4026
4027         rx_using_sse =
4028                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4029                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4030
4031         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4032                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4033                 rxq->rx_using_sse = rx_using_sse;
4034         }
4035 }
4036
4037 /**
4038  * ixgbe_set_rsc - configure RSC related port HW registers
4039  *
4040  * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4041  * of 82599 Spec (x540 configuration is virtually the same).
4042  *
4043  * @dev port handle
4044  *
4045  * Returns 0 in case of success or a non-zero error code
4046  */
4047 static int
4048 ixgbe_set_rsc(struct rte_eth_dev *dev)
4049 {
4050         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4051         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4052         struct rte_eth_dev_info dev_info = { 0 };
4053         bool rsc_capable = false;
4054         uint16_t i;
4055         uint32_t rdrxctl;
4056
4057         /* Sanity check */
4058         dev->dev_ops->dev_infos_get(dev, &dev_info);
4059         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4060                 rsc_capable = true;
4061
4062         if (!rsc_capable && rx_conf->enable_lro) {
4063                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4064                                    "support it");
4065                 return -EINVAL;
4066         }
4067
4068         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4069
4070         if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4071                 /*
4072                  * According to chapter 4.6.7.2.1 of the Spec Rev.
4073                  * 3.0, RSC configuration requires HW CRC stripping to be
4074                  * enabled. If the user requested both HW CRC stripping off
4075                  * and RSC on - return an error.
4076                  */
4077                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4078                                     "is disabled");
4079                 return -EINVAL;
4080         }
4081
4082         /* RFCTL configuration  */
4083         if (rsc_capable) {
4084                 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4085                 if (rx_conf->enable_lro)
4086                         /*
4087                          * Since NFS packet coalescing is not supported - clear
4088                          * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4089                          * enabled.
4090                          */
4091                         rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4092                                    IXGBE_RFCTL_NFSR_DIS);
4093                 else
4094                         rfctl |= IXGBE_RFCTL_RSC_DIS;
4095
4096                 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4097         }
4098
4099         /* If LRO hasn't been requested - we are done here. */
4100         if (!rx_conf->enable_lro)
4101                 return 0;
4102
4103         /* Set RDRXCTL.RSCACKC bit */
4104         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4105         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4106         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4107
4108         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4109         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4110                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4111                 uint32_t srrctl =
4112                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4113                 uint32_t rscctl =
4114                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4115                 uint32_t psrtype =
4116                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4117                 uint32_t eitr =
4118                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4119
4120                 /*
4121                  * ixgbe PMD doesn't support header-split at the moment.
4122                  *
4123                  * Following chapter 4.6.7.2.1 of the 82599/x540
4124                  * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4125                  * should be configured even if header split is not
4126                  * enabled. We will configure it to 128 bytes following the
4127                  * recommendation in the spec.
4128                  */
4129                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4130                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4131                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4132
4133                 /*
4134                  * TODO: Consider setting the Receive Descriptor Minimum
4135                  * Threshold Size for an RSC case. This is not an obviously
4136                  * beneficial option but one worth considering...
4137                  */
4138
4139                 rscctl |= IXGBE_RSCCTL_RSCEN;
4140                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4141                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4142
4143                 /*
4144                  * RSC: Set ITR interval corresponding to 2K ints/s.
4145                  *
4146                  * Full-sized RSC aggregations for a 10Gb/s link will
4147                  * arrive at about a 20K aggregations/s rate.
4148                  *
4149                  * A 2K ints/s rate will cause only 10% of the
4150                  * aggregations to be closed due to the interrupt timer
4151                  * expiring in a streaming-at-wire-speed case.
4152                  *
4153                  * For a sparse streaming case this setting will yield
4154                  * at most 500us latency for a single RSC aggregation.
4155                  */
4156                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4157                 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
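                     /*
                      * Arithmetic behind the numbers above (a sketch): a 500 us
                      * EITR interval fires at most 1 s / 500 us = 2000
                      * interrupts/s; with roughly 20000 full-sized
                      * aggregations/s on a 10Gb/s link, at most
                      * 2000 / 20000 = 10% of them can be cut short by the
                      * timer, and a sparse flow waits no longer than the
                      * 500 us interval itself.
                      */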
4158
4159                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4160                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4161                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4162                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4163
4164                 /*
4165                  * RSC requires the mapping of the queue to the
4166                  * interrupt vector.
4167                  */
4168                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4169         }
4170
4171         dev->data->lro = 1;
4172
4173         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4174
4175         return 0;
4176 }
4177
4178 /*
4179  * Initializes Receive Unit.
4180  */
4181 int __attribute__((cold))
4182 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4183 {
4184         struct ixgbe_hw     *hw;
4185         struct ixgbe_rx_queue *rxq;
4186         uint64_t bus_addr;
4187         uint32_t rxctrl;
4188         uint32_t fctrl;
4189         uint32_t hlreg0;
4190         uint32_t maxfrs;
4191         uint32_t srrctl;
4192         uint32_t rdrxctl;
4193         uint32_t rxcsum;
4194         uint16_t buf_size;
4195         uint16_t i;
4196         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4197         int rc;
4198
4199         PMD_INIT_FUNC_TRACE();
4200         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4201
4202         /*
4203          * Make sure receives are disabled while setting
4204          * up the RX context (registers, descriptor rings, etc.).
4205          */
4206         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4207         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4208
4209         /* Enable receipt of broadcast frames */
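             /* Besides broadcast acceptance (BAM), DPF discards pause frames
              * in HW and PMCF passes other MAC control frames up to the host,
              * matching the bits set below. */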
4210         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4211         fctrl |= IXGBE_FCTRL_BAM;
4212         fctrl |= IXGBE_FCTRL_DPF;
4213         fctrl |= IXGBE_FCTRL_PMCF;
4214         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4215
4216         /*
4217          * Configure CRC stripping, if any.
4218          */
4219         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4220         if (rx_conf->hw_strip_crc)
4221                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4222         else
4223                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4224
4225         /*
4226          * Configure jumbo frame support, if any.
4227          */
4228         if (rx_conf->jumbo_frame == 1) {
4229                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
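                     /* MAXFRS keeps the maximum frame size in its upper 16
                      * bits, hence the shift of max_rx_pkt_len below. */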
4230                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4231                 maxfrs &= 0x0000FFFF;
4232                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4233                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4234         } else
4235                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4236
4237         /*
4238          * If loopback mode is configured for 82599, set LPBK bit.
4239          */
4240         if (hw->mac.type == ixgbe_mac_82599EB &&
4241                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4242                 hlreg0 |= IXGBE_HLREG0_LPBK;
4243         else
4244                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4245
4246         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4247
4248         /* Setup RX queues */
4249         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4250                 rxq = dev->data->rx_queues[i];
4251
4252                 /*
4253                  * Reset crc_len in case it was changed after queue setup by a
4254                  * call to configure.
4255                  */
4256                 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4257
4258                 /* Setup the Base and Length of the Rx Descriptor Rings */
4259                 bus_addr = rxq->rx_ring_phys_addr;
4260                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4261                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4262                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4263                                 (uint32_t)(bus_addr >> 32));
4264                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4265                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4266                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4267                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4268
4269                 /* Configure the SRRCTL register */
4270 #ifdef RTE_HEADER_SPLIT_ENABLE
4271                 /*
4272                  * Configure Header Split
4273                  */
4274                 if (rx_conf->header_split) {
4275                         if (hw->mac.type == ixgbe_mac_82599EB) {
4276                                 /* Must setup the PSRTYPE register */
4277                                 uint32_t psrtype;
4278                                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4279                                         IXGBE_PSRTYPE_UDPHDR   |
4280                                         IXGBE_PSRTYPE_IPV4HDR  |
4281                                         IXGBE_PSRTYPE_IPV6HDR;
4282                                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4283                         }
4284                         srrctl = ((rx_conf->split_hdr_size <<
4285                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4286                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4287                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4288                 } else
4289 #endif
4290                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4291
4292                 /* Set if packets are dropped when no descriptors are available */
4293                 if (rxq->drop_en)
4294                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4295
4296                 /*
4297                  * Configure the RX buffer size in the BSIZEPACKET field of
4298                  * the SRRCTL register of the queue.
4299                  * The value is in 1 KB resolution. Valid values can be from
4300                  * 1 KB to 16 KB.
4301                  */
4302                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4303                         RTE_PKTMBUF_HEADROOM);
4304                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4305                            IXGBE_SRRCTL_BSIZEPKT_MASK);
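                     /*
                      * Worked example (a sketch, assuming a 2176-byte mbuf data
                      * room and the default 128-byte headroom): buf_size = 2048,
                      * and with the 1 KB BSIZEPKT granularity 2048 >> 10
                      * programs a BSIZEPACKET value of 2, i.e. 2 KB HW receive
                      * buffers.
                      */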
4306
4307                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4308
4309                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4310                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4311
4312                 /* Add dual VLAN tag length to support double VLAN (QinQ) frames */
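                     /*
                      * E.g. with 2 KB buffers a standard 1518-byte frame plus
                      * 2 * 4 VLAN tag bytes (1526 in total) still fits in one
                      * buffer, while a 9000-byte jumbo frame does not and
                      * forces scattered Rx.
                      */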
4313                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4314                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4315                         dev->data->scattered_rx = 1;
4316         }
4317
4318         if (rx_conf->enable_scatter)
4319                 dev->data->scattered_rx = 1;
4320
4321         /*
4322          * Device configured with multiple RX queues.
4323          */
4324         ixgbe_dev_mq_rx_configure(dev);
4325
4326         /*
4327          * Setup the Checksum Register.
4328          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4329          * Enable IP/L4 checksum computation by hardware if requested to do so.
4330          */
4331         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4332         rxcsum |= IXGBE_RXCSUM_PCSD;
4333         if (rx_conf->hw_ip_checksum)
4334                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4335         else
4336                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4337
4338         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4339
4340         if (hw->mac.type == ixgbe_mac_82599EB ||
4341             hw->mac.type == ixgbe_mac_X540) {
4342                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4343                 if (rx_conf->hw_strip_crc)
4344                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4345                 else
4346                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4347                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4348                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4349         }
4350
4351         rc = ixgbe_set_rsc(dev);
4352         if (rc)
4353                 return rc;
4354
4355         ixgbe_set_rx_function(dev);
4356
4357         return 0;
4358 }
4359
4360 /*
4361  * Initializes Transmit Unit.
4362  */
4363 void __attribute__((cold))
4364 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4365 {
4366         struct ixgbe_hw     *hw;
4367         struct ixgbe_tx_queue *txq;
4368         uint64_t bus_addr;
4369         uint32_t hlreg0;
4370         uint32_t txctrl;
4371         uint16_t i;
4372
4373         PMD_INIT_FUNC_TRACE();
4374         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4375
4376         /* Enable TX CRC (checksum offload requirement) and hw padding
4377          * (TSO requirement) */
4378         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4379         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4380         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4381
4382         /* Setup the Base and Length of the Tx Descriptor Rings */
4383         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4384                 txq = dev->data->tx_queues[i];
4385
4386                 bus_addr = txq->tx_ring_phys_addr;
4387                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4388                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4389                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4390                                 (uint32_t)(bus_addr >> 32));
4391                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4392                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4393                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4394                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4395                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4396
4397                 /*
4398                  * Disable Tx Head Writeback RO bit, since this hoses
4399                  * bookkeeping if things aren't delivered in order.
4400                  */
4401                 switch (hw->mac.type) {
4402                         case ixgbe_mac_82598EB:
4403                                 txctrl = IXGBE_READ_REG(hw,
4404                                                         IXGBE_DCA_TXCTRL(txq->reg_idx));
4405                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4406                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4407                                                 txctrl);
4408                                 break;
4409
4410                         case ixgbe_mac_82599EB:
4411                         case ixgbe_mac_X540:
4412                         case ixgbe_mac_X550:
4413                         case ixgbe_mac_X550EM_x:
4414                         case ixgbe_mac_X550EM_a:
4415                         default:
4416                                 txctrl = IXGBE_READ_REG(hw,
4417                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4418                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4419                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4420                                                 txctrl);
4421                                 break;
4422                 }
4423         }
4424
4425         /* Device configured with multiple TX queues. */
4426         ixgbe_dev_mq_tx_configure(dev);
4427 }
4428
4429 /*
4430  * Set up link for 82599 loopback mode Tx->Rx.
4431  */
4432 static inline void __attribute__((cold))
4433 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4434 {
4435         PMD_INIT_FUNC_TRACE();
4436
4437         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4438                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4439                                 IXGBE_SUCCESS) {
4440                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4441                         /* ignore error */
4442                         return;
4443                 }
4444         }
4445
4446         /* Restart link */
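             /* FLU forces the link up and LMS_10G_LINK_NO_AN selects 10G
              * without auto-negotiation, which is the setting used here for
              * Tx->Rx loopback. */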
4447         IXGBE_WRITE_REG(hw,
4448                         IXGBE_AUTOC,
4449                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4450         ixgbe_reset_pipeline_82599(hw);
4451
4452         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4453         msec_delay(50);
4454 }
4455
4456
4457 /*
4458  * Start Transmit and Receive Units.
4459  */
4460 int __attribute__((cold))
4461 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4462 {
4463         struct ixgbe_hw     *hw;
4464         struct ixgbe_tx_queue *txq;
4465         struct ixgbe_rx_queue *rxq;
4466         uint32_t txdctl;
4467         uint32_t dmatxctl;
4468         uint32_t rxctrl;
4469         uint16_t i;
4470         int ret = 0;
4471
4472         PMD_INIT_FUNC_TRACE();
4473         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4474
4475         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4476                 txq = dev->data->tx_queues[i];
4477                 /* Setup Transmit Threshold Registers */
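                     /* TXDCTL packs PTHRESH into bits 6:0, HTHRESH into bits
                      * 14:8 and WTHRESH into bits 22:16, matching the shifts
                      * below. */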
4478                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4479                 txdctl |= txq->pthresh & 0x7F;
4480                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4481                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4482                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4483         }
4484
4485         if (hw->mac.type != ixgbe_mac_82598EB) {
4486                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4487                 dmatxctl |= IXGBE_DMATXCTL_TE;
4488                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4489         }
4490
4491         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4492                 txq = dev->data->tx_queues[i];
4493                 if (!txq->tx_deferred_start) {
4494                         ret = ixgbe_dev_tx_queue_start(dev, i);
4495                         if (ret < 0)
4496                                 return ret;
4497                 }
4498         }
4499
4500         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4501                 rxq = dev->data->rx_queues[i];
4502                 if (!rxq->rx_deferred_start) {
4503                         ret = ixgbe_dev_rx_queue_start(dev, i);
4504                         if (ret < 0)
4505                                 return ret;
4506                 }
4507         }
4508
4509         /* Enable Receive engine */
4510         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4511         if (hw->mac.type == ixgbe_mac_82598EB)
4512                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4513         rxctrl |= IXGBE_RXCTRL_RXEN;
4514         hw->mac.ops.enable_rx_dma(hw, rxctrl);
4515
4516         /* If loopback mode is enabled for 82599, set up the link accordingly */
4517         if (hw->mac.type == ixgbe_mac_82599EB &&
4518                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4519                 ixgbe_setup_loopback_link_82599(hw);
4520
4521         return 0;
4522 }
4523
4524 /*
4525  * Start Receive Units for specified queue.
4526  */
4527 int __attribute__((cold))
4528 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4529 {
4530         struct ixgbe_hw     *hw;
4531         struct ixgbe_rx_queue *rxq;
4532         uint32_t rxdctl;
4533         int poll_ms;
4534
4535         PMD_INIT_FUNC_TRACE();
4536         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4537
4538         if (rx_queue_id < dev->data->nb_rx_queues) {
4539                 rxq = dev->data->rx_queues[rx_queue_id];
4540
4541                 /* Allocate buffers for descriptor rings */
4542                 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4543                         PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4544                                      rx_queue_id);
4545                         return -1;
4546                 }
4547                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4548                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4549                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4550
4551                 /* Wait until RX Enable ready */
4552                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4553                 do {
4554                         rte_delay_ms(1);
4555                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4556                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4557                 if (!poll_ms)
4558                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4559                                      rx_queue_id);
4560                 rte_wmb();
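                     /* With the ring fully populated, head stays at 0 and the
                      * tail points at the last descriptor, handing all but one
                      * entry to the hardware so head and tail never collide. */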
4561                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4562                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
4563                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4564         } else
4565                 return -1;
4566
4567         return 0;
4568 }
4569
4570 /*
4571  * Stop Receive Units for specified queue.
4572  */
4573 int __attribute__((cold))
4574 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4575 {
4576         struct ixgbe_hw     *hw;
4577         struct ixgbe_adapter *adapter =
4578                 (struct ixgbe_adapter *)dev->data->dev_private;
4579         struct ixgbe_rx_queue *rxq;
4580         uint32_t rxdctl;
4581         int poll_ms;
4582
4583         PMD_INIT_FUNC_TRACE();
4584         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4585
4586         if (rx_queue_id < dev->data->nb_rx_queues) {
4587                 rxq = dev->data->rx_queues[rx_queue_id];
4588
4589                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4590                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4591                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4592
4593                 /* Wait until RX Enable ready */
4594                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4595                 do {
4596                         rte_delay_ms(1);
4597                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4598                 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4599                 if (!poll_ms)
4600                         PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4601                                      rx_queue_id);
4602
4603                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4604
4605                 ixgbe_rx_queue_release_mbufs(rxq);
4606                 ixgbe_reset_rx_queue(adapter, rxq);
4607                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4608         } else
4609                 return -1;
4610
4611         return 0;
4612 }
4613
4614
4615 /*
4616  * Start Transmit Units for specified queue.
4617  */
4618 int __attribute__((cold))
4619 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4620 {
4621         struct ixgbe_hw     *hw;
4622         struct ixgbe_tx_queue *txq;
4623         uint32_t txdctl;
4624         int poll_ms;
4625
4626         PMD_INIT_FUNC_TRACE();
4627         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4628
4629         if (tx_queue_id < dev->data->nb_tx_queues) {
4630                 txq = dev->data->tx_queues[tx_queue_id];
4631                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4632                 txdctl |= IXGBE_TXDCTL_ENABLE;
4633                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4634
4635                 /* Wait until TX Enable ready */
4636                 if (hw->mac.type == ixgbe_mac_82599EB) {
4637                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4638                         do {
4639                                 rte_delay_ms(1);
4640                                 txdctl = IXGBE_READ_REG(hw,
4641                                         IXGBE_TXDCTL(txq->reg_idx));
4642                         } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4643                         if (!poll_ms)
4644                                 PMD_INIT_LOG(ERR, "Could not enable "
4645                                              "Tx Queue %d", tx_queue_id);
4646                 }
4647                 rte_wmb();
4648                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4649                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4650                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4651         } else
4652                 return -1;
4653
4654         return 0;
4655 }
4656
4657 /*
4658  * Stop Transmit Units for specified queue.
4659  */
4660 int __attribute__((cold))
4661 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4662 {
4663         struct ixgbe_hw     *hw;
4664         struct ixgbe_tx_queue *txq;
4665         uint32_t txdctl;
4666         uint32_t txtdh, txtdt;
4667         int poll_ms;
4668
4669         PMD_INIT_FUNC_TRACE();
4670         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4671
4672         if (tx_queue_id < dev->data->nb_tx_queues) {
4673                 txq = dev->data->tx_queues[tx_queue_id];
4674
4675                 /* Wait until TX queue is empty */
4676                 if (hw->mac.type == ixgbe_mac_82599EB) {
4677                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4678                         do {
4679                                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4680                                 txtdh = IXGBE_READ_REG(hw,
4681                                                 IXGBE_TDH(txq->reg_idx));
4682                                 txtdt = IXGBE_READ_REG(hw,
4683                                                 IXGBE_TDT(txq->reg_idx));
4684                         } while (--poll_ms && (txtdh != txtdt));
4685                         if (!poll_ms)
4686                                 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4687                                              "when stopping.", tx_queue_id);
4688                 }
4689
4690                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4691                 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4692                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4693
4694                 /* Wait until TX Enable ready */
4695                 if (hw->mac.type == ixgbe_mac_82599EB) {
4696                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4697                         do {
4698                                 rte_delay_ms(1);
4699                                 txdctl = IXGBE_READ_REG(hw,
4700                                                 IXGBE_TXDCTL(txq->reg_idx));
4701                         } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4702                         if (!poll_ms)
4703                                 PMD_INIT_LOG(ERR, "Could not disable "
4704                                              "Tx Queue %d", tx_queue_id);
4705                 }
4706
4707                 if (txq->ops != NULL) {
4708                         txq->ops->release_mbufs(txq);
4709                         txq->ops->reset(txq);
4710                 }
4711                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4712         } else
4713                 return -1;
4714
4715         return 0;
4716 }
4717
4718 void
4719 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4720         struct rte_eth_rxq_info *qinfo)
4721 {
4722         struct ixgbe_rx_queue *rxq;
4723
4724         rxq = dev->data->rx_queues[queue_id];
4725
4726         qinfo->mp = rxq->mb_pool;
4727         qinfo->scattered_rx = dev->data->scattered_rx;
4728         qinfo->nb_desc = rxq->nb_rx_desc;
4729
4730         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4731         qinfo->conf.rx_drop_en = rxq->drop_en;
4732         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4733 }
4734
4735 void
4736 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4737         struct rte_eth_txq_info *qinfo)
4738 {
4739         struct ixgbe_tx_queue *txq;
4740
4741         txq = dev->data->tx_queues[queue_id];
4742
4743         qinfo->nb_desc = txq->nb_tx_desc;
4744
4745         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4746         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4747         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4748
4749         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4750         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4751         qinfo->conf.txq_flags = txq->txq_flags;
4752         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4753 }
4754
4755 /*
4756  * [VF] Initializes Receive Unit.
4757  */
4758 int __attribute__((cold))
4759 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4760 {
4761         struct ixgbe_hw     *hw;
4762         struct ixgbe_rx_queue *rxq;
4763         uint64_t bus_addr;
4764         uint32_t srrctl, psrtype = 0;
4765         uint16_t buf_size;
4766         uint16_t i;
4767         int ret;
4768
4769         PMD_INIT_FUNC_TRACE();
4770         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4771
4772         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4773                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4774                         "it should be a power of 2");
4775                 return -1;
4776         }
4777
4778         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4779                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4780                         "it should be equal to or less than %d",
4781                         hw->mac.max_rx_queues);
4782                 return -1;
4783         }
4784
4785         /*
4786          * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4787          * disables VF receipt of packets if the PF MTU is > 1500.
4788          * This is done to deal with the 82599 limitation that forces
4789          * the PF and all VFs to share the same MTU.
4790          * Then, the PF driver re-enables VF receipt of packets when
4791          * the VF driver issues an IXGBE_VF_SET_LPE request.
4792          * In the meantime, the VF device cannot be used, even if the VF driver
4793          * and the Guest VM network stack are ready to accept packets with a
4794          * size up to the PF MTU.
4795          * As a work-around to this PF behaviour, force the call to
4796          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4797          * VF packet reception can work in all cases.
4798          */
4799         ixgbevf_rlpml_set_vf(hw,
4800                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4801
4802         /* Setup RX queues */
4803         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4804                 rxq = dev->data->rx_queues[i];
4805
4806                 /* Allocate buffers for descriptor rings */
4807                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4808                 if (ret)
4809                         return ret;
4810
4811                 /* Setup the Base and Length of the Rx Descriptor Rings */
4812                 bus_addr = rxq->rx_ring_phys_addr;
4813
4814                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4815                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4816                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4817                                 (uint32_t)(bus_addr >> 32));
4818                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4819                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4820                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4821                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4822
4823
4824                 /* Configure the SRRCTL register */
4825 #ifdef RTE_HEADER_SPLIT_ENABLE
4826                 /*
4827                  * Configure Header Split
4828                  */
4829                 if (dev->data->dev_conf.rxmode.header_split) {
4830                         srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4831                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4832                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4833                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4834                 } else
4835 #endif
4836                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4837
4838                 /* Set if packets are dropped when no descriptors are available */
4839                 if (rxq->drop_en)
4840                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4841
4842                 /*
4843                  * Configure the RX buffer size in the BSIZEPACKET field of
4844                  * the SRRCTL register of the queue.
4845                  * The value is in 1 KB resolution. Valid values can be from
4846                  * 1 KB to 16 KB.
4847                  */
4848                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4849                         RTE_PKTMBUF_HEADROOM);
4850                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4851                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4852
4853                 /*
4854                  * VF modification to write virtual function SRRCTL register
4855                  */
4856                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4857
4858                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4859                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4860
4861                 if (dev->data->dev_conf.rxmode.enable_scatter ||
4862                     /* Add dual VLAN tag length to support double VLAN (QinQ) frames */
4863                     (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4864                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4865                         if (!dev->data->scattered_rx)
4866                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4867                         dev->data->scattered_rx = 1;
4868                 }
4869         }
4870
4871 #ifdef RTE_HEADER_SPLIT_ENABLE
4872         if (dev->data->dev_conf.rxmode.header_split)
4873                 /* Must setup the PSRTYPE register */
4874                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4875                         IXGBE_PSRTYPE_UDPHDR   |
4876                         IXGBE_PSRTYPE_IPV4HDR  |
4877                         IXGBE_PSRTYPE_IPV6HDR;
4878 #endif
4879
4880         /* Set RQPL for VF RSS according to max Rx queue */
4881         psrtype |= (dev->data->nb_rx_queues >> 1) <<
4882                 IXGBE_PSRTYPE_RQPL_SHIFT;
4883         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4884
4885         ixgbe_set_rx_function(dev);
4886
4887         return 0;
4888 }
4889
4890 /*
4891  * [VF] Initializes Transmit Unit.
4892  */
4893 void __attribute__((cold))
4894 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4895 {
4896         struct ixgbe_hw     *hw;
4897         struct ixgbe_tx_queue *txq;
4898         uint64_t bus_addr;
4899         uint32_t txctrl;
4900         uint16_t i;
4901
4902         PMD_INIT_FUNC_TRACE();
4903         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4904
4905         /* Setup the Base and Length of the Tx Descriptor Rings */
4906         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4907                 txq = dev->data->tx_queues[i];
4908                 bus_addr = txq->tx_ring_phys_addr;
4909                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4910                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4911                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4912                                 (uint32_t)(bus_addr >> 32));
4913                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4914                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4915                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4916                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4917                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4918
4919                 /*
4920                  * Disable Tx Head Writeback RO bit, since this hoses
4921                  * bookkeeping if things aren't delivered in order.
4922                  */
4923                 txctrl = IXGBE_READ_REG(hw,
4924                                 IXGBE_VFDCA_TXCTRL(i));
4925                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4926                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4927                                 txctrl);
4928         }
4929 }
4930
4931 /*
4932  * [VF] Start Transmit and Receive Units.
4933  */
4934 void __attribute__((cold))
4935 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4936 {
4937         struct ixgbe_hw     *hw;
4938         struct ixgbe_tx_queue *txq;
4939         struct ixgbe_rx_queue *rxq;
4940         uint32_t txdctl;
4941         uint32_t rxdctl;
4942         uint16_t i;
4943         int poll_ms;
4944
4945         PMD_INIT_FUNC_TRACE();
4946         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4947
4948         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4949                 txq = dev->data->tx_queues[i];
4950                 /* Setup Transmit Threshold Registers */
4951                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4952                 txdctl |= txq->pthresh & 0x7F;
4953                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4954                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4955                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4956         }
4957
4958         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4959
4960                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4961                 txdctl |= IXGBE_TXDCTL_ENABLE;
4962                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4963
4964                 poll_ms = 10;
4965                 /* Wait until TX Enable ready */
4966                 do {
4967                         rte_delay_ms(1);
4968                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4969                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4970                 if (!poll_ms)
4971                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4972         }
4973         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4974
4975                 rxq = dev->data->rx_queues[i];
4976
4977                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4978                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4979                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4980
4981                 /* Wait until RX Enable ready */
4982                 poll_ms = 10;
4983                 do {
4984                         rte_delay_ms(1);
4985                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4986                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4987                 if (!poll_ms)
4988                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4989                 rte_wmb();
4990                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4991
4992         }
4993 }
4994
4995 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4996 int __attribute__((weak))
4997 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
4998 {
4999         return -1;
5000 }
5001
5002 uint16_t __attribute__((weak))
5003 ixgbe_recv_pkts_vec(
5004         void __rte_unused *rx_queue,
5005         struct rte_mbuf __rte_unused **rx_pkts,
5006         uint16_t __rte_unused nb_pkts)
5007 {
5008         return 0;
5009 }
5010
5011 uint16_t __attribute__((weak))
5012 ixgbe_recv_scattered_pkts_vec(
5013         void __rte_unused *rx_queue,
5014         struct rte_mbuf __rte_unused **rx_pkts,
5015         uint16_t __rte_unused nb_pkts)
5016 {
5017         return 0;
5018 }
5019
5020 int __attribute__((weak))
5021 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5022 {
5023         return -1;
5024 }