ixgbe: support new devices and MAC types
[dpdk.git] / drivers / net / ixgbe / ixgbe_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   Copyright 2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <sys/queue.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <errno.h>
41 #include <stdint.h>
42 #include <stdarg.h>
43 #include <unistd.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
49 #include <rte_log.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
52 #include <rte_pci.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
68 #include <rte_udp.h>
69 #include <rte_tcp.h>
70 #include <rte_sctp.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
73 #include <rte_ip.h>
74
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
82
83 /* Bit Mask to indicate what bits required for building TX context */
84 #define IXGBE_TX_OFFLOAD_MASK (                  \
85                 PKT_TX_VLAN_PKT |                \
86                 PKT_TX_IP_CKSUM |                \
87                 PKT_TX_L4_MASK |                 \
88                 PKT_TX_TCP_SEG |                 \
89                 PKT_TX_OUTER_IP_CKSUM)
90
91 static inline struct rte_mbuf *
92 rte_rxmbuf_alloc(struct rte_mempool *mp)
93 {
94         struct rte_mbuf *m;
95
96         m = __rte_mbuf_raw_alloc(mp);
97         __rte_mbuf_sanity_check_raw(m, 0);
98         return m;
99 }
100
101
102 #if 1
103 #define RTE_PMD_USE_PREFETCH
104 #endif
105
106 #ifdef RTE_PMD_USE_PREFETCH
107 /*
108  * Prefetch a cache line into all cache levels.
109  */
110 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
111 #else
112 #define rte_ixgbe_prefetch(p)   do {} while (0)
113 #endif
114
115 /*********************************************************************
116  *
117  *  TX functions
118  *
119  **********************************************************************/
120
121 /*
122  * Check for descriptors with their DD bit set and free mbufs.
123  * Return the total number of buffers freed.
124  */
125 static inline int __attribute__((always_inline))
126 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
127 {
128         struct ixgbe_tx_entry *txep;
129         uint32_t status;
130         int i, nb_free = 0;
131         struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
132
133         /* check DD bit on threshold descriptor */
134         status = txq->tx_ring[txq->tx_next_dd].wb.status;
135         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
136                 return 0;
137
138         /*
139          * first buffer to free from S/W ring is at index
140          * tx_next_dd - (tx_rs_thresh-1)
141          */
142         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
143
144         for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145                 /* free buffers one at a time */
146                 m = __rte_pktmbuf_prefree_seg(txep->mbuf);
147                 txep->mbuf = NULL;
148
149                 if (unlikely(m == NULL))
150                         continue;
151
152                 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
153                     (nb_free > 0 && m->pool != free[0]->pool)) {
154                         rte_mempool_put_bulk(free[0]->pool,
155                                              (void **)free, nb_free);
156                         nb_free = 0;
157                 }
158
159                 free[nb_free++] = m;
160         }
161
162         if (nb_free > 0)
163                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
164
165         /* buffers were freed, update counters */
166         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
167         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
168         if (txq->tx_next_dd >= txq->nb_tx_desc)
169                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
170
171         return txq->tx_rs_thresh;
172 }
173
174 /* Populate 4 descriptors with data from 4 mbufs */
175 static inline void
176 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
177 {
178         uint64_t buf_dma_addr;
179         uint32_t pkt_len;
180         int i;
181
182         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
183                 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
184                 pkt_len = (*pkts)->data_len;
185
186                 /* write data to descriptor */
187                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
188
189                 txdp->read.cmd_type_len =
190                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
191
192                 txdp->read.olinfo_status =
193                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
194
195                 rte_prefetch0(&(*pkts)->pool);
196         }
197 }
198
199 /* Populate 1 descriptor with data from 1 mbuf */
200 static inline void
201 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
202 {
203         uint64_t buf_dma_addr;
204         uint32_t pkt_len;
205
206         buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
207         pkt_len = (*pkts)->data_len;
208
209         /* write data to descriptor */
210         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
211         txdp->read.cmd_type_len =
212                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
213         txdp->read.olinfo_status =
214                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
215         rte_prefetch0(&(*pkts)->pool);
216 }
217
218 /*
219  * Fill H/W descriptor ring with mbuf data.
220  * Copy mbuf pointers to the S/W ring.
221  */
222 static inline void
223 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
224                       uint16_t nb_pkts)
225 {
226         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
227         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
228         const int N_PER_LOOP = 4;
229         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
230         int mainpart, leftover;
231         int i, j;
232
233         /*
234          * Process most of the packets in chunks of N pkts.  Any
235          * leftover packets will get processed one at a time.
236          */
237         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
238         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
239         for (i = 0; i < mainpart; i += N_PER_LOOP) {
240                 /* Copy N mbuf pointers to the S/W ring */
241                 for (j = 0; j < N_PER_LOOP; ++j) {
242                         (txep + i + j)->mbuf = *(pkts + i + j);
243                 }
244                 tx4(txdp + i, pkts + i);
245         }
246
247         if (unlikely(leftover > 0)) {
248                 for (i = 0; i < leftover; ++i) {
249                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
250                         tx1(txdp + mainpart + i, pkts + mainpart + i);
251                 }
252         }
253 }
254
255 static inline uint16_t
256 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
257              uint16_t nb_pkts)
258 {
259         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
260         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
261         uint16_t n = 0;
262
263         /*
264          * Begin scanning the H/W ring for done descriptors when the
265          * number of available descriptors drops below tx_free_thresh.  For
266          * each done descriptor, free the associated buffer.
267          */
268         if (txq->nb_tx_free < txq->tx_free_thresh)
269                 ixgbe_tx_free_bufs(txq);
270
271         /* Only use descriptors that are available */
272         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
273         if (unlikely(nb_pkts == 0))
274                 return 0;
275
276         /* Use exactly nb_pkts descriptors */
277         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
278
279         /*
280          * At this point, we know there are enough descriptors in the
281          * ring to transmit all the packets.  This assumes that each
282          * mbuf contains a single segment, and that no new offloads
283          * are expected, which would require a new context descriptor.
284          */
285
286         /*
287          * See if we're going to wrap-around. If so, handle the top
288          * of the descriptor ring first, then do the bottom.  If not,
289          * the processing looks just like the "bottom" part anyway...
290          */
291         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
292                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
293                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
294
295                 /*
296                  * We know that the last descriptor in the ring will need to
297                  * have its RS bit set because tx_rs_thresh has to be
298                  * a divisor of the ring size
299                  */
300                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
301                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
302                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
303
304                 txq->tx_tail = 0;
305         }
306
307         /* Fill H/W descriptor ring with mbuf data */
308         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
309         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
310
311         /*
312          * Determine if RS bit should be set
313          * This is what we actually want:
314          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
315          * but instead of subtracting 1 and doing >=, we can just do
316          * greater than without subtracting.
317          */
318         if (txq->tx_tail > txq->tx_next_rs) {
319                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
320                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
321                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
322                                                 txq->tx_rs_thresh);
323                 if (txq->tx_next_rs >= txq->nb_tx_desc)
324                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
325         }
326
327         /*
328          * Check for wrap-around. This would only happen if we used
329          * up to the last descriptor in the ring, no more, no less.
330          */
331         if (txq->tx_tail >= txq->nb_tx_desc)
332                 txq->tx_tail = 0;
333
334         /* update tail pointer */
335         rte_wmb();
336         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
337
338         return nb_pkts;
339 }
340
341 uint16_t
342 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
343                        uint16_t nb_pkts)
344 {
345         uint16_t nb_tx;
346
347         /* Try to transmit at least chunks of TX_MAX_BURST pkts */
348         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
349                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
350
351         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
352         nb_tx = 0;
353         while (nb_pkts) {
354                 uint16_t ret, n;
355                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
356                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
357                 nb_tx = (uint16_t)(nb_tx + ret);
358                 nb_pkts = (uint16_t)(nb_pkts - ret);
359                 if (ret < n)
360                         break;
361         }
362
363         return nb_tx;
364 }
365
366 static inline void
367 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
368                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
369                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
370 {
371         uint32_t type_tucmd_mlhl;
372         uint32_t mss_l4len_idx = 0;
373         uint32_t ctx_idx;
374         uint32_t vlan_macip_lens;
375         union ixgbe_tx_offload tx_offload_mask;
376         uint32_t seqnum_seed = 0;
377
378         ctx_idx = txq->ctx_curr;
379         tx_offload_mask.data[0] = 0;
380         tx_offload_mask.data[1] = 0;
381         type_tucmd_mlhl = 0;
382
383         /* Specify which HW CTX to upload. */
384         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
385
386         if (ol_flags & PKT_TX_VLAN_PKT) {
387                 tx_offload_mask.vlan_tci |= ~0;
388         }
389
390         /* check if TCP segmentation required for this packet */
391         if (ol_flags & PKT_TX_TCP_SEG) {
392                 /* implies IP cksum in IPv4 */
393                 if (ol_flags & PKT_TX_IP_CKSUM)
394                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
395                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
396                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
397                 else
398                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
399                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
400                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
401
402                 tx_offload_mask.l2_len |= ~0;
403                 tx_offload_mask.l3_len |= ~0;
404                 tx_offload_mask.l4_len |= ~0;
405                 tx_offload_mask.tso_segsz |= ~0;
406                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
407                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
408         } else { /* no TSO, check if hardware checksum is needed */
409                 if (ol_flags & PKT_TX_IP_CKSUM) {
410                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
411                         tx_offload_mask.l2_len |= ~0;
412                         tx_offload_mask.l3_len |= ~0;
413                 }
414
415                 switch (ol_flags & PKT_TX_L4_MASK) {
416                 case PKT_TX_UDP_CKSUM:
417                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
418                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
419                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
420                         tx_offload_mask.l2_len |= ~0;
421                         tx_offload_mask.l3_len |= ~0;
422                         break;
423                 case PKT_TX_TCP_CKSUM:
424                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
425                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
426                         mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
427                         tx_offload_mask.l2_len |= ~0;
428                         tx_offload_mask.l3_len |= ~0;
429                         break;
430                 case PKT_TX_SCTP_CKSUM:
431                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
432                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433                         mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
434                         tx_offload_mask.l2_len |= ~0;
435                         tx_offload_mask.l3_len |= ~0;
436                         break;
437                 default:
438                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
439                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
440                         break;
441                 }
442         }
443
444         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
445                 tx_offload_mask.outer_l2_len |= ~0;
446                 tx_offload_mask.outer_l3_len |= ~0;
447                 tx_offload_mask.l2_len |= ~0;
448                 seqnum_seed |= tx_offload.outer_l3_len
449                                << IXGBE_ADVTXD_OUTER_IPLEN;
450                 seqnum_seed |= tx_offload.l2_len
451                                << IXGBE_ADVTXD_TUNNEL_LEN;
452         }
453
454         txq->ctx_cache[ctx_idx].flags = ol_flags;
455         txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
456                 tx_offload_mask.data[0] & tx_offload.data[0];
457         txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
458                 tx_offload_mask.data[1] & tx_offload.data[1];
459         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
460
461         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
462         vlan_macip_lens = tx_offload.l3_len;
463         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
464                 vlan_macip_lens |= (tx_offload.outer_l2_len <<
465                                     IXGBE_ADVTXD_MACLEN_SHIFT);
466         else
467                 vlan_macip_lens |= (tx_offload.l2_len <<
468                                     IXGBE_ADVTXD_MACLEN_SHIFT);
469         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
470         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
471         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
472         ctx_txd->seqnum_seed     = seqnum_seed;
473 }
474
475 /*
476  * Check which hardware context can be used. Use the existing match
477  * or create a new context descriptor.
478  */
479 static inline uint32_t
480 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
481                 union ixgbe_tx_offload tx_offload)
482 {
483         /* If match with the current used context */
484         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
485                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
486                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
487                  & tx_offload.data[0])) &&
488                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
489                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
490                  & tx_offload.data[1])))) {
491                         return txq->ctx_curr;
492         }
493
494         /* What if match with the next context  */
495         txq->ctx_curr ^= 1;
496         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
497                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
498                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
499                  & tx_offload.data[0])) &&
500                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
501                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
502                  & tx_offload.data[1])))) {
503                         return txq->ctx_curr;
504         }
505
506         /* Mismatch, use the previous context */
507         return IXGBE_CTX_NUM;
508 }
509
510 static inline uint32_t
511 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
512 {
513         uint32_t tmp = 0;
514         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
515                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
516         if (ol_flags & PKT_TX_IP_CKSUM)
517                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
518         if (ol_flags & PKT_TX_TCP_SEG)
519                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
520         return tmp;
521 }
522
523 static inline uint32_t
524 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
525 {
526         uint32_t cmdtype = 0;
527         if (ol_flags & PKT_TX_VLAN_PKT)
528                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
529         if (ol_flags & PKT_TX_TCP_SEG)
530                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
531         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
532                 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
533         return cmdtype;
534 }
535
536 /* Default RS bit threshold values */
537 #ifndef DEFAULT_TX_RS_THRESH
538 #define DEFAULT_TX_RS_THRESH   32
539 #endif
540 #ifndef DEFAULT_TX_FREE_THRESH
541 #define DEFAULT_TX_FREE_THRESH 32
542 #endif
543
544 /* Reset transmit descriptors after they have been used */
545 static inline int
546 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
547 {
548         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
549         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
550         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
551         uint16_t nb_tx_desc = txq->nb_tx_desc;
552         uint16_t desc_to_clean_to;
553         uint16_t nb_tx_to_clean;
554         uint32_t status;
555
556         /* Determine the last descriptor needing to be cleaned */
557         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
558         if (desc_to_clean_to >= nb_tx_desc)
559                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
560
561         /* Check to make sure the last descriptor to clean is done */
562         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
563         status = txr[desc_to_clean_to].wb.status;
564         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
565         {
566                 PMD_TX_FREE_LOG(DEBUG,
567                                 "TX descriptor %4u is not done"
568                                 "(port=%d queue=%d)",
569                                 desc_to_clean_to,
570                                 txq->port_id, txq->queue_id);
571                 /* Failed to clean any descriptors, better luck next time */
572                 return -(1);
573         }
574
575         /* Figure out how many descriptors will be cleaned */
576         if (last_desc_cleaned > desc_to_clean_to)
577                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
578                                                         desc_to_clean_to);
579         else
580                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
581                                                 last_desc_cleaned);
582
583         PMD_TX_FREE_LOG(DEBUG,
584                         "Cleaning %4u TX descriptors: %4u to %4u "
585                         "(port=%d queue=%d)",
586                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
587                         txq->port_id, txq->queue_id);
588
589         /*
590          * The last descriptor to clean is done, so that means all the
591          * descriptors from the last descriptor that was cleaned
592          * up to the last descriptor with the RS bit set
593          * are done. Only reset the threshold descriptor.
594          */
595         txr[desc_to_clean_to].wb.status = 0;
596
597         /* Update the txq to reflect the last descriptor that was cleaned */
598         txq->last_desc_cleaned = desc_to_clean_to;
599         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
600
601         /* No Error */
602         return 0;
603 }
604
605 uint16_t
606 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
607                 uint16_t nb_pkts)
608 {
609         struct ixgbe_tx_queue *txq;
610         struct ixgbe_tx_entry *sw_ring;
611         struct ixgbe_tx_entry *txe, *txn;
612         volatile union ixgbe_adv_tx_desc *txr;
613         volatile union ixgbe_adv_tx_desc *txd, *txp;
614         struct rte_mbuf     *tx_pkt;
615         struct rte_mbuf     *m_seg;
616         uint64_t buf_dma_addr;
617         uint32_t olinfo_status;
618         uint32_t cmd_type_len;
619         uint32_t pkt_len;
620         uint16_t slen;
621         uint64_t ol_flags;
622         uint16_t tx_id;
623         uint16_t tx_last;
624         uint16_t nb_tx;
625         uint16_t nb_used;
626         uint64_t tx_ol_req;
627         uint32_t ctx = 0;
628         uint32_t new_ctx;
629         union ixgbe_tx_offload tx_offload;
630
631         tx_offload.data[0] = 0;
632         tx_offload.data[1] = 0;
633         txq = tx_queue;
634         sw_ring = txq->sw_ring;
635         txr     = txq->tx_ring;
636         tx_id   = txq->tx_tail;
637         txe = &sw_ring[tx_id];
638         txp = NULL;
639
640         /* Determine if the descriptor ring needs to be cleaned. */
641         if (txq->nb_tx_free < txq->tx_free_thresh)
642                 ixgbe_xmit_cleanup(txq);
643
644         rte_prefetch0(&txe->mbuf->pool);
645
646         /* TX loop */
647         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
648                 new_ctx = 0;
649                 tx_pkt = *tx_pkts++;
650                 pkt_len = tx_pkt->pkt_len;
651
652                 /*
653                  * Determine how many (if any) context descriptors
654                  * are needed for offload functionality.
655                  */
656                 ol_flags = tx_pkt->ol_flags;
657
658                 /* If hardware offload required */
659                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
660                 if (tx_ol_req) {
661                         tx_offload.l2_len = tx_pkt->l2_len;
662                         tx_offload.l3_len = tx_pkt->l3_len;
663                         tx_offload.l4_len = tx_pkt->l4_len;
664                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
665                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
666                         tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
667                         tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
668
669                         /* If new context need be built or reuse the exist ctx. */
670                         ctx = what_advctx_update(txq, tx_ol_req,
671                                 tx_offload);
672                         /* Only allocate context descriptor if required*/
673                         new_ctx = (ctx == IXGBE_CTX_NUM);
674                         ctx = txq->ctx_curr;
675                 }
676
677                 /*
678                  * Keep track of how many descriptors are used this loop
679                  * This will always be the number of segments + the number of
680                  * Context descriptors required to transmit the packet
681                  */
682                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
683
684                 if (txp != NULL &&
685                                 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
686                         /* set RS on the previous packet in the burst */
687                         txp->read.cmd_type_len |=
688                                 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
689
690                 /*
691                  * The number of descriptors that must be allocated for a
692                  * packet is the number of segments of that packet, plus 1
693                  * Context Descriptor for the hardware offload, if any.
694                  * Determine the last TX descriptor to allocate in the TX ring
695                  * for the packet, starting from the current position (tx_id)
696                  * in the ring.
697                  */
698                 tx_last = (uint16_t) (tx_id + nb_used - 1);
699
700                 /* Circular ring */
701                 if (tx_last >= txq->nb_tx_desc)
702                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
703
704                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
705                            " tx_first=%u tx_last=%u",
706                            (unsigned) txq->port_id,
707                            (unsigned) txq->queue_id,
708                            (unsigned) pkt_len,
709                            (unsigned) tx_id,
710                            (unsigned) tx_last);
711
712                 /*
713                  * Make sure there are enough TX descriptors available to
714                  * transmit the entire packet.
715                  * nb_used better be less than or equal to txq->tx_rs_thresh
716                  */
717                 if (nb_used > txq->nb_tx_free) {
718                         PMD_TX_FREE_LOG(DEBUG,
719                                         "Not enough free TX descriptors "
720                                         "nb_used=%4u nb_free=%4u "
721                                         "(port=%d queue=%d)",
722                                         nb_used, txq->nb_tx_free,
723                                         txq->port_id, txq->queue_id);
724
725                         if (ixgbe_xmit_cleanup(txq) != 0) {
726                                 /* Could not clean any descriptors */
727                                 if (nb_tx == 0)
728                                         return 0;
729                                 goto end_of_tx;
730                         }
731
732                         /* nb_used better be <= txq->tx_rs_thresh */
733                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
734                                 PMD_TX_FREE_LOG(DEBUG,
735                                         "The number of descriptors needed to "
736                                         "transmit the packet exceeds the "
737                                         "RS bit threshold. This will impact "
738                                         "performance."
739                                         "nb_used=%4u nb_free=%4u "
740                                         "tx_rs_thresh=%4u. "
741                                         "(port=%d queue=%d)",
742                                         nb_used, txq->nb_tx_free,
743                                         txq->tx_rs_thresh,
744                                         txq->port_id, txq->queue_id);
745                                 /*
746                                  * Loop here until there are enough TX
747                                  * descriptors or until the ring cannot be
748                                  * cleaned.
749                                  */
750                                 while (nb_used > txq->nb_tx_free) {
751                                         if (ixgbe_xmit_cleanup(txq) != 0) {
752                                                 /*
753                                                  * Could not clean any
754                                                  * descriptors
755                                                  */
756                                                 if (nb_tx == 0)
757                                                         return 0;
758                                                 goto end_of_tx;
759                                         }
760                                 }
761                         }
762                 }
763
764                 /*
765                  * By now there are enough free TX descriptors to transmit
766                  * the packet.
767                  */
768
769                 /*
770                  * Set common flags of all TX Data Descriptors.
771                  *
772                  * The following bits must be set in all Data Descriptors:
773                  *   - IXGBE_ADVTXD_DTYP_DATA
774                  *   - IXGBE_ADVTXD_DCMD_DEXT
775                  *
776                  * The following bits must be set in the first Data Descriptor
777                  * and are ignored in the other ones:
778                  *   - IXGBE_ADVTXD_DCMD_IFCS
779                  *   - IXGBE_ADVTXD_MAC_1588
780                  *   - IXGBE_ADVTXD_DCMD_VLE
781                  *
782                  * The following bits must only be set in the last Data
783                  * Descriptor:
784                  *   - IXGBE_TXD_CMD_EOP
785                  *
786                  * The following bits can be set in any Data Descriptor, but
787                  * are only set in the last Data Descriptor:
788                  *   - IXGBE_TXD_CMD_RS
789                  */
790                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
791                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
792
793 #ifdef RTE_LIBRTE_IEEE1588
794                 if (ol_flags & PKT_TX_IEEE1588_TMST)
795                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
796 #endif
797
798                 olinfo_status = 0;
799                 if (tx_ol_req) {
800
801                         if (ol_flags & PKT_TX_TCP_SEG) {
802                                 /* when TSO is on, paylen in descriptor is the
803                                  * not the packet len but the tcp payload len */
804                                 pkt_len -= (tx_offload.l2_len +
805                                         tx_offload.l3_len + tx_offload.l4_len);
806                         }
807
808                         /*
809                          * Setup the TX Advanced Context Descriptor if required
810                          */
811                         if (new_ctx) {
812                                 volatile struct ixgbe_adv_tx_context_desc *
813                                     ctx_txd;
814
815                                 ctx_txd = (volatile struct
816                                     ixgbe_adv_tx_context_desc *)
817                                     &txr[tx_id];
818
819                                 txn = &sw_ring[txe->next_id];
820                                 rte_prefetch0(&txn->mbuf->pool);
821
822                                 if (txe->mbuf != NULL) {
823                                         rte_pktmbuf_free_seg(txe->mbuf);
824                                         txe->mbuf = NULL;
825                                 }
826
827                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
828                                         tx_offload);
829
830                                 txe->last_id = tx_last;
831                                 tx_id = txe->next_id;
832                                 txe = txn;
833                         }
834
835                         /*
836                          * Setup the TX Advanced Data Descriptor,
837                          * This path will go through
838                          * whatever new/reuse the context descriptor
839                          */
840                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
841                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
842                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
843                 }
844
845                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
846
847                 m_seg = tx_pkt;
848                 do {
849                         txd = &txr[tx_id];
850                         txn = &sw_ring[txe->next_id];
851                         rte_prefetch0(&txn->mbuf->pool);
852
853                         if (txe->mbuf != NULL)
854                                 rte_pktmbuf_free_seg(txe->mbuf);
855                         txe->mbuf = m_seg;
856
857                         /*
858                          * Set up Transmit Data Descriptor.
859                          */
860                         slen = m_seg->data_len;
861                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
862                         txd->read.buffer_addr =
863                                 rte_cpu_to_le_64(buf_dma_addr);
864                         txd->read.cmd_type_len =
865                                 rte_cpu_to_le_32(cmd_type_len | slen);
866                         txd->read.olinfo_status =
867                                 rte_cpu_to_le_32(olinfo_status);
868                         txe->last_id = tx_last;
869                         tx_id = txe->next_id;
870                         txe = txn;
871                         m_seg = m_seg->next;
872                 } while (m_seg != NULL);
873
874                 /*
875                  * The last packet data descriptor needs End Of Packet (EOP)
876                  */
877                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
878                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
879                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
880
881                 /* Set RS bit only on threshold packets' last descriptor */
882                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
883                         PMD_TX_FREE_LOG(DEBUG,
884                                         "Setting RS bit on TXD id="
885                                         "%4u (port=%d queue=%d)",
886                                         tx_last, txq->port_id, txq->queue_id);
887
888                         cmd_type_len |= IXGBE_TXD_CMD_RS;
889
890                         /* Update txq RS bit counters */
891                         txq->nb_tx_used = 0;
892                         txp = NULL;
893                 } else
894                         txp = txd;
895
896                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
897         }
898
899 end_of_tx:
900         /* set RS on last packet in the burst */
901         if (txp != NULL)
902                 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
903
904         rte_wmb();
905
906         /*
907          * Set the Transmit Descriptor Tail (TDT)
908          */
909         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
910                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
911                    (unsigned) tx_id, (unsigned) nb_tx);
912         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
913         txq->tx_tail = tx_id;
914
915         return nb_tx;
916 }
917
918 /*********************************************************************
919  *
920  *  RX functions
921  *
922  **********************************************************************/
923 #define IXGBE_PACKET_TYPE_IPV4              0X01
924 #define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
925 #define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
926 #define IXGBE_PACKET_TYPE_IPV4_SCTP         0X41
927 #define IXGBE_PACKET_TYPE_IPV4_EXT          0X03
928 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP     0X43
929 #define IXGBE_PACKET_TYPE_IPV6              0X04
930 #define IXGBE_PACKET_TYPE_IPV6_TCP          0X14
931 #define IXGBE_PACKET_TYPE_IPV6_UDP          0X24
932 #define IXGBE_PACKET_TYPE_IPV6_EXT          0X0C
933 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP      0X1C
934 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP      0X2C
935 #define IXGBE_PACKET_TYPE_IPV4_IPV6         0X05
936 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP     0X15
937 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP     0X25
938 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
939 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
940 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
941 #define IXGBE_PACKET_TYPE_MAX               0X80
942 #define IXGBE_PACKET_TYPE_MASK              0X7F
943 #define IXGBE_PACKET_TYPE_SHIFT             0X04
944 static inline uint32_t
945 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
946 {
947         static const uint32_t
948                 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
949                 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
950                         RTE_PTYPE_L3_IPV4,
951                 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
952                         RTE_PTYPE_L3_IPV4_EXT,
953                 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
954                         RTE_PTYPE_L3_IPV6,
955                 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
956                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
957                         RTE_PTYPE_INNER_L3_IPV6,
958                 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
959                         RTE_PTYPE_L3_IPV6_EXT,
960                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
961                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
962                         RTE_PTYPE_INNER_L3_IPV6_EXT,
963                 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
964                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
965                 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
966                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
967                 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
968                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
969                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
970                 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
971                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
972                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
973                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
974                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
975                 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
976                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
977                 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
978                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
979                 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
980                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
981                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
982                 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
983                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
984                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
985                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
986                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
987                 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
988                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
989                 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
990                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
991         };
992         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
993                 return RTE_PTYPE_UNKNOWN;
994
995         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
996                                 IXGBE_PACKET_TYPE_MASK;
997
998         return ptype_table[pkt_info];
999 }
1000
1001 static inline uint64_t
1002 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1003 {
1004         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1005                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1006                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1007                 PKT_RX_RSS_HASH, 0, 0, 0,
1008                 0, 0, 0,  PKT_RX_FDIR,
1009         };
1010 #ifdef RTE_LIBRTE_IEEE1588
1011         static uint64_t ip_pkt_etqf_map[8] = {
1012                 0, 0, 0, PKT_RX_IEEE1588_PTP,
1013                 0, 0, 0, 0,
1014         };
1015
1016         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1017                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1018                                 ip_rss_types_map[pkt_info & 0XF];
1019         else
1020                 return ip_rss_types_map[pkt_info & 0XF];
1021 #else
1022         return ip_rss_types_map[pkt_info & 0XF];
1023 #endif
1024 }
1025
1026 static inline uint64_t
1027 rx_desc_status_to_pkt_flags(uint32_t rx_status)
1028 {
1029         uint64_t pkt_flags;
1030
1031         /*
1032          * Check if VLAN present only.
1033          * Do not check whether L3/L4 rx checksum done by NIC or not,
1034          * That can be found from rte_eth_rxmode.hw_ip_checksum flag
1035          */
1036         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
1037
1038 #ifdef RTE_LIBRTE_IEEE1588
1039         if (rx_status & IXGBE_RXD_STAT_TMST)
1040                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1041 #endif
1042         return pkt_flags;
1043 }
1044
1045 static inline uint64_t
1046 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1047 {
1048         uint64_t pkt_flags;
1049
1050         /*
1051          * Bit 31: IPE, IPv4 checksum error
1052          * Bit 30: L4I, L4I integrity error
1053          */
1054         static uint64_t error_to_pkt_flags_map[4] = {
1055                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
1056                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1057         };
1058         pkt_flags = error_to_pkt_flags_map[(rx_status >>
1059                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1060
1061         if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1062             (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1063                 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1064         }
1065
1066         return pkt_flags;
1067 }
1068
1069 /*
1070  * LOOK_AHEAD defines how many desc statuses to check beyond the
1071  * current descriptor.
1072  * It must be a pound define for optimal performance.
1073  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1074  * function only works with LOOK_AHEAD=8.
1075  */
1076 #define LOOK_AHEAD 8
1077 #if (LOOK_AHEAD != 8)
1078 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1079 #endif
1080 static inline int
1081 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1082 {
1083         volatile union ixgbe_adv_rx_desc *rxdp;
1084         struct ixgbe_rx_entry *rxep;
1085         struct rte_mbuf *mb;
1086         uint16_t pkt_len;
1087         uint64_t pkt_flags;
1088         int nb_dd;
1089         uint32_t s[LOOK_AHEAD];
1090         uint16_t pkt_info[LOOK_AHEAD];
1091         int i, j, nb_rx = 0;
1092         uint32_t status;
1093
1094         /* get references to current descriptor and S/W ring entry */
1095         rxdp = &rxq->rx_ring[rxq->rx_tail];
1096         rxep = &rxq->sw_ring[rxq->rx_tail];
1097
1098         status = rxdp->wb.upper.status_error;
1099         /* check to make sure there is at least 1 packet to receive */
1100         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1101                 return 0;
1102
1103         /*
1104          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1105          * reference packets that are ready to be received.
1106          */
1107         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1108              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1109         {
1110                 /* Read desc statuses backwards to avoid race condition */
1111                 for (j = LOOK_AHEAD-1; j >= 0; --j)
1112                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1113
1114                 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1115                         pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1116                                                 hs_rss.pkt_info;
1117
1118                 /* Compute how many status bits were set */
1119                 nb_dd = 0;
1120                 for (j = 0; j < LOOK_AHEAD; ++j)
1121                         nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
1122
1123                 nb_rx += nb_dd;
1124
1125                 /* Translate descriptor info to mbuf format */
1126                 for (j = 0; j < nb_dd; ++j) {
1127                         mb = rxep[j].mbuf;
1128                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1129                                   rxq->crc_len;
1130                         mb->data_len = pkt_len;
1131                         mb->pkt_len = pkt_len;
1132                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1133
1134                         /* convert descriptor fields to rte mbuf flags */
1135                         pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1136                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1137                         pkt_flags |=
1138                                 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1139                         mb->ol_flags = pkt_flags;
1140                         mb->packet_type =
1141                                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1142
1143                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1144                                 mb->hash.rss = rte_le_to_cpu_32(
1145                                     rxdp[j].wb.lower.hi_dword.rss);
1146                         else if (pkt_flags & PKT_RX_FDIR) {
1147                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1148                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1149                                     IXGBE_ATR_HASH_MASK;
1150                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1151                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1152                         }
1153                 }
1154
1155                 /* Move mbuf pointers from the S/W ring to the stage */
1156                 for (j = 0; j < LOOK_AHEAD; ++j) {
1157                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1158                 }
1159
1160                 /* stop if all requested packets could not be received */
1161                 if (nb_dd != LOOK_AHEAD)
1162                         break;
1163         }
1164
1165         /* clear software ring entries so we can cleanup correctly */
1166         for (i = 0; i < nb_rx; ++i) {
1167                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1168         }
1169
1170
1171         return nb_rx;
1172 }
1173
1174 static inline int
1175 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1176 {
1177         volatile union ixgbe_adv_rx_desc *rxdp;
1178         struct ixgbe_rx_entry *rxep;
1179         struct rte_mbuf *mb;
1180         uint16_t alloc_idx;
1181         __le64 dma_addr;
1182         int diag, i;
1183
1184         /* allocate buffers in bulk directly into the S/W ring */
1185         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1186         rxep = &rxq->sw_ring[alloc_idx];
1187         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1188                                     rxq->rx_free_thresh);
1189         if (unlikely(diag != 0))
1190                 return -ENOMEM;
1191
1192         rxdp = &rxq->rx_ring[alloc_idx];
1193         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1194                 /* populate the static rte mbuf fields */
1195                 mb = rxep[i].mbuf;
1196                 if (reset_mbuf) {
1197                         mb->next = NULL;
1198                         mb->nb_segs = 1;
1199                         mb->port = rxq->port_id;
1200                 }
1201
1202                 rte_mbuf_refcnt_set(mb, 1);
1203                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1204
1205                 /* populate the descriptors */
1206                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
1207                 rxdp[i].read.hdr_addr = 0;
1208                 rxdp[i].read.pkt_addr = dma_addr;
1209         }
1210
1211         /* update state of internal queue structure */
1212         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1213         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1214                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1215
1216         /* no errors */
1217         return 0;
1218 }
1219
1220 static inline uint16_t
1221 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1222                          uint16_t nb_pkts)
1223 {
1224         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1225         int i;
1226
1227         /* how many packets are ready to return? */
1228         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1229
1230         /* copy mbuf pointers to the application's packet list */
1231         for (i = 0; i < nb_pkts; ++i)
1232                 rx_pkts[i] = stage[i];
1233
1234         /* update internal queue state */
1235         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1236         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1237
1238         return nb_pkts;
1239 }
1240
1241 static inline uint16_t
1242 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1243              uint16_t nb_pkts)
1244 {
1245         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1246         uint16_t nb_rx = 0;
1247
1248         /* Any previously recv'd pkts will be returned from the Rx stage */
1249         if (rxq->rx_nb_avail)
1250                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1251
1252         /* Scan the H/W ring for packets to receive */
1253         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1254
1255         /* update internal queue state */
1256         rxq->rx_next_avail = 0;
1257         rxq->rx_nb_avail = nb_rx;
1258         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1259
1260         /* if required, allocate new buffers to replenish descriptors */
1261         if (rxq->rx_tail > rxq->rx_free_trigger) {
1262                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1263
1264                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1265                         int i, j;
1266                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1267                                    "queue_id=%u", (unsigned) rxq->port_id,
1268                                    (unsigned) rxq->queue_id);
1269
1270                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1271                                 rxq->rx_free_thresh;
1272
1273                         /*
1274                          * Need to rewind any previous receives if we cannot
1275                          * allocate new buffers to replenish the old ones.
1276                          */
1277                         rxq->rx_nb_avail = 0;
1278                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1279                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1280                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1281
1282                         return 0;
1283                 }
1284
1285                 /* update tail pointer */
1286                 rte_wmb();
1287                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1288         }
1289
1290         if (rxq->rx_tail >= rxq->nb_rx_desc)
1291                 rxq->rx_tail = 0;
1292
1293         /* received any packets this loop? */
1294         if (rxq->rx_nb_avail)
1295                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1296
1297         return 0;
1298 }
1299
1300 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1301 static uint16_t
1302 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1303                            uint16_t nb_pkts)
1304 {
1305         uint16_t nb_rx;
1306
1307         if (unlikely(nb_pkts == 0))
1308                 return 0;
1309
1310         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1311                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1312
1313         /* request is relatively large, chunk it up */
1314         nb_rx = 0;
1315         while (nb_pkts) {
1316                 uint16_t ret, n;
1317                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1318                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1319                 nb_rx = (uint16_t)(nb_rx + ret);
1320                 nb_pkts = (uint16_t)(nb_pkts - ret);
1321                 if (ret < n)
1322                         break;
1323         }
1324
1325         return nb_rx;
1326 }
1327
1328 uint16_t
1329 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1330                 uint16_t nb_pkts)
1331 {
1332         struct ixgbe_rx_queue *rxq;
1333         volatile union ixgbe_adv_rx_desc *rx_ring;
1334         volatile union ixgbe_adv_rx_desc *rxdp;
1335         struct ixgbe_rx_entry *sw_ring;
1336         struct ixgbe_rx_entry *rxe;
1337         struct rte_mbuf *rxm;
1338         struct rte_mbuf *nmb;
1339         union ixgbe_adv_rx_desc rxd;
1340         uint64_t dma_addr;
1341         uint32_t staterr;
1342         uint32_t pkt_info;
1343         uint16_t pkt_len;
1344         uint16_t rx_id;
1345         uint16_t nb_rx;
1346         uint16_t nb_hold;
1347         uint64_t pkt_flags;
1348
1349         nb_rx = 0;
1350         nb_hold = 0;
1351         rxq = rx_queue;
1352         rx_id = rxq->rx_tail;
1353         rx_ring = rxq->rx_ring;
1354         sw_ring = rxq->sw_ring;
1355         while (nb_rx < nb_pkts) {
1356                 /*
1357                  * The order of operations here is important as the DD status
1358                  * bit must not be read after any other descriptor fields.
1359                  * rx_ring and rxdp are pointing to volatile data so the order
1360                  * of accesses cannot be reordered by the compiler. If they were
1361                  * not volatile, they could be reordered which could lead to
1362                  * using invalid descriptor fields when read from rxd.
1363                  */
1364                 rxdp = &rx_ring[rx_id];
1365                 staterr = rxdp->wb.upper.status_error;
1366                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1367                         break;
1368                 rxd = *rxdp;
1369
1370                 /*
1371                  * End of packet.
1372                  *
1373                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1374                  * is likely to be invalid and to be dropped by the various
1375                  * validation checks performed by the network stack.
1376                  *
1377                  * Allocate a new mbuf to replenish the RX ring descriptor.
1378                  * If the allocation fails:
1379                  *    - arrange for that RX descriptor to be the first one
1380                  *      being parsed the next time the receive function is
1381                  *      invoked [on the same queue].
1382                  *
1383                  *    - Stop parsing the RX ring and return immediately.
1384                  *
1385                  * This policy do not drop the packet received in the RX
1386                  * descriptor for which the allocation of a new mbuf failed.
1387                  * Thus, it allows that packet to be later retrieved if
1388                  * mbuf have been freed in the mean time.
1389                  * As a side effect, holding RX descriptors instead of
1390                  * systematically giving them back to the NIC may lead to
1391                  * RX ring exhaustion situations.
1392                  * However, the NIC can gracefully prevent such situations
1393                  * to happen by sending specific "back-pressure" flow control
1394                  * frames to its peer(s).
1395                  */
1396                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1397                            "ext_err_stat=0x%08x pkt_len=%u",
1398                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1399                            (unsigned) rx_id, (unsigned) staterr,
1400                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1401
1402                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1403                 if (nmb == NULL) {
1404                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1405                                    "queue_id=%u", (unsigned) rxq->port_id,
1406                                    (unsigned) rxq->queue_id);
1407                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1408                         break;
1409                 }
1410
1411                 nb_hold++;
1412                 rxe = &sw_ring[rx_id];
1413                 rx_id++;
1414                 if (rx_id == rxq->nb_rx_desc)
1415                         rx_id = 0;
1416
1417                 /* Prefetch next mbuf while processing current one. */
1418                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1419
1420                 /*
1421                  * When next RX descriptor is on a cache-line boundary,
1422                  * prefetch the next 4 RX descriptors and the next 8 pointers
1423                  * to mbufs.
1424                  */
1425                 if ((rx_id & 0x3) == 0) {
1426                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1427                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1428                 }
1429
1430                 rxm = rxe->mbuf;
1431                 rxe->mbuf = nmb;
1432                 dma_addr =
1433                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1434                 rxdp->read.hdr_addr = 0;
1435                 rxdp->read.pkt_addr = dma_addr;
1436
1437                 /*
1438                  * Initialize the returned mbuf.
1439                  * 1) setup generic mbuf fields:
1440                  *    - number of segments,
1441                  *    - next segment,
1442                  *    - packet length,
1443                  *    - RX port identifier.
1444                  * 2) integrate hardware offload data, if any:
1445                  *    - RSS flag & hash,
1446                  *    - IP checksum flag,
1447                  *    - VLAN TCI, if any,
1448                  *    - error flags.
1449                  */
1450                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1451                                       rxq->crc_len);
1452                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1453                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1454                 rxm->nb_segs = 1;
1455                 rxm->next = NULL;
1456                 rxm->pkt_len = pkt_len;
1457                 rxm->data_len = pkt_len;
1458                 rxm->port = rxq->port_id;
1459
1460                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1461                                                                 pkt_info);
1462                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1463                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1464
1465                 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1466                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1467                 pkt_flags = pkt_flags |
1468                         ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1469                 rxm->ol_flags = pkt_flags;
1470                 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1471
1472                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1473                         rxm->hash.rss = rte_le_to_cpu_32(
1474                                                 rxd.wb.lower.hi_dword.rss);
1475                 else if (pkt_flags & PKT_RX_FDIR) {
1476                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1477                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1478                                         IXGBE_ATR_HASH_MASK;
1479                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1480                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1481                 }
1482                 /*
1483                  * Store the mbuf address into the next entry of the array
1484                  * of returned packets.
1485                  */
1486                 rx_pkts[nb_rx++] = rxm;
1487         }
1488         rxq->rx_tail = rx_id;
1489
1490         /*
1491          * If the number of free RX descriptors is greater than the RX free
1492          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1493          * register.
1494          * Update the RDT with the value of the last processed RX descriptor
1495          * minus 1, to guarantee that the RDT register is never equal to the
1496          * RDH register, which creates a "full" ring situtation from the
1497          * hardware point of view...
1498          */
1499         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1500         if (nb_hold > rxq->rx_free_thresh) {
1501                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1502                            "nb_hold=%u nb_rx=%u",
1503                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1504                            (unsigned) rx_id, (unsigned) nb_hold,
1505                            (unsigned) nb_rx);
1506                 rx_id = (uint16_t) ((rx_id == 0) ?
1507                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1508                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1509                 nb_hold = 0;
1510         }
1511         rxq->nb_rx_hold = nb_hold;
1512         return nb_rx;
1513 }
1514
1515 /**
1516  * Detect an RSC descriptor.
1517  */
1518 static inline uint32_t
1519 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1520 {
1521         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1522                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1523 }
1524
1525 /**
1526  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1527  *
1528  * Fill the following info in the HEAD buffer of the Rx cluster:
1529  *    - RX port identifier
1530  *    - hardware offload data, if any:
1531  *      - RSS flag & hash
1532  *      - IP checksum flag
1533  *      - VLAN TCI, if any
1534  *      - error flags
1535  * @head HEAD of the packet cluster
1536  * @desc HW descriptor to get data from
1537  * @port_id Port ID of the Rx queue
1538  */
1539 static inline void
1540 ixgbe_fill_cluster_head_buf(
1541         struct rte_mbuf *head,
1542         union ixgbe_adv_rx_desc *desc,
1543         uint8_t port_id,
1544         uint32_t staterr)
1545 {
1546         uint16_t pkt_info;
1547         uint64_t pkt_flags;
1548
1549         head->port = port_id;
1550
1551         /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1552          * set in the pkt_flags field.
1553          */
1554         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1555         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1556         pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1557         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1558         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1559         head->ol_flags = pkt_flags;
1560         head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1561
1562         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1563                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1564         else if (pkt_flags & PKT_RX_FDIR) {
1565                 head->hash.fdir.hash =
1566                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1567                                                           & IXGBE_ATR_HASH_MASK;
1568                 head->hash.fdir.id =
1569                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1570         }
1571 }
1572
1573 /**
1574  * ixgbe_recv_pkts_lro - receive handler for and LRO case.
1575  *
1576  * @rx_queue Rx queue handle
1577  * @rx_pkts table of received packets
1578  * @nb_pkts size of rx_pkts table
1579  * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1580  *
1581  * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1582  * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1583  *
1584  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1585  * 1) When non-EOP RSC completion arrives:
1586  *    a) Update the HEAD of the current RSC aggregation cluster with the new
1587  *       segment's data length.
1588  *    b) Set the "next" pointer of the current segment to point to the segment
1589  *       at the NEXTP index.
1590  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1591  *       in the sw_rsc_ring.
1592  * 2) When EOP arrives we just update the cluster's total length and offload
1593  *    flags and deliver the cluster up to the upper layers. In our case - put it
1594  *    in the rx_pkts table.
1595  *
1596  * Returns the number of received packets/clusters (according to the "bulk
1597  * receive" interface).
1598  */
1599 static inline uint16_t
1600 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1601                     bool bulk_alloc)
1602 {
1603         struct ixgbe_rx_queue *rxq = rx_queue;
1604         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1605         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1606         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1607         uint16_t rx_id = rxq->rx_tail;
1608         uint16_t nb_rx = 0;
1609         uint16_t nb_hold = rxq->nb_rx_hold;
1610         uint16_t prev_id = rxq->rx_tail;
1611
1612         while (nb_rx < nb_pkts) {
1613                 bool eop;
1614                 struct ixgbe_rx_entry *rxe;
1615                 struct ixgbe_scattered_rx_entry *sc_entry;
1616                 struct ixgbe_scattered_rx_entry *next_sc_entry;
1617                 struct ixgbe_rx_entry *next_rxe;
1618                 struct rte_mbuf *first_seg;
1619                 struct rte_mbuf *rxm;
1620                 struct rte_mbuf *nmb;
1621                 union ixgbe_adv_rx_desc rxd;
1622                 uint16_t data_len;
1623                 uint16_t next_id;
1624                 volatile union ixgbe_adv_rx_desc *rxdp;
1625                 uint32_t staterr;
1626
1627 next_desc:
1628                 /*
1629                  * The code in this whole file uses the volatile pointer to
1630                  * ensure the read ordering of the status and the rest of the
1631                  * descriptor fields (on the compiler level only!!!). This is so
1632                  * UGLY - why not to just use the compiler barrier instead? DPDK
1633                  * even has the rte_compiler_barrier() for that.
1634                  *
1635                  * But most importantly this is just wrong because this doesn't
1636                  * ensure memory ordering in a general case at all. For
1637                  * instance, DPDK is supposed to work on Power CPUs where
1638                  * compiler barrier may just not be enough!
1639                  *
1640                  * I tried to write only this function properly to have a
1641                  * starting point (as a part of an LRO/RSC series) but the
1642                  * compiler cursed at me when I tried to cast away the
1643                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1644                  * keeping it the way it is for now.
1645                  *
1646                  * The code in this file is broken in so many other places and
1647                  * will just not work on a big endian CPU anyway therefore the
1648                  * lines below will have to be revisited together with the rest
1649                  * of the ixgbe PMD.
1650                  *
1651                  * TODO:
1652                  *    - Get rid of "volatile" crap and let the compiler do its
1653                  *      job.
1654                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
1655                  *      memory ordering below.
1656                  */
1657                 rxdp = &rx_ring[rx_id];
1658                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1659
1660                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1661                         break;
1662
1663                 rxd = *rxdp;
1664
1665                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1666                                   "staterr=0x%x data_len=%u",
1667                            rxq->port_id, rxq->queue_id, rx_id, staterr,
1668                            rte_le_to_cpu_16(rxd.wb.upper.length));
1669
1670                 if (!bulk_alloc) {
1671                         nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1672                         if (nmb == NULL) {
1673                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1674                                                   "port_id=%u queue_id=%u",
1675                                            rxq->port_id, rxq->queue_id);
1676
1677                                 rte_eth_devices[rxq->port_id].data->
1678                                                         rx_mbuf_alloc_failed++;
1679                                 break;
1680                         }
1681                 }
1682                 else if (nb_hold > rxq->rx_free_thresh) {
1683                         uint16_t next_rdt = rxq->rx_free_trigger;
1684
1685                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1686                                 rte_wmb();
1687                                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1688                                                     next_rdt);
1689                                 nb_hold -= rxq->rx_free_thresh;
1690                         } else {
1691                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1692                                                   "port_id=%u queue_id=%u",
1693                                            rxq->port_id, rxq->queue_id);
1694
1695                                 rte_eth_devices[rxq->port_id].data->
1696                                                         rx_mbuf_alloc_failed++;
1697                                 break;
1698                         }
1699                 }
1700
1701                 nb_hold++;
1702                 rxe = &sw_ring[rx_id];
1703                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1704
1705                 next_id = rx_id + 1;
1706                 if (next_id == rxq->nb_rx_desc)
1707                         next_id = 0;
1708
1709                 /* Prefetch next mbuf while processing current one. */
1710                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1711
1712                 /*
1713                  * When next RX descriptor is on a cache-line boundary,
1714                  * prefetch the next 4 RX descriptors and the next 4 pointers
1715                  * to mbufs.
1716                  */
1717                 if ((next_id & 0x3) == 0) {
1718                         rte_ixgbe_prefetch(&rx_ring[next_id]);
1719                         rte_ixgbe_prefetch(&sw_ring[next_id]);
1720                 }
1721
1722                 rxm = rxe->mbuf;
1723
1724                 if (!bulk_alloc) {
1725                         __le64 dma =
1726                           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1727                         /*
1728                          * Update RX descriptor with the physical address of the
1729                          * new data buffer of the new allocated mbuf.
1730                          */
1731                         rxe->mbuf = nmb;
1732
1733                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
1734                         rxdp->read.hdr_addr = 0;
1735                         rxdp->read.pkt_addr = dma;
1736                 } else
1737                         rxe->mbuf = NULL;
1738
1739                 /*
1740                  * Set data length & data buffer address of mbuf.
1741                  */
1742                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1743                 rxm->data_len = data_len;
1744
1745                 if (!eop) {
1746                         uint16_t nextp_id;
1747                         /*
1748                          * Get next descriptor index:
1749                          *  - For RSC it's in the NEXTP field.
1750                          *  - For a scattered packet - it's just a following
1751                          *    descriptor.
1752                          */
1753                         if (ixgbe_rsc_count(&rxd))
1754                                 nextp_id =
1755                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1756                                                        IXGBE_RXDADV_NEXTP_SHIFT;
1757                         else
1758                                 nextp_id = next_id;
1759
1760                         next_sc_entry = &sw_sc_ring[nextp_id];
1761                         next_rxe = &sw_ring[nextp_id];
1762                         rte_ixgbe_prefetch(next_rxe);
1763                 }
1764
1765                 sc_entry = &sw_sc_ring[rx_id];
1766                 first_seg = sc_entry->fbuf;
1767                 sc_entry->fbuf = NULL;
1768
1769                 /*
1770                  * If this is the first buffer of the received packet,
1771                  * set the pointer to the first mbuf of the packet and
1772                  * initialize its context.
1773                  * Otherwise, update the total length and the number of segments
1774                  * of the current scattered packet, and update the pointer to
1775                  * the last mbuf of the current packet.
1776                  */
1777                 if (first_seg == NULL) {
1778                         first_seg = rxm;
1779                         first_seg->pkt_len = data_len;
1780                         first_seg->nb_segs = 1;
1781                 } else {
1782                         first_seg->pkt_len += data_len;
1783                         first_seg->nb_segs++;
1784                 }
1785
1786                 prev_id = rx_id;
1787                 rx_id = next_id;
1788
1789                 /*
1790                  * If this is not the last buffer of the received packet, update
1791                  * the pointer to the first mbuf at the NEXTP entry in the
1792                  * sw_sc_ring and continue to parse the RX ring.
1793                  */
1794                 if (!eop) {
1795                         rxm->next = next_rxe->mbuf;
1796                         next_sc_entry->fbuf = first_seg;
1797                         goto next_desc;
1798                 }
1799
1800                 /*
1801                  * This is the last buffer of the received packet - return
1802                  * the current cluster to the user.
1803                  */
1804                 rxm->next = NULL;
1805
1806                 /* Initialize the first mbuf of the returned packet */
1807                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1808                                             staterr);
1809
1810                 /*
1811                  * Deal with the case, when HW CRC srip is disabled.
1812                  * That can't happen when LRO is enabled, but still could
1813                  * happen for scattered RX mode.
1814                  */
1815                 first_seg->pkt_len -= rxq->crc_len;
1816                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1817                         struct rte_mbuf *lp;
1818
1819                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
1820                                 ;
1821
1822                         first_seg->nb_segs--;
1823                         lp->data_len -= rxq->crc_len - rxm->data_len;
1824                         lp->next = NULL;
1825                         rte_pktmbuf_free_seg(rxm);
1826                 } else
1827                         rxm->data_len -= rxq->crc_len;
1828
1829                 /* Prefetch data of first segment, if configured to do so. */
1830                 rte_packet_prefetch((char *)first_seg->buf_addr +
1831                         first_seg->data_off);
1832
1833                 /*
1834                  * Store the mbuf address into the next entry of the array
1835                  * of returned packets.
1836                  */
1837                 rx_pkts[nb_rx++] = first_seg;
1838         }
1839
1840         /*
1841          * Record index of the next RX descriptor to probe.
1842          */
1843         rxq->rx_tail = rx_id;
1844
1845         /*
1846          * If the number of free RX descriptors is greater than the RX free
1847          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1848          * register.
1849          * Update the RDT with the value of the last processed RX descriptor
1850          * minus 1, to guarantee that the RDT register is never equal to the
1851          * RDH register, which creates a "full" ring situtation from the
1852          * hardware point of view...
1853          */
1854         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1855                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1856                            "nb_hold=%u nb_rx=%u",
1857                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1858
1859                 rte_wmb();
1860                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1861                 nb_hold = 0;
1862         }
1863
1864         rxq->nb_rx_hold = nb_hold;
1865         return nb_rx;
1866 }
1867
1868 uint16_t
1869 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1870                                  uint16_t nb_pkts)
1871 {
1872         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1873 }
1874
1875 uint16_t
1876 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1877                                uint16_t nb_pkts)
1878 {
1879         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1880 }
1881
1882 /*********************************************************************
1883  *
1884  *  Queue management functions
1885  *
1886  **********************************************************************/
1887
1888 static void __attribute__((cold))
1889 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1890 {
1891         unsigned i;
1892
1893         if (txq->sw_ring != NULL) {
1894                 for (i = 0; i < txq->nb_tx_desc; i++) {
1895                         if (txq->sw_ring[i].mbuf != NULL) {
1896                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1897                                 txq->sw_ring[i].mbuf = NULL;
1898                         }
1899                 }
1900         }
1901 }
1902
1903 static void __attribute__((cold))
1904 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1905 {
1906         if (txq != NULL &&
1907             txq->sw_ring != NULL)
1908                 rte_free(txq->sw_ring);
1909 }
1910
1911 static void __attribute__((cold))
1912 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1913 {
1914         if (txq != NULL && txq->ops != NULL) {
1915                 txq->ops->release_mbufs(txq);
1916                 txq->ops->free_swring(txq);
1917                 rte_free(txq);
1918         }
1919 }
1920
1921 void __attribute__((cold))
1922 ixgbe_dev_tx_queue_release(void *txq)
1923 {
1924         ixgbe_tx_queue_release(txq);
1925 }
1926
1927 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1928 static void __attribute__((cold))
1929 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1930 {
1931         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1932         struct ixgbe_tx_entry *txe = txq->sw_ring;
1933         uint16_t prev, i;
1934
1935         /* Zero out HW ring memory */
1936         for (i = 0; i < txq->nb_tx_desc; i++) {
1937                 txq->tx_ring[i] = zeroed_desc;
1938         }
1939
1940         /* Initialize SW ring entries */
1941         prev = (uint16_t) (txq->nb_tx_desc - 1);
1942         for (i = 0; i < txq->nb_tx_desc; i++) {
1943                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1944                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1945                 txe[i].mbuf = NULL;
1946                 txe[i].last_id = i;
1947                 txe[prev].next_id = i;
1948                 prev = i;
1949         }
1950
1951         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1952         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1953
1954         txq->tx_tail = 0;
1955         txq->nb_tx_used = 0;
1956         /*
1957          * Always allow 1 descriptor to be un-allocated to avoid
1958          * a H/W race condition
1959          */
1960         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1961         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1962         txq->ctx_curr = 0;
1963         memset((void*)&txq->ctx_cache, 0,
1964                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1965 }
1966
1967 static const struct ixgbe_txq_ops def_txq_ops = {
1968         .release_mbufs = ixgbe_tx_queue_release_mbufs,
1969         .free_swring = ixgbe_tx_free_swring,
1970         .reset = ixgbe_reset_tx_queue,
1971 };
1972
1973 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1974  * the queue parameters. Used in tx_queue_setup by primary process and then
1975  * in dev_init by secondary process when attaching to an existing ethdev.
1976  */
1977 void __attribute__((cold))
1978 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1979 {
1980         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1981         if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1982                         && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1983                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1984 #ifdef RTE_IXGBE_INC_VECTOR
1985                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1986                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1987                                         ixgbe_txq_vec_setup(txq) == 0)) {
1988                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1989                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1990                 } else
1991 #endif
1992                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1993         } else {
1994                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1995                 PMD_INIT_LOG(DEBUG,
1996                                 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1997                                 (unsigned long)txq->txq_flags,
1998                                 (unsigned long)IXGBE_SIMPLE_FLAGS);
1999                 PMD_INIT_LOG(DEBUG,
2000                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2001                                 (unsigned long)txq->tx_rs_thresh,
2002                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2003                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2004         }
2005 }
2006
2007 int __attribute__((cold))
2008 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2009                          uint16_t queue_idx,
2010                          uint16_t nb_desc,
2011                          unsigned int socket_id,
2012                          const struct rte_eth_txconf *tx_conf)
2013 {
2014         const struct rte_memzone *tz;
2015         struct ixgbe_tx_queue *txq;
2016         struct ixgbe_hw     *hw;
2017         uint16_t tx_rs_thresh, tx_free_thresh;
2018
2019         PMD_INIT_FUNC_TRACE();
2020         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021
2022         /*
2023          * Validate number of transmit descriptors.
2024          * It must not exceed hardware maximum, and must be multiple
2025          * of IXGBE_ALIGN.
2026          */
2027         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2028                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2029                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2030                 return -EINVAL;
2031         }
2032
2033         /*
2034          * The following two parameters control the setting of the RS bit on
2035          * transmit descriptors.
2036          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2037          * descriptors have been used.
2038          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2039          * descriptors are used or if the number of descriptors required
2040          * to transmit a packet is greater than the number of free TX
2041          * descriptors.
2042          * The following constraints must be satisfied:
2043          *  tx_rs_thresh must be greater than 0.
2044          *  tx_rs_thresh must be less than the size of the ring minus 2.
2045          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2046          *  tx_rs_thresh must be a divisor of the ring size.
2047          *  tx_free_thresh must be greater than 0.
2048          *  tx_free_thresh must be less than the size of the ring minus 3.
2049          * One descriptor in the TX ring is used as a sentinel to avoid a
2050          * H/W race condition, hence the maximum threshold constraints.
2051          * When set to zero use default values.
2052          */
2053         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2054                         tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2055         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2056                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2057         if (tx_rs_thresh >= (nb_desc - 2)) {
2058                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2059                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
2060                         "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2061                         (int)dev->data->port_id, (int)queue_idx);
2062                 return -(EINVAL);
2063         }
2064         if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2065                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2066                         "(tx_rs_thresh=%u port=%d queue=%d)",
2067                         DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2068                         (int)dev->data->port_id, (int)queue_idx);
2069                 return -(EINVAL);
2070         }
2071         if (tx_free_thresh >= (nb_desc - 3)) {
2072                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2073                              "tx_free_thresh must be less than the number of "
2074                              "TX descriptors minus 3. (tx_free_thresh=%u "
2075                              "port=%d queue=%d)",
2076                              (unsigned int)tx_free_thresh,
2077                              (int)dev->data->port_id, (int)queue_idx);
2078                 return -(EINVAL);
2079         }
2080         if (tx_rs_thresh > tx_free_thresh) {
2081                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2082                              "tx_free_thresh. (tx_free_thresh=%u "
2083                              "tx_rs_thresh=%u port=%d queue=%d)",
2084                              (unsigned int)tx_free_thresh,
2085                              (unsigned int)tx_rs_thresh,
2086                              (int)dev->data->port_id,
2087                              (int)queue_idx);
2088                 return -(EINVAL);
2089         }
2090         if ((nb_desc % tx_rs_thresh) != 0) {
2091                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2092                              "number of TX descriptors. (tx_rs_thresh=%u "
2093                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2094                              (int)dev->data->port_id, (int)queue_idx);
2095                 return -(EINVAL);
2096         }
2097
2098         /*
2099          * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
2100          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2101          * by the NIC and all descriptors are written back after the NIC
2102          * accumulates WTHRESH descriptors.
2103          */
2104         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2105                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2106                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2107                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2108                              (int)dev->data->port_id, (int)queue_idx);
2109                 return -(EINVAL);
2110         }
2111
2112         /* Free memory prior to re-allocation if needed... */
2113         if (dev->data->tx_queues[queue_idx] != NULL) {
2114                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2115                 dev->data->tx_queues[queue_idx] = NULL;
2116         }
2117
2118         /* First allocate the tx queue data structure */
2119         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2120                                  RTE_CACHE_LINE_SIZE, socket_id);
2121         if (txq == NULL)
2122                 return -ENOMEM;
2123
2124         /*
2125          * Allocate TX ring hardware descriptors. A memzone large enough to
2126          * handle the maximum ring size is allocated in order to allow for
2127          * resizing in later calls to the queue setup function.
2128          */
2129         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2130                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2131                         IXGBE_ALIGN, socket_id);
2132         if (tz == NULL) {
2133                 ixgbe_tx_queue_release(txq);
2134                 return -ENOMEM;
2135         }
2136
2137         txq->nb_tx_desc = nb_desc;
2138         txq->tx_rs_thresh = tx_rs_thresh;
2139         txq->tx_free_thresh = tx_free_thresh;
2140         txq->pthresh = tx_conf->tx_thresh.pthresh;
2141         txq->hthresh = tx_conf->tx_thresh.hthresh;
2142         txq->wthresh = tx_conf->tx_thresh.wthresh;
2143         txq->queue_id = queue_idx;
2144         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2145                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2146         txq->port_id = dev->data->port_id;
2147         txq->txq_flags = tx_conf->txq_flags;
2148         txq->ops = &def_txq_ops;
2149         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2150
2151         /*
2152          * Modification to set VFTDT for virtual function if vf is detected
2153          */
2154         if (hw->mac.type == ixgbe_mac_82599_vf ||
2155             hw->mac.type == ixgbe_mac_X540_vf ||
2156             hw->mac.type == ixgbe_mac_X550_vf ||
2157             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2158             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2159                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2160         else
2161                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2162
2163         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2164         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2165
2166         /* Allocate software ring */
2167         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2168                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2169                                 RTE_CACHE_LINE_SIZE, socket_id);
2170         if (txq->sw_ring == NULL) {
2171                 ixgbe_tx_queue_release(txq);
2172                 return -ENOMEM;
2173         }
2174         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2175                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2176
2177         /* set up vector or scalar TX function as appropriate */
2178         ixgbe_set_tx_function(dev, txq);
2179
2180         txq->ops->reset(txq);
2181
2182         dev->data->tx_queues[queue_idx] = txq;
2183
2184
2185         return 0;
2186 }
2187
2188 /**
2189  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2190  *
2191  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2192  * in the sw_rsc_ring is not set to NULL but rather points to the next
2193  * mbuf of this RSC aggregation (that has not been completed yet and still
2194  * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
2195  * will just free first "nb_segs" segments of the cluster explicitly by calling
2196  * an rte_pktmbuf_free_seg().
2197  *
2198  * @m scattered cluster head
2199  */
2200 static void __attribute__((cold))
2201 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2202 {
2203         uint8_t i, nb_segs = m->nb_segs;
2204         struct rte_mbuf *next_seg;
2205
2206         for (i = 0; i < nb_segs; i++) {
2207                 next_seg = m->next;
2208                 rte_pktmbuf_free_seg(m);
2209                 m = next_seg;
2210         }
2211 }
2212
2213 static void __attribute__((cold))
2214 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2215 {
2216         unsigned i;
2217
2218 #ifdef RTE_IXGBE_INC_VECTOR
2219         /* SSE Vector driver has a different way of releasing mbufs. */
2220         if (rxq->rx_using_sse) {
2221                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2222                 return;
2223         }
2224 #endif
2225
2226         if (rxq->sw_ring != NULL) {
2227                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2228                         if (rxq->sw_ring[i].mbuf != NULL) {
2229                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2230                                 rxq->sw_ring[i].mbuf = NULL;
2231                         }
2232                 }
2233                 if (rxq->rx_nb_avail) {
2234                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2235                                 struct rte_mbuf *mb;
2236                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2237                                 rte_pktmbuf_free_seg(mb);
2238                         }
2239                         rxq->rx_nb_avail = 0;
2240                 }
2241         }
2242
2243         if (rxq->sw_sc_ring)
2244                 for (i = 0; i < rxq->nb_rx_desc; i++)
2245                         if (rxq->sw_sc_ring[i].fbuf) {
2246                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2247                                 rxq->sw_sc_ring[i].fbuf = NULL;
2248                         }
2249 }
2250
2251 static void __attribute__((cold))
2252 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2253 {
2254         if (rxq != NULL) {
2255                 ixgbe_rx_queue_release_mbufs(rxq);
2256                 rte_free(rxq->sw_ring);
2257                 rte_free(rxq->sw_sc_ring);
2258                 rte_free(rxq);
2259         }
2260 }
2261
2262 void __attribute__((cold))
2263 ixgbe_dev_rx_queue_release(void *rxq)
2264 {
2265         ixgbe_rx_queue_release(rxq);
2266 }
2267
2268 /*
2269  * Check if Rx Burst Bulk Alloc function can be used.
2270  * Return
2271  *        0: the preconditions are satisfied and the bulk allocation function
2272  *           can be used.
2273  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2274  *           function must be used.
2275  */
2276 static inline int __attribute__((cold))
2277 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2278 {
2279         int ret = 0;
2280
2281         /*
2282          * Make sure the following pre-conditions are satisfied:
2283          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2284          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2285          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2286          *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2287          * Scattered packets are not supported.  This should be checked
2288          * outside of this function.
2289          */
2290         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2291                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2292                              "rxq->rx_free_thresh=%d, "
2293                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2294                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2295                 ret = -EINVAL;
2296         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2297                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2298                              "rxq->rx_free_thresh=%d, "
2299                              "rxq->nb_rx_desc=%d",
2300                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2301                 ret = -EINVAL;
2302         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2303                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2304                              "rxq->nb_rx_desc=%d, "
2305                              "rxq->rx_free_thresh=%d",
2306                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2307                 ret = -EINVAL;
2308         } else if (!(rxq->nb_rx_desc <
2309                (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2310                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2311                              "rxq->nb_rx_desc=%d, "
2312                              "IXGBE_MAX_RING_DESC=%d, "
2313                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2314                              rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2315                              RTE_PMD_IXGBE_RX_MAX_BURST);
2316                 ret = -EINVAL;
2317         }
2318
2319         return ret;
2320 }
2321
2322 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2323 static void __attribute__((cold))
2324 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2325 {
2326         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2327         unsigned i;
2328         uint16_t len = rxq->nb_rx_desc;
2329
2330         /*
2331          * By default, the Rx queue setup function allocates enough memory for
2332          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2333          * extra memory at the end of the descriptor ring to be zero'd out. A
2334          * pre-condition for using the Rx burst bulk alloc function is that the
2335          * number of descriptors is less than or equal to
2336          * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2337          * constraints here to see if we need to zero out memory after the end
2338          * of the H/W descriptor ring.
2339          */
2340         if (adapter->rx_bulk_alloc_allowed)
2341                 /* zero out extra memory */
2342                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2343
2344         /*
2345          * Zero out HW ring memory. Zero out extra memory at the end of
2346          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2347          * reads extra memory as zeros.
2348          */
2349         for (i = 0; i < len; i++) {
2350                 rxq->rx_ring[i] = zeroed_desc;
2351         }
2352
2353         /*
2354          * initialize extra software ring entries. Space for these extra
2355          * entries is always allocated
2356          */
2357         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2358         for (i = rxq->nb_rx_desc; i < len; ++i) {
2359                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2360         }
2361
2362         rxq->rx_nb_avail = 0;
2363         rxq->rx_next_avail = 0;
2364         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2365         rxq->rx_tail = 0;
2366         rxq->nb_rx_hold = 0;
2367         rxq->pkt_first_seg = NULL;
2368         rxq->pkt_last_seg = NULL;
2369
2370 #ifdef RTE_IXGBE_INC_VECTOR
2371         rxq->rxrearm_start = 0;
2372         rxq->rxrearm_nb = 0;
2373 #endif
2374 }
2375
2376 int __attribute__((cold))
2377 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2378                          uint16_t queue_idx,
2379                          uint16_t nb_desc,
2380                          unsigned int socket_id,
2381                          const struct rte_eth_rxconf *rx_conf,
2382                          struct rte_mempool *mp)
2383 {
2384         const struct rte_memzone *rz;
2385         struct ixgbe_rx_queue *rxq;
2386         struct ixgbe_hw     *hw;
2387         uint16_t len;
2388         struct ixgbe_adapter *adapter =
2389                 (struct ixgbe_adapter *)dev->data->dev_private;
2390
2391         PMD_INIT_FUNC_TRACE();
2392         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2393
2394         /*
2395          * Validate number of receive descriptors.
2396          * It must not exceed hardware maximum, and must be multiple
2397          * of IXGBE_ALIGN.
2398          */
2399         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2400                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2401                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2402                 return -EINVAL;
2403         }
2404
2405         /* Free memory prior to re-allocation if needed... */
2406         if (dev->data->rx_queues[queue_idx] != NULL) {
2407                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2408                 dev->data->rx_queues[queue_idx] = NULL;
2409         }
2410
2411         /* First allocate the rx queue data structure */
2412         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2413                                  RTE_CACHE_LINE_SIZE, socket_id);
2414         if (rxq == NULL)
2415                 return -ENOMEM;
2416         rxq->mb_pool = mp;
2417         rxq->nb_rx_desc = nb_desc;
2418         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2419         rxq->queue_id = queue_idx;
2420         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2421                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2422         rxq->port_id = dev->data->port_id;
2423         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2424                                                         0 : ETHER_CRC_LEN);
2425         rxq->drop_en = rx_conf->rx_drop_en;
2426         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2427
2428         /*
2429          * Allocate RX ring hardware descriptors. A memzone large enough to
2430          * handle the maximum ring size is allocated in order to allow for
2431          * resizing in later calls to the queue setup function.
2432          */
2433         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2434                                       RX_RING_SZ, IXGBE_ALIGN, socket_id);
2435         if (rz == NULL) {
2436                 ixgbe_rx_queue_release(rxq);
2437                 return -ENOMEM;
2438         }
2439
2440         /*
2441          * Zero init all the descriptors in the ring.
2442          */
2443         memset (rz->addr, 0, RX_RING_SZ);
2444
2445         /*
2446          * Modified to setup VFRDT for Virtual Function
2447          */
2448         if (hw->mac.type == ixgbe_mac_82599_vf ||
2449             hw->mac.type == ixgbe_mac_X540_vf ||
2450             hw->mac.type == ixgbe_mac_X550_vf ||
2451             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2452             hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2453                 rxq->rdt_reg_addr =
2454                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2455                 rxq->rdh_reg_addr =
2456                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2457         }
2458         else {
2459                 rxq->rdt_reg_addr =
2460                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2461                 rxq->rdh_reg_addr =
2462                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2463         }
2464
2465         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2466         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2467
2468         /*
2469          * Certain constraints must be met in order to use the bulk buffer
2470          * allocation Rx burst function. If any of Rx queues doesn't meet them
2471          * the feature should be disabled for the whole port.
2472          */
2473         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2474                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2475                                     "preconditions - canceling the feature for "
2476                                     "the whole port[%d]",
2477                              rxq->queue_id, rxq->port_id);
2478                 adapter->rx_bulk_alloc_allowed = false;
2479         }
2480
2481         /*
2482          * Allocate software ring. Allow for space at the end of the
2483          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2484          * function does not access an invalid memory region.
2485          */
2486         len = nb_desc;
2487         if (adapter->rx_bulk_alloc_allowed)
2488                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2489
2490         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2491                                           sizeof(struct ixgbe_rx_entry) * len,
2492                                           RTE_CACHE_LINE_SIZE, socket_id);
2493         if (!rxq->sw_ring) {
2494                 ixgbe_rx_queue_release(rxq);
2495                 return -ENOMEM;
2496         }
2497
2498         /*
2499          * Always allocate even if it's not going to be needed in order to
2500          * simplify the code.
2501          *
2502          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2503          * be requested in ixgbe_dev_rx_init(), which is called later from
2504          * dev_start() flow.
2505          */
2506         rxq->sw_sc_ring =
2507                 rte_zmalloc_socket("rxq->sw_sc_ring",
2508                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
2509                                    RTE_CACHE_LINE_SIZE, socket_id);
2510         if (!rxq->sw_sc_ring) {
2511                 ixgbe_rx_queue_release(rxq);
2512                 return -ENOMEM;
2513         }
2514
2515         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2516                             "dma_addr=0x%"PRIx64,
2517                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2518                      rxq->rx_ring_phys_addr);
2519
2520         if (!rte_is_power_of_2(nb_desc)) {
2521                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2522                                     "preconditions - canceling the feature for "
2523                                     "the whole port[%d]",
2524                              rxq->queue_id, rxq->port_id);
2525                 adapter->rx_vec_allowed = false;
2526         } else
2527                 ixgbe_rxq_vec_setup(rxq);
2528
2529         dev->data->rx_queues[queue_idx] = rxq;
2530
2531         ixgbe_reset_rx_queue(adapter, rxq);
2532
2533         return 0;
2534 }
2535
2536 uint32_t
2537 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2538 {
2539 #define IXGBE_RXQ_SCAN_INTERVAL 4
2540         volatile union ixgbe_adv_rx_desc *rxdp;
2541         struct ixgbe_rx_queue *rxq;
2542         uint32_t desc = 0;
2543
2544         if (rx_queue_id >= dev->data->nb_rx_queues) {
2545                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2546                 return 0;
2547         }
2548
2549         rxq = dev->data->rx_queues[rx_queue_id];
2550         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2551
2552         while ((desc < rxq->nb_rx_desc) &&
2553                 (rxdp->wb.upper.status_error &
2554                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2555                 desc += IXGBE_RXQ_SCAN_INTERVAL;
2556                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2557                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2558                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2559                                 desc - rxq->nb_rx_desc]);
2560         }
2561
2562         return desc;
2563 }
2564
2565 int
2566 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2567 {
2568         volatile union ixgbe_adv_rx_desc *rxdp;
2569         struct ixgbe_rx_queue *rxq = rx_queue;
2570         uint32_t desc;
2571
2572         if (unlikely(offset >= rxq->nb_rx_desc))
2573                 return 0;
2574         desc = rxq->rx_tail + offset;
2575         if (desc >= rxq->nb_rx_desc)
2576                 desc -= rxq->nb_rx_desc;
2577
2578         rxdp = &rxq->rx_ring[desc];
2579         return !!(rxdp->wb.upper.status_error &
2580                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2581 }
2582
2583 void __attribute__((cold))
2584 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2585 {
2586         unsigned i;
2587         struct ixgbe_adapter *adapter =
2588                 (struct ixgbe_adapter *)dev->data->dev_private;
2589
2590         PMD_INIT_FUNC_TRACE();
2591
2592         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2593                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2594                 if (txq != NULL) {
2595                         txq->ops->release_mbufs(txq);
2596                         txq->ops->reset(txq);
2597                 }
2598         }
2599
2600         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2601                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2602                 if (rxq != NULL) {
2603                         ixgbe_rx_queue_release_mbufs(rxq);
2604                         ixgbe_reset_rx_queue(adapter, rxq);
2605                 }
2606         }
2607 }
2608
2609 void
2610 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2611 {
2612         unsigned i;
2613
2614         PMD_INIT_FUNC_TRACE();
2615
2616         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2617                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2618                 dev->data->rx_queues[i] = NULL;
2619         }
2620         dev->data->nb_rx_queues = 0;
2621
2622         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2623                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2624                 dev->data->tx_queues[i] = NULL;
2625         }
2626         dev->data->nb_tx_queues = 0;
2627 }
2628
2629 /*********************************************************************
2630  *
2631  *  Device RX/TX init functions
2632  *
2633  **********************************************************************/
2634
2635 /**
2636  * Receive Side Scaling (RSS)
2637  * See section 7.1.2.8 in the following document:
2638  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2639  *
2640  * Principles:
2641  * The source and destination IP addresses of the IP header and the source
2642  * and destination ports of TCP/UDP headers, if any, of received packets are
2643  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2644  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2645  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
2646  * RSS output index which is used as the RX queue index where to store the
2647  * received packets.
2648  * The following output is supplied in the RX write-back descriptor:
2649  *     - 32-bit result of the Microsoft RSS hash function,
2650  *     - 4-bit RSS type field.
2651  */
2652
2653 /*
2654  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2655  * Used as the default key.
2656  */
2657 static uint8_t rss_intel_key[40] = {
2658         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2659         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2660         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2661         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2662         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2663 };
2664
2665 static void
2666 ixgbe_rss_disable(struct rte_eth_dev *dev)
2667 {
2668         struct ixgbe_hw *hw;
2669         uint32_t mrqc;
2670         uint32_t mrqc_reg;
2671
2672         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2673         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2674         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2675         mrqc &= ~IXGBE_MRQC_RSSEN;
2676         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2677 }
2678
2679 static void
2680 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2681 {
2682         uint8_t  *hash_key;
2683         uint32_t mrqc;
2684         uint32_t rss_key;
2685         uint64_t rss_hf;
2686         uint16_t i;
2687         uint32_t mrqc_reg;
2688         uint32_t rssrk_reg;
2689
2690         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2691         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2692
2693         hash_key = rss_conf->rss_key;
2694         if (hash_key != NULL) {
2695                 /* Fill in RSS hash key */
2696                 for (i = 0; i < 10; i++) {
2697                         rss_key  = hash_key[(i * 4)];
2698                         rss_key |= hash_key[(i * 4) + 1] << 8;
2699                         rss_key |= hash_key[(i * 4) + 2] << 16;
2700                         rss_key |= hash_key[(i * 4) + 3] << 24;
2701                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2702                 }
2703         }
2704
2705         /* Set configured hashing protocols in MRQC register */
2706         rss_hf = rss_conf->rss_hf;
2707         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2708         if (rss_hf & ETH_RSS_IPV4)
2709                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2710         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2711                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2712         if (rss_hf & ETH_RSS_IPV6)
2713                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2714         if (rss_hf & ETH_RSS_IPV6_EX)
2715                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2716         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2717                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2718         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2719                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2720         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2721                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2722         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2723                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2724         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2725                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2726         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2727 }
2728
2729 int
2730 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2731                           struct rte_eth_rss_conf *rss_conf)
2732 {
2733         struct ixgbe_hw *hw;
2734         uint32_t mrqc;
2735         uint64_t rss_hf;
2736         uint32_t mrqc_reg;
2737
2738         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2739
2740         if (!ixgbe_rss_update_sp(hw->mac.type)) {
2741                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2742                         "NIC.");
2743                 return -ENOTSUP;
2744         }
2745         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2746
2747         /*
2748          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2749          *     "RSS enabling cannot be done dynamically while it must be
2750          *      preceded by a software reset"
2751          * Before changing anything, first check that the update RSS operation
2752          * does not attempt to disable RSS, if RSS was enabled at
2753          * initialization time, or does not attempt to enable RSS, if RSS was
2754          * disabled at initialization time.
2755          */
2756         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2757         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2758         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2759                 if (rss_hf != 0) /* Enable RSS */
2760                         return -(EINVAL);
2761                 return 0; /* Nothing to do */
2762         }
2763         /* RSS enabled */
2764         if (rss_hf == 0) /* Disable RSS */
2765                 return -(EINVAL);
2766         ixgbe_hw_rss_hash_set(hw, rss_conf);
2767         return 0;
2768 }
2769
2770 int
2771 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2772                             struct rte_eth_rss_conf *rss_conf)
2773 {
2774         struct ixgbe_hw *hw;
2775         uint8_t *hash_key;
2776         uint32_t mrqc;
2777         uint32_t rss_key;
2778         uint64_t rss_hf;
2779         uint16_t i;
2780         uint32_t mrqc_reg;
2781         uint32_t rssrk_reg;
2782
2783         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2784         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2785         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2786         hash_key = rss_conf->rss_key;
2787         if (hash_key != NULL) {
2788                 /* Return RSS hash key */
2789                 for (i = 0; i < 10; i++) {
2790                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2791                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2792                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2793                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2794                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2795                 }
2796         }
2797
2798         /* Get RSS functions configured in MRQC register */
2799         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2800         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2801                 rss_conf->rss_hf = 0;
2802                 return 0;
2803         }
2804         rss_hf = 0;
2805         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2806                 rss_hf |= ETH_RSS_IPV4;
2807         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2808                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2809         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2810                 rss_hf |= ETH_RSS_IPV6;
2811         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2812                 rss_hf |= ETH_RSS_IPV6_EX;
2813         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2814                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2815         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2816                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2817         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2818                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2819         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2820                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2821         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2822                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2823         rss_conf->rss_hf = rss_hf;
2824         return 0;
2825 }
2826
2827 static void
2828 ixgbe_rss_configure(struct rte_eth_dev *dev)
2829 {
2830         struct rte_eth_rss_conf rss_conf;
2831         struct ixgbe_hw *hw;
2832         uint32_t reta;
2833         uint16_t i;
2834         uint16_t j;
2835         uint16_t sp_reta_size;
2836         uint32_t reta_reg;
2837
2838         PMD_INIT_FUNC_TRACE();
2839         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2840
2841         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2842
2843         /*
2844          * Fill in redirection table
2845          * The byte-swap is needed because NIC registers are in
2846          * little-endian order.
2847          */
2848         reta = 0;
2849         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2850                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2851
2852                 if (j == dev->data->nb_rx_queues)
2853                         j = 0;
2854                 reta = (reta << 8) | j;
2855                 if ((i & 3) == 3)
2856                         IXGBE_WRITE_REG(hw, reta_reg,
2857                                         rte_bswap32(reta));
2858         }
2859
2860         /*
2861          * Configure the RSS key and the RSS protocols used to compute
2862          * the RSS hash of input packets.
2863          */
2864         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2865         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2866                 ixgbe_rss_disable(dev);
2867                 return;
2868         }
2869         if (rss_conf.rss_key == NULL)
2870                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2871         ixgbe_hw_rss_hash_set(hw, &rss_conf);
2872 }
2873
2874 #define NUM_VFTA_REGISTERS 128
2875 #define NIC_RX_BUFFER_SIZE 0x200
2876 #define X550_RX_BUFFER_SIZE 0x180
2877
2878 static void
2879 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2880 {
2881         struct rte_eth_vmdq_dcb_conf *cfg;
2882         struct ixgbe_hw *hw;
2883         enum rte_eth_nb_pools num_pools;
2884         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2885         uint16_t pbsize;
2886         uint8_t nb_tcs; /* number of traffic classes */
2887         int i;
2888
2889         PMD_INIT_FUNC_TRACE();
2890         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2891         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2892         num_pools = cfg->nb_queue_pools;
2893         /* Check we have a valid number of pools */
2894         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2895                 ixgbe_rss_disable(dev);
2896                 return;
2897         }
2898         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2899         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2900
2901         /*
2902          * RXPBSIZE
2903          * split rx buffer up into sections, each for 1 traffic class
2904          */
2905         switch (hw->mac.type) {
2906         case ixgbe_mac_X550:
2907         case ixgbe_mac_X550EM_x:
2908         case ixgbe_mac_X550EM_a:
2909                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2910                 break;
2911         default:
2912                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2913                 break;
2914         }
2915         for (i = 0; i < nb_tcs; i++) {
2916                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2917                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2918                 /* clear 10 bits. */
2919                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2920                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2921         }
2922         /* zero alloc all unused TCs */
2923         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2924                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2925                 rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
2926                 /* clear 10 bits. */
2927                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2928         }
2929
2930         /* MRQC: enable vmdq and dcb */
2931         mrqc = ((num_pools == ETH_16_POOLS) ? \
2932                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2933         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2934
2935         /* PFVTCTL: turn on virtualisation and set the default pool */
2936         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2937         if (cfg->enable_default_pool) {
2938                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2939         } else {
2940                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2941         }
2942
2943         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2944
2945         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2946         queue_mapping = 0;
2947         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2948                 /*
2949                  * mapping is done with 3 bits per priority,
2950                  * so shift by i*3 each time
2951                  */
2952                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2953
2954         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
2955
2956         /* RTRPCS: DCB related */
2957         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2958
2959         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2960         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2961         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2962         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2963
2964         /* VFTA - enable all vlan filters */
2965         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2966                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2967         }
2968
2969         /* VFRE: pool enabling for receive - 16 or 32 */
2970         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2971                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2972
2973         /*
2974          * MPSAR - allow pools to read specific mac addresses
2975          * In this case, all pools should be able to read from mac addr 0
2976          */
2977         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2978         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2979
2980         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2981         for (i = 0; i < cfg->nb_pool_maps; i++) {
2982                 /* set vlan id in VF register and set the valid bit */
2983                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2984                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
2985                 /*
2986                  * Put the allowed pools in VFB reg. As we only have 16 or 32
2987                  * pools, we only need to use the first half of the register
2988                  * i.e. bits 0-31
2989                  */
2990                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2991         }
2992 }
2993
2994 /**
2995  * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
2996  * @hw: pointer to hardware structure
2997  * @dcb_config: pointer to ixgbe_dcb_config structure
2998  */
2999 static void
3000 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
3001                struct ixgbe_dcb_config *dcb_config)
3002 {
3003         uint32_t reg;
3004         uint32_t q;
3005
3006         PMD_INIT_FUNC_TRACE();
3007         if (hw->mac.type != ixgbe_mac_82598EB) {
3008                 /* Disable the Tx desc arbiter so that MTQC can be changed */
3009                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3010                 reg |= IXGBE_RTTDCS_ARBDIS;
3011                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3012
3013                 /* Enable DCB for Tx with 8 TCs */
3014                 if (dcb_config->num_tcs.pg_tcs == 8) {
3015                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3016                 }
3017                 else {
3018                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3019                 }
3020                 if (dcb_config->vt_mode)
3021                     reg |= IXGBE_MTQC_VT_ENA;
3022                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3023
3024                 /* Disable drop for all queues */
3025                 for (q = 0; q < 128; q++)
3026                         IXGBE_WRITE_REG(hw, IXGBE_QDE,
3027                      (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3028
3029                 /* Enable the Tx desc arbiter */
3030                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3031                 reg &= ~IXGBE_RTTDCS_ARBDIS;
3032                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3033
3034                 /* Enable Security TX Buffer IFG for DCB */
3035                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3036                 reg |= IXGBE_SECTX_DCB;
3037                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3038         }
3039         return;
3040 }
3041
3042 /**
3043  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3044  * @dev: pointer to rte_eth_dev structure
3045  * @dcb_config: pointer to ixgbe_dcb_config structure
3046  */
3047 static void
3048 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3049                         struct ixgbe_dcb_config *dcb_config)
3050 {
3051         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3052                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3053         struct ixgbe_hw *hw =
3054                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3055
3056         PMD_INIT_FUNC_TRACE();
3057         if (hw->mac.type != ixgbe_mac_82598EB)
3058                 /*PF VF Transmit Enable*/
3059                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3060                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3061
3062         /*Configure general DCB TX parameters*/
3063         ixgbe_dcb_tx_hw_config(hw,dcb_config);
3064         return;
3065 }
3066
3067 static void
3068 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3069                         struct ixgbe_dcb_config *dcb_config)
3070 {
3071         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3072                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3073         struct ixgbe_dcb_tc_config *tc;
3074         uint8_t i,j;
3075
3076         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3077         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
3078                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3079                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3080         }
3081         else {
3082                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3083                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3084         }
3085         /* User Priority to Traffic Class mapping */
3086         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3087                 j = vmdq_rx_conf->dcb_tc[i];
3088                 tc = &dcb_config->tc_config[j];
3089                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3090                                                 (uint8_t)(1 << j);
3091         }
3092 }
3093
3094 static void
3095 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3096                         struct ixgbe_dcb_config *dcb_config)
3097 {
3098         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3099                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3100         struct ixgbe_dcb_tc_config *tc;
3101         uint8_t i,j;
3102
3103         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3104         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
3105                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3106                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3107         }
3108         else {
3109                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3110                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3111         }
3112
3113         /* User Priority to Traffic Class mapping */
3114         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3115                 j = vmdq_tx_conf->dcb_tc[i];
3116                 tc = &dcb_config->tc_config[j];
3117                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3118                                                 (uint8_t)(1 << j);
3119         }
3120         return;
3121 }
3122
3123 static void
3124 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3125                 struct ixgbe_dcb_config *dcb_config)
3126 {
3127         struct rte_eth_dcb_rx_conf *rx_conf =
3128                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3129         struct ixgbe_dcb_tc_config *tc;
3130         uint8_t i,j;
3131
3132         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3133         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3134
3135         /* User Priority to Traffic Class mapping */
3136         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3137                 j = rx_conf->dcb_tc[i];
3138                 tc = &dcb_config->tc_config[j];
3139                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3140                                                 (uint8_t)(1 << j);
3141         }
3142 }
3143
3144 static void
3145 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3146                 struct ixgbe_dcb_config *dcb_config)
3147 {
3148         struct rte_eth_dcb_tx_conf *tx_conf =
3149                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3150         struct ixgbe_dcb_tc_config *tc;
3151         uint8_t i,j;
3152
3153         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3154         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3155
3156         /* User Priority to Traffic Class mapping */
3157         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3158                 j = tx_conf->dcb_tc[i];
3159                 tc = &dcb_config->tc_config[j];
3160                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3161                                                 (uint8_t)(1 << j);
3162         }
3163 }
3164
3165 /**
3166  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3167  * @hw: pointer to hardware structure
3168  * @dcb_config: pointer to ixgbe_dcb_config structure
3169  */
3170 static void
3171 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3172                struct ixgbe_dcb_config *dcb_config)
3173 {
3174         uint32_t reg;
3175         uint32_t vlanctrl;
3176         uint8_t i;
3177
3178         PMD_INIT_FUNC_TRACE();
3179         /*
3180          * Disable the arbiter before changing parameters
3181          * (always enable recycle mode; WSP)
3182          */
3183         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3184         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3185
3186         if (hw->mac.type != ixgbe_mac_82598EB) {
3187                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3188                 if (dcb_config->num_tcs.pg_tcs == 4) {
3189                         if (dcb_config->vt_mode)
3190                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3191                                         IXGBE_MRQC_VMDQRT4TCEN;
3192                         else {
3193                                 /* no matter the mode is DCB or DCB_RSS, just
3194                                  * set the MRQE to RSSXTCEN. RSS is controlled
3195                                  * by RSS_FIELD
3196                                  */
3197                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3198                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3199                                         IXGBE_MRQC_RTRSS4TCEN;
3200                         }
3201                 }
3202                 if (dcb_config->num_tcs.pg_tcs == 8) {
3203                         if (dcb_config->vt_mode)
3204                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3205                                         IXGBE_MRQC_VMDQRT8TCEN;
3206                         else {
3207                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3208                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3209                                         IXGBE_MRQC_RTRSS8TCEN;
3210                         }
3211                 }
3212
3213                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3214         }
3215
3216         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3217         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3218         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3219         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3220
3221         /* VFTA - enable all vlan filters */
3222         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3223                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3224         }
3225
3226         /*
3227          * Configure Rx packet plane (recycle mode; WSP) and
3228          * enable arbiter
3229          */
3230         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3231         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3232
3233         return;
3234 }
3235
3236 static void
3237 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3238                         uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3239 {
3240         switch (hw->mac.type) {
3241         case ixgbe_mac_82598EB:
3242                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3243                 break;
3244         case ixgbe_mac_82599EB:
3245         case ixgbe_mac_X540:
3246         case ixgbe_mac_X550:
3247         case ixgbe_mac_X550EM_x:
3248         case ixgbe_mac_X550EM_a:
3249                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3250                                                   tsa, map);
3251                 break;
3252         default:
3253                 break;
3254         }
3255 }
3256
3257 static void
3258 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3259                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3260 {
3261         switch (hw->mac.type) {
3262         case ixgbe_mac_82598EB:
3263                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3264                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3265                 break;
3266         case ixgbe_mac_82599EB:
3267         case ixgbe_mac_X540:
3268         case ixgbe_mac_X550:
3269         case ixgbe_mac_X550EM_x:
3270         case ixgbe_mac_X550EM_a:
3271                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3272                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3273                 break;
3274         default:
3275                 break;
3276         }
3277 }
3278
3279 #define DCB_RX_CONFIG  1
3280 #define DCB_TX_CONFIG  1
3281 #define DCB_TX_PB      1024
3282 /**
3283  * ixgbe_dcb_hw_configure - Enable DCB and configure
3284  * general DCB in VT mode and non-VT mode parameters
3285  * @dev: pointer to rte_eth_dev structure
3286  * @dcb_config: pointer to ixgbe_dcb_config structure
3287  */
3288 static int
3289 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3290                         struct ixgbe_dcb_config *dcb_config)
3291 {
3292         int     ret = 0;
3293         uint8_t i,pfc_en,nb_tcs;
3294         uint16_t pbsize, rx_buffer_size;
3295         uint8_t config_dcb_rx = 0;
3296         uint8_t config_dcb_tx = 0;
3297         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3298         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3299         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3300         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3301         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3302         struct ixgbe_dcb_tc_config *tc;
3303         uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3304         struct ixgbe_hw *hw =
3305                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3306
3307         switch(dev->data->dev_conf.rxmode.mq_mode){
3308         case ETH_MQ_RX_VMDQ_DCB:
3309                 dcb_config->vt_mode = true;
3310                 if (hw->mac.type != ixgbe_mac_82598EB) {
3311                         config_dcb_rx = DCB_RX_CONFIG;
3312                         /*
3313                          *get dcb and VT rx configuration parameters
3314                          *from rte_eth_conf
3315                          */
3316                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3317                         /*Configure general VMDQ and DCB RX parameters*/
3318                         ixgbe_vmdq_dcb_configure(dev);
3319                 }
3320                 break;
3321         case ETH_MQ_RX_DCB:
3322         case ETH_MQ_RX_DCB_RSS:
3323                 dcb_config->vt_mode = false;
3324                 config_dcb_rx = DCB_RX_CONFIG;
3325                 /* Get dcb TX configuration parameters from rte_eth_conf */
3326                 ixgbe_dcb_rx_config(dev, dcb_config);
3327                 /*Configure general DCB RX parameters*/
3328                 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3329                 break;
3330         default:
3331                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3332                 break;
3333         }
3334         switch (dev->data->dev_conf.txmode.mq_mode) {
3335         case ETH_MQ_TX_VMDQ_DCB:
3336                 dcb_config->vt_mode = true;
3337                 config_dcb_tx = DCB_TX_CONFIG;
3338                 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3339                 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3340                 /*Configure general VMDQ and DCB TX parameters*/
3341                 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3342                 break;
3343
3344         case ETH_MQ_TX_DCB:
3345                 dcb_config->vt_mode = false;
3346                 config_dcb_tx = DCB_TX_CONFIG;
3347                 /*get DCB TX configuration parameters from rte_eth_conf*/
3348                 ixgbe_dcb_tx_config(dev, dcb_config);
3349                 /*Configure general DCB TX parameters*/
3350                 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3351                 break;
3352         default:
3353                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3354                 break;
3355         }
3356
3357         nb_tcs = dcb_config->num_tcs.pfc_tcs;
3358         /* Unpack map */
3359         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3360         if (nb_tcs == ETH_4_TCS) {
3361                 /* Avoid un-configured priority mapping to TC0 */
3362                 uint8_t j = 4;
3363                 uint8_t mask = 0xFF;
3364                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3365                         mask = (uint8_t)(mask & (~ (1 << map[i])));
3366                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3367                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3368                                 map[j++] = i;
3369                         mask >>= 1;
3370                 }
3371                 /* Re-configure 4 TCs BW */
3372                 for (i = 0; i < nb_tcs; i++) {
3373                         tc = &dcb_config->tc_config[i];
3374                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3375                                                 (uint8_t)(100 / nb_tcs);
3376                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3377                                                 (uint8_t)(100 / nb_tcs);
3378                 }
3379                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3380                         tc = &dcb_config->tc_config[i];
3381                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3382                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3383                 }
3384         }
3385
3386         switch (hw->mac.type) {
3387         case ixgbe_mac_X550:
3388         case ixgbe_mac_X550EM_x:
3389         case ixgbe_mac_X550EM_a:
3390                 rx_buffer_size = X550_RX_BUFFER_SIZE;
3391                 break;
3392         default:
3393                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3394                 break;
3395         }
3396
3397         if (config_dcb_rx) {
3398                 /* Set RX buffer size */
3399                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3400                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3401                 for (i = 0; i < nb_tcs; i++) {
3402                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3403                 }
3404                 /* zero alloc all unused TCs */
3405                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3406                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3407                 }
3408         }
3409         if (config_dcb_tx) {
3410                 /* Only support an equally distributed Tx packet buffer strategy. */
3411                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3412                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3413                 for (i = 0; i < nb_tcs; i++) {
3414                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3415                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3416                 }
3417                 /* Clear unused TCs, if any, to zero buffer size*/
3418                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3419                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3420                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3421                 }
3422         }
3423
3424         /*Calculates traffic class credits*/
3425         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3426                                 IXGBE_DCB_TX_CONFIG);
3427         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3428                                 IXGBE_DCB_RX_CONFIG);
3429
3430         if (config_dcb_rx) {
3431                 /* Unpack CEE standard containers */
3432                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3433                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3434                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3435                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3436                 /* Configure PG(ETS) RX */
3437                 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3438         }
3439
3440         if (config_dcb_tx) {
3441                 /* Unpack CEE standard containers */
3442                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3443                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3444                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3445                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3446                 /* Configure PG(ETS) TX */
3447                 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3448         }
3449
3450         /*Configure queue statistics registers*/
3451         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3452
3453         /* Check if the PFC is supported */
3454         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3455                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3456                 for (i = 0; i < nb_tcs; i++) {
3457                         /*
3458                         * If the TC count is 8,and the default high_water is 48,
3459                         * the low_water is 16 as default.
3460                         */
3461                         hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3462                         hw->fc.low_water[i] = pbsize / 4;
3463                         /* Enable pfc for this TC */
3464                         tc = &dcb_config->tc_config[i];
3465                         tc->pfc = ixgbe_dcb_pfc_enabled;
3466                 }
3467                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3468                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3469                         pfc_en &= 0x0F;
3470                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3471         }
3472
3473         return ret;
3474 }
3475
3476 /**
3477  * ixgbe_configure_dcb - Configure DCB  Hardware
3478  * @dev: pointer to rte_eth_dev
3479  */
3480 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3481 {
3482         struct ixgbe_dcb_config *dcb_cfg =
3483                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3484         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3485
3486         PMD_INIT_FUNC_TRACE();
3487
3488         /* check support mq_mode for DCB */
3489         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3490             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3491             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3492                 return;
3493
3494         if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3495                 return;
3496
3497         /** Configure DCB hardware **/
3498         ixgbe_dcb_hw_configure(dev, dcb_cfg);
3499
3500         return;
3501 }
3502
3503 /*
3504  * VMDq only support for 10 GbE NIC.
3505  */
3506 static void
3507 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3508 {
3509         struct rte_eth_vmdq_rx_conf *cfg;
3510         struct ixgbe_hw *hw;
3511         enum rte_eth_nb_pools num_pools;
3512         uint32_t mrqc, vt_ctl, vlanctrl;
3513         uint32_t vmolr = 0;
3514         int i;
3515
3516         PMD_INIT_FUNC_TRACE();
3517         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3518         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3519         num_pools = cfg->nb_queue_pools;
3520
3521         ixgbe_rss_disable(dev);
3522
3523         /* MRQC: enable vmdq */
3524         mrqc = IXGBE_MRQC_VMDQEN;
3525         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3526
3527         /* PFVTCTL: turn on virtualisation and set the default pool */
3528         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3529         if (cfg->enable_default_pool)
3530                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3531         else
3532                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3533
3534         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3535
3536         for (i = 0; i < (int)num_pools; i++) {
3537                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3538                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3539         }
3540
3541         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3542         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3543         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3544         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3545
3546         /* VFTA - enable all vlan filters */
3547         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3548                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3549
3550         /* VFRE: pool enabling for receive - 64 */
3551         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3552         if (num_pools == ETH_64_POOLS)
3553                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3554
3555         /*
3556          * MPSAR - allow pools to read specific mac addresses
3557          * In this case, all pools should be able to read from mac addr 0
3558          */
3559         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3560         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3561
3562         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3563         for (i = 0; i < cfg->nb_pool_maps; i++) {
3564                 /* set vlan id in VF register and set the valid bit */
3565                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3566                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3567                 /*
3568                  * Put the allowed pools in VFB reg. As we only have 16 or 64
3569                  * pools, we only need to use the first half of the register
3570                  * i.e. bits 0-31
3571                  */
3572                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3573                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3574                                         (cfg->pool_map[i].pools & UINT32_MAX));
3575                 else
3576                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3577                                         ((cfg->pool_map[i].pools >> 32) \
3578                                         & UINT32_MAX));
3579
3580         }
3581
3582         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3583         if (cfg->enable_loop_back) {
3584                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3585                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3586                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3587         }
3588
3589         IXGBE_WRITE_FLUSH(hw);
3590 }
3591
3592 /*
3593  * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
3594  * @hw: pointer to hardware structure
3595  */
3596 static void
3597 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3598 {
3599         uint32_t reg;
3600         uint32_t q;
3601
3602         PMD_INIT_FUNC_TRACE();
3603         /*PF VF Transmit Enable*/
3604         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3605         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3606
3607         /* Disable the Tx desc arbiter so that MTQC can be changed */
3608         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3609         reg |= IXGBE_RTTDCS_ARBDIS;
3610         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3611
3612         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3613         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3614
3615         /* Disable drop for all queues */
3616         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3617                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3618                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3619
3620         /* Enable the Tx desc arbiter */
3621         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3622         reg &= ~IXGBE_RTTDCS_ARBDIS;
3623         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3624
3625         IXGBE_WRITE_FLUSH(hw);
3626
3627         return;
3628 }
3629
3630 static int __attribute__((cold))
3631 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3632 {
3633         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3634         uint64_t dma_addr;
3635         unsigned i;
3636
3637         /* Initialize software ring entries */
3638         for (i = 0; i < rxq->nb_rx_desc; i++) {
3639                 volatile union ixgbe_adv_rx_desc *rxd;
3640                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3641                 if (mbuf == NULL) {
3642                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3643                                      (unsigned) rxq->queue_id);
3644                         return -ENOMEM;
3645                 }
3646
3647                 rte_mbuf_refcnt_set(mbuf, 1);
3648                 mbuf->next = NULL;
3649                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3650                 mbuf->nb_segs = 1;
3651                 mbuf->port = rxq->port_id;
3652
3653                 dma_addr =
3654                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
3655                 rxd = &rxq->rx_ring[i];
3656                 rxd->read.hdr_addr = 0;
3657                 rxd->read.pkt_addr = dma_addr;
3658                 rxe[i].mbuf = mbuf;
3659         }
3660
3661         return 0;
3662 }
3663
3664 static int
3665 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3666 {
3667         struct ixgbe_hw *hw;
3668         uint32_t mrqc;
3669
3670         ixgbe_rss_configure(dev);
3671
3672         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3673
3674         /* MRQC: enable VF RSS */
3675         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3676         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3677         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3678         case ETH_64_POOLS:
3679                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3680                 break;
3681
3682         case ETH_32_POOLS:
3683                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3684                 break;
3685
3686         default:
3687                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3688                 return -EINVAL;
3689         }
3690
3691         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3692
3693         return 0;
3694 }
3695
3696 static int
3697 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3698 {
3699         struct ixgbe_hw *hw =
3700                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3701
3702         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3703         case ETH_64_POOLS:
3704                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3705                         IXGBE_MRQC_VMDQEN);
3706                 break;
3707
3708         case ETH_32_POOLS:
3709                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3710                         IXGBE_MRQC_VMDQRT4TCEN);
3711                 break;
3712
3713         case ETH_16_POOLS:
3714                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3715                         IXGBE_MRQC_VMDQRT8TCEN);
3716                 break;
3717         default:
3718                 PMD_INIT_LOG(ERR,
3719                         "invalid pool number in IOV mode");
3720                 break;
3721         }
3722         return 0;
3723 }
3724
3725 static int
3726 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3727 {
3728         struct ixgbe_hw *hw =
3729                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3730
3731         if (hw->mac.type == ixgbe_mac_82598EB)
3732                 return 0;
3733
3734         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3735                 /*
3736                  * SRIOV inactive scheme
3737                  * any DCB/RSS w/o VMDq multi-queue setting
3738                  */
3739                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3740                 case ETH_MQ_RX_RSS:
3741                 case ETH_MQ_RX_DCB_RSS:
3742                 case ETH_MQ_RX_VMDQ_RSS:
3743                         ixgbe_rss_configure(dev);
3744                         break;
3745
3746                 case ETH_MQ_RX_VMDQ_DCB:
3747                         ixgbe_vmdq_dcb_configure(dev);
3748                         break;
3749
3750                 case ETH_MQ_RX_VMDQ_ONLY:
3751                         ixgbe_vmdq_rx_hw_configure(dev);
3752                         break;
3753
3754                 case ETH_MQ_RX_NONE:
3755                 default:
3756                         /* if mq_mode is none, disable rss mode.*/
3757                         ixgbe_rss_disable(dev);
3758                         break;
3759                 }
3760         } else {
3761                 /*
3762                  * SRIOV active scheme
3763                  * Support RSS together with VMDq & SRIOV
3764                  */
3765                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3766                 case ETH_MQ_RX_RSS:
3767                 case ETH_MQ_RX_VMDQ_RSS:
3768                         ixgbe_config_vf_rss(dev);
3769                         break;
3770
3771                 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3772                 case ETH_MQ_RX_VMDQ_DCB:
3773                 case ETH_MQ_RX_VMDQ_DCB_RSS:
3774                         PMD_INIT_LOG(ERR,
3775                                 "Could not support DCB with VMDq & SRIOV");
3776                         return -1;
3777                 default:
3778                         ixgbe_config_vf_default(dev);
3779                         break;
3780                 }
3781         }
3782
3783         return 0;
3784 }
3785
3786 static int
3787 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3788 {
3789         struct ixgbe_hw *hw =
3790                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3791         uint32_t mtqc;
3792         uint32_t rttdcs;
3793
3794         if (hw->mac.type == ixgbe_mac_82598EB)
3795                 return 0;
3796
3797         /* disable arbiter before setting MTQC */
3798         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3799         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3800         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3801
3802         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3803                 /*
3804                  * SRIOV inactive scheme
3805                  * any DCB w/o VMDq multi-queue setting
3806                  */
3807                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3808                         ixgbe_vmdq_tx_hw_configure(hw);
3809                 else {
3810                         mtqc = IXGBE_MTQC_64Q_1PB;
3811                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3812                 }
3813         } else {
3814                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3815
3816                 /*
3817                  * SRIOV active scheme
3818                  * FIXME if support DCB together with VMDq & SRIOV
3819                  */
3820                 case ETH_64_POOLS:
3821                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3822                         break;
3823                 case ETH_32_POOLS:
3824                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3825                         break;
3826                 case ETH_16_POOLS:
3827                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3828                                 IXGBE_MTQC_8TC_8TQ;
3829                         break;
3830                 default:
3831                         mtqc = IXGBE_MTQC_64Q_1PB;
3832                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3833                 }
3834                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3835         }
3836
3837         /* re-enable arbiter */
3838         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3839         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3840
3841         return 0;
3842 }
3843
3844 /**
3845  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3846  *
3847  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3848  * spec rev. 3.0 chapter 8.2.3.8.13.
3849  *
3850  * @pool Memory pool of the Rx queue
3851  */
3852 static inline uint32_t
3853 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3854 {
3855         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3856
3857         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3858         uint16_t maxdesc =
3859                 IPV4_MAX_PKT_LEN /
3860                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3861
3862         if (maxdesc >= 16)
3863                 return IXGBE_RSCCTL_MAXDESC_16;
3864         else if (maxdesc >= 8)
3865                 return IXGBE_RSCCTL_MAXDESC_8;
3866         else if (maxdesc >= 4)
3867                 return IXGBE_RSCCTL_MAXDESC_4;
3868         else
3869                 return IXGBE_RSCCTL_MAXDESC_1;
3870 }
3871
3872 /**
3873  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3874  * interrupt
3875  *
3876  * (Taken from FreeBSD tree)
3877  * (yes this is all very magic and confusing :)
3878  *
3879  * @dev port handle
3880  * @entry the register array entry
3881  * @vector the MSIX vector for this queue
3882  * @type RX/TX/MISC
3883  */
3884 static void
3885 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3886 {
3887         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3888         u32 ivar, index;
3889
3890         vector |= IXGBE_IVAR_ALLOC_VAL;
3891
3892         switch (hw->mac.type) {
3893
3894         case ixgbe_mac_82598EB:
3895                 if (type == -1)
3896                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3897                 else
3898                         entry += (type * 64);
3899                 index = (entry >> 2) & 0x1F;
3900                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3901                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3902                 ivar |= (vector << (8 * (entry & 0x3)));
3903                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3904                 break;
3905
3906         case ixgbe_mac_82599EB:
3907         case ixgbe_mac_X540:
3908                 if (type == -1) { /* MISC IVAR */
3909                         index = (entry & 1) * 8;
3910                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3911                         ivar &= ~(0xFF << index);
3912                         ivar |= (vector << index);
3913                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3914                 } else {        /* RX/TX IVARS */
3915                         index = (16 * (entry & 1)) + (8 * type);
3916                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3917                         ivar &= ~(0xFF << index);
3918                         ivar |= (vector << index);
3919                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3920                 }
3921
3922                 break;
3923
3924         default:
3925                 break;
3926         }
3927 }
3928
3929 void __attribute__((cold))
3930 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3931 {
3932         uint16_t i, rx_using_sse;
3933         struct ixgbe_adapter *adapter =
3934                 (struct ixgbe_adapter *)dev->data->dev_private;
3935
3936         /*
3937          * In order to allow Vector Rx there are a few configuration
3938          * conditions to be met and Rx Bulk Allocation should be allowed.
3939          */
3940         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3941             !adapter->rx_bulk_alloc_allowed) {
3942                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3943                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
3944                                     "not enabled",
3945                              dev->data->port_id);
3946
3947                 adapter->rx_vec_allowed = false;
3948         }
3949
3950         /*
3951          * Initialize the appropriate LRO callback.
3952          *
3953          * If all queues satisfy the bulk allocation preconditions
3954          * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3955          * Otherwise use a single allocation version.
3956          */
3957         if (dev->data->lro) {
3958                 if (adapter->rx_bulk_alloc_allowed) {
3959                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3960                                            "allocation version");
3961                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3962                 } else {
3963                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3964                                            "allocation version");
3965                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3966                 }
3967         } else if (dev->data->scattered_rx) {
3968                 /*
3969                  * Set the non-LRO scattered callback: there are Vector and
3970                  * single allocation versions.
3971                  */
3972                 if (adapter->rx_vec_allowed) {
3973                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3974                                             "callback (port=%d).",
3975                                      dev->data->port_id);
3976
3977                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3978                 } else if (adapter->rx_bulk_alloc_allowed) {
3979                         PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
3980                                            "allocation callback (port=%d).",
3981                                      dev->data->port_id);
3982                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3983                 } else {
3984                         PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
3985                                             "single allocation) "
3986                                             "Scattered Rx callback "
3987                                             "(port=%d).",
3988                                      dev->data->port_id);
3989
3990                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3991                 }
3992         /*
3993          * Below we set "simple" callbacks according to port/queues parameters.
3994          * If parameters allow we are going to choose between the following
3995          * callbacks:
3996          *    - Vector
3997          *    - Bulk Allocation
3998          *    - Single buffer allocation (the simplest one)
3999          */
4000         } else if (adapter->rx_vec_allowed) {
4001                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4002                                     "burst size no less than %d (port=%d).",
4003                              RTE_IXGBE_DESCS_PER_LOOP,
4004                              dev->data->port_id);
4005
4006                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4007         } else if (adapter->rx_bulk_alloc_allowed) {
4008                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4009                                     "satisfied. Rx Burst Bulk Alloc function "
4010                                     "will be used on port=%d.",
4011                              dev->data->port_id);
4012
4013                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4014         } else {
4015                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4016                                     "satisfied, or Scattered Rx is requested "
4017                                     "(port=%d).",
4018                              dev->data->port_id);
4019
4020                 dev->rx_pkt_burst = ixgbe_recv_pkts;
4021         }
4022
4023         /* Propagate information about RX function choice through all queues. */
4024
4025         rx_using_sse =
4026                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4027                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4028
4029         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4030                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4031                 rxq->rx_using_sse = rx_using_sse;
4032         }
4033 }
4034
4035 /**
4036  * ixgbe_set_rsc - configure RSC related port HW registers
4037  *
4038  * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4039  * of 82599 Spec (x540 configuration is virtually the same).
4040  *
4041  * @dev port handle
4042  *
4043  * Returns 0 in case of success or a non-zero error code
4044  */
4045 static int
4046 ixgbe_set_rsc(struct rte_eth_dev *dev)
4047 {
4048         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4049         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4050         struct rte_eth_dev_info dev_info = { 0 };
4051         bool rsc_capable = false;
4052         uint16_t i;
4053         uint32_t rdrxctl;
4054
4055         /* Sanity check */
4056         dev->dev_ops->dev_infos_get(dev, &dev_info);
4057         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4058                 rsc_capable = true;
4059
4060         if (!rsc_capable && rx_conf->enable_lro) {
4061                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4062                                    "support it");
4063                 return -EINVAL;
4064         }
4065
4066         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4067
4068         if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4069                 /*
4070                  * According to chapter of 4.6.7.2.1 of the Spec Rev.
4071                  * 3.0 RSC configuration requires HW CRC stripping being
4072                  * enabled. If user requested both HW CRC stripping off
4073                  * and RSC on - return an error.
4074                  */
4075                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4076                                     "is disabled");
4077                 return -EINVAL;
4078         }
4079
4080         /* RFCTL configuration  */
4081         if (rsc_capable) {
4082                 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4083                 if (rx_conf->enable_lro)
4084                         /*
4085                          * Since NFS packets coalescing is not supported - clear
4086                          * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4087                          * enabled.
4088                          */
4089                         rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4090                                    IXGBE_RFCTL_NFSR_DIS);
4091                 else
4092                         rfctl |= IXGBE_RFCTL_RSC_DIS;
4093
4094                 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4095         }
4096
4097         /* If LRO hasn't been requested - we are done here. */
4098         if (!rx_conf->enable_lro)
4099                 return 0;
4100
4101         /* Set RDRXCTL.RSCACKC bit */
4102         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4103         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4104         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4105
4106         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4107         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4108                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4109                 uint32_t srrctl =
4110                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4111                 uint32_t rscctl =
4112                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4113                 uint32_t psrtype =
4114                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4115                 uint32_t eitr =
4116                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4117
4118                 /*
4119                  * ixgbe PMD doesn't support header-split at the moment.
4120                  *
4121                  * Following the 4.6.7.2.1 chapter of the 82599/x540
4122                  * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4123                  * should be configured even if header split is not
4124                  * enabled. We will configure it 128 bytes following the
4125                  * recommendation in the spec.
4126                  */
4127                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4128                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4129                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4130
4131                 /*
4132                  * TODO: Consider setting the Receive Descriptor Minimum
4133                  * Threshold Size for an RSC case. This is not an obviously
4134                  * beneficiary option but the one worth considering...
4135                  */
4136
4137                 rscctl |= IXGBE_RSCCTL_RSCEN;
4138                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4139                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4140
4141                 /*
4142                  * RSC: Set ITR interval corresponding to 2K ints/s.
4143                  *
4144                  * Full-sized RSC aggregations for a 10Gb/s link will
4145                  * arrive at about 20K aggregation/s rate.
4146                  *
4147                  * 2K inst/s rate will make only 10% of the
4148                  * aggregations to be closed due to the interrupt timer
4149                  * expiration for a streaming at wire-speed case.
4150                  *
4151                  * For a sparse streaming case this setting will yield
4152                  * at most 500us latency for a single RSC aggregation.
4153                  */
4154                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4155                 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4156
4157                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4158                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4159                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4160                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4161
4162                 /*
4163                  * RSC requires the mapping of the queue to the
4164                  * interrupt vector.
4165                  */
4166                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4167         }
4168
4169         dev->data->lro = 1;
4170
4171         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4172
4173         return 0;
4174 }
4175
4176 /*
4177  * Initializes Receive Unit.
4178  */
4179 int __attribute__((cold))
4180 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4181 {
4182         struct ixgbe_hw     *hw;
4183         struct ixgbe_rx_queue *rxq;
4184         uint64_t bus_addr;
4185         uint32_t rxctrl;
4186         uint32_t fctrl;
4187         uint32_t hlreg0;
4188         uint32_t maxfrs;
4189         uint32_t srrctl;
4190         uint32_t rdrxctl;
4191         uint32_t rxcsum;
4192         uint16_t buf_size;
4193         uint16_t i;
4194         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4195         int rc;
4196
4197         PMD_INIT_FUNC_TRACE();
4198         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4199
4200         /*
4201          * Make sure receives are disabled while setting
4202          * up the RX context (registers, descriptor rings, etc.).
4203          */
4204         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4205         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4206
4207         /* Enable receipt of broadcasted frames */
4208         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4209         fctrl |= IXGBE_FCTRL_BAM;
4210         fctrl |= IXGBE_FCTRL_DPF;
4211         fctrl |= IXGBE_FCTRL_PMCF;
4212         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4213
4214         /*
4215          * Configure CRC stripping, if any.
4216          */
4217         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4218         if (rx_conf->hw_strip_crc)
4219                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4220         else
4221                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4222
4223         /*
4224          * Configure jumbo frame support, if any.
4225          */
4226         if (rx_conf->jumbo_frame == 1) {
4227                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4228                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4229                 maxfrs &= 0x0000FFFF;
4230                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4231                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4232         } else
4233                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4234
4235         /*
4236          * If loopback mode is configured for 82599, set LPBK bit.
4237          */
4238         if (hw->mac.type == ixgbe_mac_82599EB &&
4239                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4240                 hlreg0 |= IXGBE_HLREG0_LPBK;
4241         else
4242                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4243
4244         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4245
4246         /* Setup RX queues */
4247         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4248                 rxq = dev->data->rx_queues[i];
4249
4250                 /*
4251                  * Reset crc_len in case it was changed after queue setup by a
4252                  * call to configure.
4253                  */
4254                 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4255
4256                 /* Setup the Base and Length of the Rx Descriptor Rings */
4257                 bus_addr = rxq->rx_ring_phys_addr;
4258                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4259                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4260                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4261                                 (uint32_t)(bus_addr >> 32));
4262                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4263                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4264                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4265                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4266
4267                 /* Configure the SRRCTL register */
4268 #ifdef RTE_HEADER_SPLIT_ENABLE
4269                 /*
4270                  * Configure Header Split
4271                  */
4272                 if (rx_conf->header_split) {
4273                         if (hw->mac.type == ixgbe_mac_82599EB) {
4274                                 /* Must setup the PSRTYPE register */
4275                                 uint32_t psrtype;
4276                                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4277                                         IXGBE_PSRTYPE_UDPHDR   |
4278                                         IXGBE_PSRTYPE_IPV4HDR  |
4279                                         IXGBE_PSRTYPE_IPV6HDR;
4280                                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4281                         }
4282                         srrctl = ((rx_conf->split_hdr_size <<
4283                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4284                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4285                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4286                 } else
4287 #endif
4288                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4289
4290                 /* Set if packets are dropped when no descriptors available */
4291                 if (rxq->drop_en)
4292                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4293
4294                 /*
4295                  * Configure the RX buffer size in the BSIZEPACKET field of
4296                  * the SRRCTL register of the queue.
4297                  * The value is in 1 KB resolution. Valid values can be from
4298                  * 1 KB to 16 KB.
4299                  */
4300                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4301                         RTE_PKTMBUF_HEADROOM);
4302                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4303                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4304
4305                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4306
4307                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4308                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4309
4310                 /* It adds dual VLAN length for supporting dual VLAN */
4311                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4312                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4313                         dev->data->scattered_rx = 1;
4314         }
4315
4316         if (rx_conf->enable_scatter)
4317                 dev->data->scattered_rx = 1;
4318
4319         /*
4320          * Device configured with multiple RX queues.
4321          */
4322         ixgbe_dev_mq_rx_configure(dev);
4323
4324         /*
4325          * Setup the Checksum Register.
4326          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4327          * Enable IP/L4 checkum computation by hardware if requested to do so.
4328          */
4329         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4330         rxcsum |= IXGBE_RXCSUM_PCSD;
4331         if (rx_conf->hw_ip_checksum)
4332                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4333         else
4334                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4335
4336         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4337
4338         if (hw->mac.type == ixgbe_mac_82599EB ||
4339             hw->mac.type == ixgbe_mac_X540) {
4340                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4341                 if (rx_conf->hw_strip_crc)
4342                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4343                 else
4344                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4345                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4346                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4347         }
4348
4349         rc = ixgbe_set_rsc(dev);
4350         if (rc)
4351                 return rc;
4352
4353         ixgbe_set_rx_function(dev);
4354
4355         return 0;
4356 }
4357
4358 /*
4359  * Initializes Transmit Unit.
4360  */
4361 void __attribute__((cold))
4362 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4363 {
4364         struct ixgbe_hw     *hw;
4365         struct ixgbe_tx_queue *txq;
4366         uint64_t bus_addr;
4367         uint32_t hlreg0;
4368         uint32_t txctrl;
4369         uint16_t i;
4370
4371         PMD_INIT_FUNC_TRACE();
4372         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4373
4374         /* Enable TX CRC (checksum offload requirement) and hw padding
4375          * (TSO requirement) */
4376         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4377         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4378         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4379
4380         /* Setup the Base and Length of the Tx Descriptor Rings */
4381         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4382                 txq = dev->data->tx_queues[i];
4383
4384                 bus_addr = txq->tx_ring_phys_addr;
4385                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4386                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4387                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4388                                 (uint32_t)(bus_addr >> 32));
4389                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4390                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4391                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4392                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4393                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4394
4395                 /*
4396                  * Disable Tx Head Writeback RO bit, since this hoses
4397                  * bookkeeping if things aren't delivered in order.
4398                  */
4399                 switch (hw->mac.type) {
4400                         case ixgbe_mac_82598EB:
4401                                 txctrl = IXGBE_READ_REG(hw,
4402                                                         IXGBE_DCA_TXCTRL(txq->reg_idx));
4403                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4404                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4405                                                 txctrl);
4406                                 break;
4407
4408                         case ixgbe_mac_82599EB:
4409                         case ixgbe_mac_X540:
4410                         case ixgbe_mac_X550:
4411                         case ixgbe_mac_X550EM_x:
4412                         case ixgbe_mac_X550EM_a:
4413                         default:
4414                                 txctrl = IXGBE_READ_REG(hw,
4415                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4416                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4417                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4418                                                 txctrl);
4419                                 break;
4420                 }
4421         }
4422
4423         /* Device configured with multiple TX queues. */
4424         ixgbe_dev_mq_tx_configure(dev);
4425 }
4426
4427 /*
4428  * Set up link for 82599 loopback mode Tx->Rx.
4429  */
4430 static inline void __attribute__((cold))
4431 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4432 {
4433         PMD_INIT_FUNC_TRACE();
4434
4435         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4436                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4437                                 IXGBE_SUCCESS) {
4438                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4439                         /* ignore error */
4440                         return;
4441                 }
4442         }
4443
4444         /* Restart link */
4445         IXGBE_WRITE_REG(hw,
4446                         IXGBE_AUTOC,
4447                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4448         ixgbe_reset_pipeline_82599(hw);
4449
4450         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4451         msec_delay(50);
4452 }
4453
4454
4455 /*
4456  * Start Transmit and Receive Units.
4457  */
4458 int __attribute__((cold))
4459 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4460 {
4461         struct ixgbe_hw     *hw;
4462         struct ixgbe_tx_queue *txq;
4463         struct ixgbe_rx_queue *rxq;
4464         uint32_t txdctl;
4465         uint32_t dmatxctl;
4466         uint32_t rxctrl;
4467         uint16_t i;
4468         int ret = 0;
4469
4470         PMD_INIT_FUNC_TRACE();
4471         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4472
4473         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4474                 txq = dev->data->tx_queues[i];
4475                 /* Setup Transmit Threshold Registers */
4476                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4477                 txdctl |= txq->pthresh & 0x7F;
4478                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4479                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4480                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4481         }
4482
4483         if (hw->mac.type != ixgbe_mac_82598EB) {
4484                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4485                 dmatxctl |= IXGBE_DMATXCTL_TE;
4486                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4487         }
4488
4489         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4490                 txq = dev->data->tx_queues[i];
4491                 if (!txq->tx_deferred_start) {
4492                         ret = ixgbe_dev_tx_queue_start(dev, i);
4493                         if (ret < 0)
4494                                 return ret;
4495                 }
4496         }
4497
4498         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4499                 rxq = dev->data->rx_queues[i];
4500                 if (!rxq->rx_deferred_start) {
4501                         ret = ixgbe_dev_rx_queue_start(dev, i);
4502                         if (ret < 0)
4503                                 return ret;
4504                 }
4505         }
4506
4507         /* Enable Receive engine */
4508         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4509         if (hw->mac.type == ixgbe_mac_82598EB)
4510                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4511         rxctrl |= IXGBE_RXCTRL_RXEN;
4512         hw->mac.ops.enable_rx_dma(hw, rxctrl);
4513
4514         /* If loopback mode is enabled for 82599, set up the link accordingly */
4515         if (hw->mac.type == ixgbe_mac_82599EB &&
4516                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4517                 ixgbe_setup_loopback_link_82599(hw);
4518
4519         return 0;
4520 }
4521
4522 /*
4523  * Start Receive Units for specified queue.
4524  */
4525 int __attribute__((cold))
4526 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4527 {
4528         struct ixgbe_hw     *hw;
4529         struct ixgbe_rx_queue *rxq;
4530         uint32_t rxdctl;
4531         int poll_ms;
4532
4533         PMD_INIT_FUNC_TRACE();
4534         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4535
4536         if (rx_queue_id < dev->data->nb_rx_queues) {
4537                 rxq = dev->data->rx_queues[rx_queue_id];
4538
4539                 /* Allocate buffers for descriptor rings */
4540                 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4541                         PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4542                                      rx_queue_id);
4543                         return -1;
4544                 }
4545                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4546                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4547                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4548
4549                 /* Wait until RX Enable ready */
4550                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4551                 do {
4552                         rte_delay_ms(1);
4553                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4554                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4555                 if (!poll_ms)
4556                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4557                                      rx_queue_id);
4558                 rte_wmb();
4559                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4560                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
4561                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4562         } else
4563                 return -1;
4564
4565         return 0;
4566 }
4567
4568 /*
4569  * Stop Receive Units for specified queue.
4570  */
4571 int __attribute__((cold))
4572 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4573 {
4574         struct ixgbe_hw     *hw;
4575         struct ixgbe_adapter *adapter =
4576                 (struct ixgbe_adapter *)dev->data->dev_private;
4577         struct ixgbe_rx_queue *rxq;
4578         uint32_t rxdctl;
4579         int poll_ms;
4580
4581         PMD_INIT_FUNC_TRACE();
4582         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4583
4584         if (rx_queue_id < dev->data->nb_rx_queues) {
4585                 rxq = dev->data->rx_queues[rx_queue_id];
4586
4587                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4588                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4589                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4590
4591                 /* Wait until RX Enable ready */
4592                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4593                 do {
4594                         rte_delay_ms(1);
4595                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4596                 } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
4597                 if (!poll_ms)
4598                         PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4599                                      rx_queue_id);
4600
4601                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4602
4603                 ixgbe_rx_queue_release_mbufs(rxq);
4604                 ixgbe_reset_rx_queue(adapter, rxq);
4605                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4606         } else
4607                 return -1;
4608
4609         return 0;
4610 }
4611
4612
4613 /*
4614  * Start Transmit Units for specified queue.
4615  */
4616 int __attribute__((cold))
4617 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4618 {
4619         struct ixgbe_hw     *hw;
4620         struct ixgbe_tx_queue *txq;
4621         uint32_t txdctl;
4622         int poll_ms;
4623
4624         PMD_INIT_FUNC_TRACE();
4625         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4626
4627         if (tx_queue_id < dev->data->nb_tx_queues) {
4628                 txq = dev->data->tx_queues[tx_queue_id];
4629                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4630                 txdctl |= IXGBE_TXDCTL_ENABLE;
4631                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4632
4633                 /* Wait until TX Enable ready */
4634                 if (hw->mac.type == ixgbe_mac_82599EB) {
4635                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4636                         do {
4637                                 rte_delay_ms(1);
4638                                 txdctl = IXGBE_READ_REG(hw,
4639                                         IXGBE_TXDCTL(txq->reg_idx));
4640                         } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4641                         if (!poll_ms)
4642                                 PMD_INIT_LOG(ERR, "Could not enable "
4643                                              "Tx Queue %d", tx_queue_id);
4644                 }
4645                 rte_wmb();
4646                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4647                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4648                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4649         } else
4650                 return -1;
4651
4652         return 0;
4653 }
4654
4655 /*
4656  * Stop Transmit Units for specified queue.
4657  */
4658 int __attribute__((cold))
4659 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4660 {
4661         struct ixgbe_hw     *hw;
4662         struct ixgbe_tx_queue *txq;
4663         uint32_t txdctl;
4664         uint32_t txtdh, txtdt;
4665         int poll_ms;
4666
4667         PMD_INIT_FUNC_TRACE();
4668         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4669
4670         if (tx_queue_id < dev->data->nb_tx_queues) {
4671                 txq = dev->data->tx_queues[tx_queue_id];
4672
4673                 /* Wait until TX queue is empty */
4674                 if (hw->mac.type == ixgbe_mac_82599EB) {
4675                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4676                         do {
4677                                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4678                                 txtdh = IXGBE_READ_REG(hw,
4679                                                 IXGBE_TDH(txq->reg_idx));
4680                                 txtdt = IXGBE_READ_REG(hw,
4681                                                 IXGBE_TDT(txq->reg_idx));
4682                         } while (--poll_ms && (txtdh != txtdt));
4683                         if (!poll_ms)
4684                                 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4685                                              "when stopping.", tx_queue_id);
4686                 }
4687
4688                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4689                 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4690                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4691
4692                 /* Wait until TX Enable ready */
4693                 if (hw->mac.type == ixgbe_mac_82599EB) {
4694                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4695                         do {
4696                                 rte_delay_ms(1);
4697                                 txdctl = IXGBE_READ_REG(hw,
4698                                                 IXGBE_TXDCTL(txq->reg_idx));
4699                         } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
4700                         if (!poll_ms)
4701                                 PMD_INIT_LOG(ERR, "Could not disable "
4702                                              "Tx Queue %d", tx_queue_id);
4703                 }
4704
4705                 if (txq->ops != NULL) {
4706                         txq->ops->release_mbufs(txq);
4707                         txq->ops->reset(txq);
4708                 }
4709                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4710         } else
4711                 return -1;
4712
4713         return 0;
4714 }
4715
4716 void
4717 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4718         struct rte_eth_rxq_info *qinfo)
4719 {
4720         struct ixgbe_rx_queue *rxq;
4721
4722         rxq = dev->data->rx_queues[queue_id];
4723
4724         qinfo->mp = rxq->mb_pool;
4725         qinfo->scattered_rx = dev->data->scattered_rx;
4726         qinfo->nb_desc = rxq->nb_rx_desc;
4727
4728         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4729         qinfo->conf.rx_drop_en = rxq->drop_en;
4730         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4731 }
4732
4733 void
4734 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4735         struct rte_eth_txq_info *qinfo)
4736 {
4737         struct ixgbe_tx_queue *txq;
4738
4739         txq = dev->data->tx_queues[queue_id];
4740
4741         qinfo->nb_desc = txq->nb_tx_desc;
4742
4743         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4744         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4745         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4746
4747         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4748         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4749         qinfo->conf.txq_flags = txq->txq_flags;
4750         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4751 }
4752
4753 /*
4754  * [VF] Initializes Receive Unit.
4755  */
4756 int __attribute__((cold))
4757 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4758 {
4759         struct ixgbe_hw     *hw;
4760         struct ixgbe_rx_queue *rxq;
4761         uint64_t bus_addr;
4762         uint32_t srrctl, psrtype = 0;
4763         uint16_t buf_size;
4764         uint16_t i;
4765         int ret;
4766
4767         PMD_INIT_FUNC_TRACE();
4768         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4769
4770         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4771                 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
4772                         "it should be power of 2");
4773                 return -1;
4774         }
4775
4776         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4777                 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
4778                         "it should be equal to or less than %d",
4779                         hw->mac.max_rx_queues);
4780                 return -1;
4781         }
4782
4783         /*
4784          * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
4785          * disables the VF receipt of packets if the PF MTU is > 1500.
4786          * This is done to deal with 82599 limitations that imposes
4787          * the PF and all VFs to share the same MTU.
4788          * Then, the PF driver enables again the VF receipt of packet when
4789          * the VF driver issues a IXGBE_VF_SET_LPE request.
4790          * In the meantime, the VF device cannot be used, even if the VF driver
4791          * and the Guest VM network stack are ready to accept packets with a
4792          * size up to the PF MTU.
4793          * As a work-around to this PF behaviour, force the call to
4794          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4795          * VF packets received can work in all cases.
4796          */
4797         ixgbevf_rlpml_set_vf(hw,
4798                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4799
4800         /* Setup RX queues */
4801         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4802                 rxq = dev->data->rx_queues[i];
4803
4804                 /* Allocate buffers for descriptor rings */
4805                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4806                 if (ret)
4807                         return ret;
4808
4809                 /* Setup the Base and Length of the Rx Descriptor Rings */
4810                 bus_addr = rxq->rx_ring_phys_addr;
4811
4812                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4813                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4814                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4815                                 (uint32_t)(bus_addr >> 32));
4816                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4817                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4818                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4819                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4820
4821
4822                 /* Configure the SRRCTL register */
4823 #ifdef RTE_HEADER_SPLIT_ENABLE
4824                 /*
4825                  * Configure Header Split
4826                  */
4827                 if (dev->data->dev_conf.rxmode.header_split) {
4828                         srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4829                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4830                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4831                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4832                 } else
4833 #endif
4834                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4835
4836                 /* Set if packets are dropped when no descriptors available */
4837                 if (rxq->drop_en)
4838                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4839
4840                 /*
4841                  * Configure the RX buffer size in the BSIZEPACKET field of
4842                  * the SRRCTL register of the queue.
4843                  * The value is in 1 KB resolution. Valid values can be from
4844                  * 1 KB to 16 KB.
4845                  */
4846                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4847                         RTE_PKTMBUF_HEADROOM);
4848                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4849                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4850
4851                 /*
4852                  * VF modification to write virtual function SRRCTL register
4853                  */
4854                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4855
4856                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4857                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4858
4859                 if (dev->data->dev_conf.rxmode.enable_scatter ||
4860                     /* It adds dual VLAN length for supporting dual VLAN */
4861                     (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4862                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4863                         if (!dev->data->scattered_rx)
4864                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4865                         dev->data->scattered_rx = 1;
4866                 }
4867         }
4868
4869 #ifdef RTE_HEADER_SPLIT_ENABLE
4870         if (dev->data->dev_conf.rxmode.header_split)
4871                 /* Must setup the PSRTYPE register */
4872                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4873                         IXGBE_PSRTYPE_UDPHDR   |
4874                         IXGBE_PSRTYPE_IPV4HDR  |
4875                         IXGBE_PSRTYPE_IPV6HDR;
4876 #endif
4877
4878         /* Set RQPL for VF RSS according to max Rx queue */
4879         psrtype |= (dev->data->nb_rx_queues >> 1) <<
4880                 IXGBE_PSRTYPE_RQPL_SHIFT;
4881         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4882
4883         ixgbe_set_rx_function(dev);
4884
4885         return 0;
4886 }
4887
4888 /*
4889  * [VF] Initializes Transmit Unit.
4890  */
4891 void __attribute__((cold))
4892 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4893 {
4894         struct ixgbe_hw     *hw;
4895         struct ixgbe_tx_queue *txq;
4896         uint64_t bus_addr;
4897         uint32_t txctrl;
4898         uint16_t i;
4899
4900         PMD_INIT_FUNC_TRACE();
4901         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4902
4903         /* Setup the Base and Length of the Tx Descriptor Rings */
4904         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4905                 txq = dev->data->tx_queues[i];
4906                 bus_addr = txq->tx_ring_phys_addr;
4907                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4908                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4909                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4910                                 (uint32_t)(bus_addr >> 32));
4911                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4912                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4913                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4914                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4915                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4916
4917                 /*
4918                  * Disable Tx Head Writeback RO bit, since this hoses
4919                  * bookkeeping if things aren't delivered in order.
4920                  */
4921                 txctrl = IXGBE_READ_REG(hw,
4922                                 IXGBE_VFDCA_TXCTRL(i));
4923                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4924                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4925                                 txctrl);
4926         }
4927 }
4928
4929 /*
4930  * [VF] Start Transmit and Receive Units.
4931  */
4932 void __attribute__((cold))
4933 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4934 {
4935         struct ixgbe_hw     *hw;
4936         struct ixgbe_tx_queue *txq;
4937         struct ixgbe_rx_queue *rxq;
4938         uint32_t txdctl;
4939         uint32_t rxdctl;
4940         uint16_t i;
4941         int poll_ms;
4942
4943         PMD_INIT_FUNC_TRACE();
4944         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4945
4946         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4947                 txq = dev->data->tx_queues[i];
4948                 /* Setup Transmit Threshold Registers */
4949                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4950                 txdctl |= txq->pthresh & 0x7F;
4951                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4952                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4953                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4954         }
4955
4956         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4957
4958                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4959                 txdctl |= IXGBE_TXDCTL_ENABLE;
4960                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4961
4962                 poll_ms = 10;
4963                 /* Wait until TX Enable ready */
4964                 do {
4965                         rte_delay_ms(1);
4966                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4967                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4968                 if (!poll_ms)
4969                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4970         }
4971         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4972
4973                 rxq = dev->data->rx_queues[i];
4974
4975                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4976                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4977                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4978
4979                 /* Wait until RX Enable ready */
4980                 poll_ms = 10;
4981                 do {
4982                         rte_delay_ms(1);
4983                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4984                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4985                 if (!poll_ms)
4986                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4987                 rte_wmb();
4988                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4989
4990         }
4991 }
4992
4993 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4994 int __attribute__((weak))
4995 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
4996 {
4997         return -1;
4998 }
4999
5000 uint16_t __attribute__((weak))
5001 ixgbe_recv_pkts_vec(
5002         void __rte_unused *rx_queue,
5003         struct rte_mbuf __rte_unused **rx_pkts,
5004         uint16_t __rte_unused nb_pkts)
5005 {
5006         return 0;
5007 }
5008
5009 uint16_t __attribute__((weak))
5010 ixgbe_recv_scattered_pkts_vec(
5011         void __rte_unused *rx_queue,
5012         struct rte_mbuf __rte_unused **rx_pkts,
5013         uint16_t __rte_unused nb_pkts)
5014 {
5015         return 0;
5016 }
5017
5018 int __attribute__((weak))
5019 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5020 {
5021         return -1;
5022 }