dpdk.git: drivers/net/ixgbe/ixgbe_rxtx.c (commit 55611950bc8328228274739267ae6d206e82e4c3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   Copyright 2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <sys/queue.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <errno.h>
41 #include <stdint.h>
42 #include <stdarg.h>
43 #include <unistd.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
49 #include <rte_log.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
52 #include <rte_pci.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
68 #include <rte_udp.h>
69 #include <rte_tcp.h>
70 #include <rte_sctp.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
73 #include <rte_ip.h>
74
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
82
83 /* Bit mask to indicate which bits are required for building the TX context */
84 #define IXGBE_TX_OFFLOAD_MASK (                  \
85                 PKT_TX_VLAN_PKT |                \
86                 PKT_TX_IP_CKSUM |                \
87                 PKT_TX_L4_MASK |                 \
88                 PKT_TX_TCP_SEG)
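/*
 * Editor's illustrative sketch, not part of the upstream driver: the
 * full-featured TX path below masks each packet's ol_flags with
 * IXGBE_TX_OFFLOAD_MASK to decide whether a context descriptor may be
 * needed. A hypothetical helper expressing that same test:
 */
static inline int
ixgbe_example_pkt_needs_ctx(const struct rte_mbuf *m)
{
        /* non-zero when at least one supported TX offload is requested */
        return (m->ol_flags & IXGBE_TX_OFFLOAD_MASK) != 0;
}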
89
90 static inline struct rte_mbuf *
91 rte_rxmbuf_alloc(struct rte_mempool *mp)
92 {
93         struct rte_mbuf *m;
94
95         m = __rte_mbuf_raw_alloc(mp);
96         __rte_mbuf_sanity_check_raw(m, 0);
97         return (m);
98 }
99
100
101 #if 1
102 #define RTE_PMD_USE_PREFETCH
103 #endif
104
105 #ifdef RTE_PMD_USE_PREFETCH
106 /*
107  * Prefetch a cache line into all cache levels.
108  */
109 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
110 #else
111 #define rte_ixgbe_prefetch(p)   do {} while(0)
112 #endif
113
114 /*********************************************************************
115  *
116  *  TX functions
117  *
118  **********************************************************************/
119
120 /*
121  * Check for descriptors with their DD bit set and free mbufs.
122  * Return the total number of buffers freed.
123  */
124 static inline int __attribute__((always_inline))
125 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
126 {
127         struct ixgbe_tx_entry *txep;
128         uint32_t status;
129         int i;
130
131         /* check DD bit on threshold descriptor */
132         status = txq->tx_ring[txq->tx_next_dd].wb.status;
133         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
134                 return 0;
135
136         /*
137          * first buffer to free from S/W ring is at index
138          * tx_next_dd - (tx_rs_thresh-1)
139          */
140         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
141
142         /* free buffers one at a time */
143         if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
144                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145                         txep->mbuf->next = NULL;
146                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
147                         txep->mbuf = NULL;
148                 }
149         } else {
150                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
151                         rte_pktmbuf_free_seg(txep->mbuf);
152                         txep->mbuf = NULL;
153                 }
154         }
155
156         /* buffers were freed, update counters */
157         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
158         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
159         if (txq->tx_next_dd >= txq->nb_tx_desc)
160                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
161
162         return txq->tx_rs_thresh;
163 }
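/*
 * Worked example (editor's note, illustrative numbers): with
 * tx_rs_thresh = 32 and tx_next_dd initially 31, seeing the DD bit on
 * descriptor 31 frees the 32 mbufs at sw_ring[0] .. sw_ring[31]
 * (31 - (32 - 1) = 0 is the first index), then advances tx_next_dd to
 * 63 and adds 32 to nb_tx_free.
 */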
164
165 /* Populate 4 descriptors with data from 4 mbufs */
166 static inline void
167 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
168 {
169         uint64_t buf_dma_addr;
170         uint32_t pkt_len;
171         int i;
172
173         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
174                 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
175                 pkt_len = (*pkts)->data_len;
176
177                 /* write data to descriptor */
178                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
179
180                 txdp->read.cmd_type_len =
181                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
182
183                 txdp->read.olinfo_status =
184                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
185
186                 rte_prefetch0(&(*pkts)->pool);
187         }
188 }
189
190 /* Populate 1 descriptor with data from 1 mbuf */
191 static inline void
192 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
193 {
194         uint64_t buf_dma_addr;
195         uint32_t pkt_len;
196
197         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
198         pkt_len = (*pkts)->data_len;
199
200         /* write data to descriptor */
201         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
202         txdp->read.cmd_type_len =
203                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
204         txdp->read.olinfo_status =
205                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
206         rte_prefetch0(&(*pkts)->pool);
207 }
208
209 /*
210  * Fill H/W descriptor ring with mbuf data.
211  * Copy mbuf pointers to the S/W ring.
212  */
213 static inline void
214 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
215                       uint16_t nb_pkts)
216 {
217         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
218         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
219         const int N_PER_LOOP = 4;
220         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
221         int mainpart, leftover;
222         int i, j;
223
224         /*
225          * Process most of the packets in chunks of N pkts.  Any
226          * leftover packets will get processed one at a time.
227          */
228         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
229         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
230         for (i = 0; i < mainpart; i += N_PER_LOOP) {
231                 /* Copy N mbuf pointers to the S/W ring */
232                 for (j = 0; j < N_PER_LOOP; ++j) {
233                         (txep + i + j)->mbuf = *(pkts + i + j);
234                 }
235                 tx4(txdp + i, pkts + i);
236         }
237
238         if (unlikely(leftover > 0)) {
239                 for (i = 0; i < leftover; ++i) {
240                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
241                         tx1(txdp + mainpart + i, pkts + mainpart + i);
242                 }
243         }
244 }
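/*
 * Worked example (editor's note, illustrative numbers): for nb_pkts = 13,
 * mainpart = 13 & ~3 = 12 and leftover = 13 & 3 = 1, so the loop above
 * issues three tx4() calls (descriptors 0-3, 4-7, 8-11) followed by one
 * tx1() call for the remaining packet.
 */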
245
246 static inline uint16_t
247 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
248              uint16_t nb_pkts)
249 {
250         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
251         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
252         uint16_t n = 0;
253
254         /*
255          * Begin scanning the H/W ring for done descriptors when the
256          * number of available descriptors drops below tx_free_thresh.  For
257          * each done descriptor, free the associated buffer.
258          */
259         if (txq->nb_tx_free < txq->tx_free_thresh)
260                 ixgbe_tx_free_bufs(txq);
261
262         /* Only use descriptors that are available */
263         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
264         if (unlikely(nb_pkts == 0))
265                 return 0;
266
267         /* Use exactly nb_pkts descriptors */
268         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
269
270         /*
271          * At this point, we know there are enough descriptors in the
272          * ring to transmit all the packets.  This assumes that each
273          * mbuf contains a single segment, and that no new offloads
274          * are expected, which would require a new context descriptor.
275          */
276
277         /*
278          * See if we're going to wrap-around. If so, handle the top
279          * of the descriptor ring first, then do the bottom.  If not,
280          * the processing looks just like the "bottom" part anyway...
281          */
282         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
283                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
284                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
285
286                 /*
287                  * We know that the last descriptor in the ring will need to
288                  * have its RS bit set because tx_rs_thresh has to be
289                  * a divisor of the ring size
290                  */
291                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
292                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
293                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
294
295                 txq->tx_tail = 0;
296         }
297
298         /* Fill H/W descriptor ring with mbuf data */
299         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
300         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
301
302         /*
303          * Determine if RS bit should be set
304          * This is what we actually want:
305          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
306          * but instead of subtracting 1 and doing >=, we can just do
307          * greater than without subtracting.
308          */
309         if (txq->tx_tail > txq->tx_next_rs) {
310                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
311                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
312                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
313                                                 txq->tx_rs_thresh);
314                 if (txq->tx_next_rs >= txq->nb_tx_desc)
315                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
316         }
317
318         /*
319          * Check for wrap-around. This would only happen if we used
320          * up to the last descriptor in the ring, no more, no less.
321          */
322         if (txq->tx_tail >= txq->nb_tx_desc)
323                 txq->tx_tail = 0;
324
325         /* update tail pointer */
326         rte_wmb();
327         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
328
329         return nb_pkts;
330 }
331
332 uint16_t
333 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
334                        uint16_t nb_pkts)
335 {
336         uint16_t nb_tx;
337
338         /* Try to transmit at least chunks of TX_MAX_BURST pkts */
339         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
340                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
341
342         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
343         nb_tx = 0;
344         while (nb_pkts) {
345                 uint16_t ret, n;
346                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
347                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
348                 nb_tx = (uint16_t)(nb_tx + ret);
349                 nb_pkts = (uint16_t)(nb_pkts - ret);
350                 if (ret < n)
351                         break;
352         }
353
354         return nb_tx;
355 }
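/*
 * Illustrative application-side usage (editor's note, not part of the
 * driver): once selected as the queue's TX handler, this function is
 * reached through the generic burst API; "port" and "queue" below are
 * hypothetical values, and a real application would bound or pace the
 * retry loop.
 *
 *     uint16_t sent = rte_eth_tx_burst(port, queue, pkts, nb_pkts);
 *     while (sent < nb_pkts)
 *             sent += rte_eth_tx_burst(port, queue,
 *                                      pkts + sent, nb_pkts - sent);
 */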
356
357 static inline void
358 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
359                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
360                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
361 {
362         uint32_t type_tucmd_mlhl;
363         uint32_t mss_l4len_idx = 0;
364         uint32_t ctx_idx;
365         uint32_t vlan_macip_lens;
366         union ixgbe_tx_offload tx_offload_mask;
367
368         ctx_idx = txq->ctx_curr;
369         tx_offload_mask.data = 0;
370         type_tucmd_mlhl = 0;
371
372         /* Specify which HW CTX to upload. */
373         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
374
375         if (ol_flags & PKT_TX_VLAN_PKT) {
376                 tx_offload_mask.vlan_tci |= ~0;
377         }
378
379         /* check if TCP segmentation is required for this packet */
380         if (ol_flags & PKT_TX_TCP_SEG) {
381                 /* implies IP cksum in IPv4 */
382                 if (ol_flags & PKT_TX_IP_CKSUM)
383                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
384                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
385                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
386                 else
387                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
388                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
389                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
390
391                 tx_offload_mask.l2_len |= ~0;
392                 tx_offload_mask.l3_len |= ~0;
393                 tx_offload_mask.l4_len |= ~0;
394                 tx_offload_mask.tso_segsz |= ~0;
395                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
396                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
397         } else { /* no TSO, check if hardware checksum is needed */
398                 if (ol_flags & PKT_TX_IP_CKSUM) {
399                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
400                         tx_offload_mask.l2_len |= ~0;
401                         tx_offload_mask.l3_len |= ~0;
402                 }
403
404                 switch (ol_flags & PKT_TX_L4_MASK) {
405                 case PKT_TX_UDP_CKSUM:
406                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
407                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
409                         tx_offload_mask.l2_len |= ~0;
410                         tx_offload_mask.l3_len |= ~0;
411                         break;
412                 case PKT_TX_TCP_CKSUM:
413                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
414                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
415                         mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
416                         tx_offload_mask.l2_len |= ~0;
417                         tx_offload_mask.l3_len |= ~0;
418                         break;
419                 case PKT_TX_SCTP_CKSUM:
420                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
421                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
422                         mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
423                         tx_offload_mask.l2_len |= ~0;
424                         tx_offload_mask.l3_len |= ~0;
425                         break;
426                 default:
427                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
428                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
429                         break;
430                 }
431         }
432
433         txq->ctx_cache[ctx_idx].flags = ol_flags;
434         txq->ctx_cache[ctx_idx].tx_offload.data  =
435                 tx_offload_mask.data & tx_offload.data;
436         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
437
438         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
439         vlan_macip_lens = tx_offload.l3_len;
440         vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
441         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
442         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
443         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
444         ctx_txd->seqnum_seed     = 0;
445 }
446
447 /*
448  * Check which hardware context can be used. Use the existing match
449  * or create a new context descriptor.
450  */
451 static inline uint32_t
452 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
453                 union ixgbe_tx_offload tx_offload)
454 {
455         /* If it matches the currently used context */
456         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
457                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
458                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
459                         return txq->ctx_curr;
460         }
461
462         /* Otherwise, check whether it matches the other cached context */
463         txq->ctx_curr ^= 1;
464         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
465                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
466                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
467                         return txq->ctx_curr;
468         }
469
470         /* Mismatch: neither cached context matches, a new one must be built */
471         return (IXGBE_CTX_NUM);
472 }
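/*
 * Editor's note (illustrative): the queue caches IXGBE_CTX_NUM contexts
 * and txq->ctx_curr toggles between them (ctx_curr ^= 1). With two
 * interleaved flows, e.g. one VLAN-tagged and one requesting only an L4
 * checksum, each flow settles into its own slot after its first miss,
 * so steady-state traffic reuses the cached contexts without writing
 * new context descriptors.
 */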
473
474 static inline uint32_t
475 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
476 {
477         uint32_t tmp = 0;
478         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
479                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
480         if (ol_flags & PKT_TX_IP_CKSUM)
481                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
482         if (ol_flags & PKT_TX_TCP_SEG)
483                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
484         return tmp;
485 }
486
487 static inline uint32_t
488 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
489 {
490         uint32_t cmdtype = 0;
491         if (ol_flags & PKT_TX_VLAN_PKT)
492                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
493         if (ol_flags & PKT_TX_TCP_SEG)
494                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
495         return cmdtype;
496 }
497
498 /* Default RS bit threshold values */
499 #ifndef DEFAULT_TX_RS_THRESH
500 #define DEFAULT_TX_RS_THRESH   32
501 #endif
502 #ifndef DEFAULT_TX_FREE_THRESH
503 #define DEFAULT_TX_FREE_THRESH 32
504 #endif
505
506 /* Reset transmit descriptors after they have been used */
507 static inline int
508 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
509 {
510         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
511         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
512         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
513         uint16_t nb_tx_desc = txq->nb_tx_desc;
514         uint16_t desc_to_clean_to;
515         uint16_t nb_tx_to_clean;
516         uint32_t status;
517
518         /* Determine the last descriptor needing to be cleaned */
519         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
520         if (desc_to_clean_to >= nb_tx_desc)
521                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
522
523         /* Check to make sure the last descriptor to clean is done */
524         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
525         status = txr[desc_to_clean_to].wb.status;
526         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
527         {
528                 PMD_TX_FREE_LOG(DEBUG,
529                                 "TX descriptor %4u is not done "
530                                 "(port=%d queue=%d)",
531                                 desc_to_clean_to,
532                                 txq->port_id, txq->queue_id);
533                 /* Failed to clean any descriptors, better luck next time */
534                 return -(1);
535         }
536
537         /* Figure out how many descriptors will be cleaned */
538         if (last_desc_cleaned > desc_to_clean_to)
539                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
540                                                         desc_to_clean_to);
541         else
542                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
543                                                 last_desc_cleaned);
544
545         PMD_TX_FREE_LOG(DEBUG,
546                         "Cleaning %4u TX descriptors: %4u to %4u "
547                         "(port=%d queue=%d)",
548                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
549                         txq->port_id, txq->queue_id);
550
551         /*
552          * The last descriptor to clean is done, so that means all the
553          * descriptors from the last descriptor that was cleaned
554          * up to the last descriptor with the RS bit set
555          * are done. Only reset the threshold descriptor.
556          */
557         txr[desc_to_clean_to].wb.status = 0;
558
559         /* Update the txq to reflect the last descriptor that was cleaned */
560         txq->last_desc_cleaned = desc_to_clean_to;
561         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
562
563         /* No Error */
564         return (0);
565 }
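/*
 * Worked example (editor's note, illustrative numbers, assuming the
 * packet ending at the threshold descriptor is single-segment so its
 * last_id equals its own index): with nb_tx_desc = 512,
 * tx_rs_thresh = 32 and last_desc_cleaned = 511, desc_to_clean_to is
 * 543 - 512 = 31; since 511 > 31 the wrap branch yields
 * nb_tx_to_clean = (512 - 511) + 31 = 32 descriptors.
 */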
566
567 uint16_t
568 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
569                 uint16_t nb_pkts)
570 {
571         struct ixgbe_tx_queue *txq;
572         struct ixgbe_tx_entry *sw_ring;
573         struct ixgbe_tx_entry *txe, *txn;
574         volatile union ixgbe_adv_tx_desc *txr;
575         volatile union ixgbe_adv_tx_desc *txd;
576         struct rte_mbuf     *tx_pkt;
577         struct rte_mbuf     *m_seg;
578         uint64_t buf_dma_addr;
579         uint32_t olinfo_status;
580         uint32_t cmd_type_len;
581         uint32_t pkt_len;
582         uint16_t slen;
583         uint64_t ol_flags;
584         uint16_t tx_id;
585         uint16_t tx_last;
586         uint16_t nb_tx;
587         uint16_t nb_used;
588         uint64_t tx_ol_req;
589         uint32_t ctx = 0;
590         uint32_t new_ctx;
591         union ixgbe_tx_offload tx_offload = {0};
592
593         txq = tx_queue;
594         sw_ring = txq->sw_ring;
595         txr     = txq->tx_ring;
596         tx_id   = txq->tx_tail;
597         txe = &sw_ring[tx_id];
598
599         /* Determine if the descriptor ring needs to be cleaned. */
600         if (txq->nb_tx_free < txq->tx_free_thresh)
601                 ixgbe_xmit_cleanup(txq);
602
603         rte_prefetch0(&txe->mbuf->pool);
604
605         /* TX loop */
606         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
607                 new_ctx = 0;
608                 tx_pkt = *tx_pkts++;
609                 pkt_len = tx_pkt->pkt_len;
610
611                 /*
612                  * Determine how many (if any) context descriptors
613                  * are needed for offload functionality.
614                  */
615                 ol_flags = tx_pkt->ol_flags;
616
617                 /* If hardware offload required */
618                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
619                 if (tx_ol_req) {
620                         tx_offload.l2_len = tx_pkt->l2_len;
621                         tx_offload.l3_len = tx_pkt->l3_len;
622                         tx_offload.l4_len = tx_pkt->l4_len;
623                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
624                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
625
626                         /* Decide whether a new context must be built or an existing one reused. */
627                         ctx = what_advctx_update(txq, tx_ol_req,
628                                 tx_offload);
629                         /* Only allocate a context descriptor if required */
630                         new_ctx = (ctx == IXGBE_CTX_NUM);
631                         ctx = txq->ctx_curr;
632                 }
633
634                 /*
635                  * Keep track of how many descriptors are used this loop.
636                  * This will always be the number of segments + the number of
637                  * context descriptors required to transmit the packet.
638                  */
639                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
640
641                 /*
642                  * The number of descriptors that must be allocated for a
643                  * packet is the number of segments of that packet, plus 1
644                  * Context Descriptor for the hardware offload, if any.
645                  * Determine the last TX descriptor to allocate in the TX ring
646                  * for the packet, starting from the current position (tx_id)
647                  * in the ring.
648                  */
649                 tx_last = (uint16_t) (tx_id + nb_used - 1);
650
651                 /* Circular ring */
652                 if (tx_last >= txq->nb_tx_desc)
653                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
654
655                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
656                            " tx_first=%u tx_last=%u",
657                            (unsigned) txq->port_id,
658                            (unsigned) txq->queue_id,
659                            (unsigned) pkt_len,
660                            (unsigned) tx_id,
661                            (unsigned) tx_last);
662
663                 /*
664                  * Make sure there are enough TX descriptors available to
665                  * transmit the entire packet.
666                  * nb_used better be less than or equal to txq->tx_rs_thresh
667                  */
668                 if (nb_used > txq->nb_tx_free) {
669                         PMD_TX_FREE_LOG(DEBUG,
670                                         "Not enough free TX descriptors "
671                                         "nb_used=%4u nb_free=%4u "
672                                         "(port=%d queue=%d)",
673                                         nb_used, txq->nb_tx_free,
674                                         txq->port_id, txq->queue_id);
675
676                         if (ixgbe_xmit_cleanup(txq) != 0) {
677                                 /* Could not clean any descriptors */
678                                 if (nb_tx == 0)
679                                         return (0);
680                                 goto end_of_tx;
681                         }
682
683                         /* nb_used better be <= txq->tx_rs_thresh */
684                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
685                                 PMD_TX_FREE_LOG(DEBUG,
686                                         "The number of descriptors needed to "
687                                         "transmit the packet exceeds the "
688                                         "RS bit threshold. This will impact "
689                                         "performance. "
690                                         "nb_used=%4u nb_free=%4u "
691                                         "tx_rs_thresh=%4u. "
692                                         "(port=%d queue=%d)",
693                                         nb_used, txq->nb_tx_free,
694                                         txq->tx_rs_thresh,
695                                         txq->port_id, txq->queue_id);
696                                 /*
697                                  * Loop here until there are enough TX
698                                  * descriptors or until the ring cannot be
699                                  * cleaned.
700                                  */
701                                 while (nb_used > txq->nb_tx_free) {
702                                         if (ixgbe_xmit_cleanup(txq) != 0) {
703                                                 /*
704                                                  * Could not clean any
705                                                  * descriptors
706                                                  */
707                                                 if (nb_tx == 0)
708                                                         return (0);
709                                                 goto end_of_tx;
710                                         }
711                                 }
712                         }
713                 }
714
715                 /*
716                  * By now there are enough free TX descriptors to transmit
717                  * the packet.
718                  */
719
720                 /*
721                  * Set common flags of all TX Data Descriptors.
722                  *
723                  * The following bits must be set in all Data Descriptors:
724                  *   - IXGBE_ADVTXD_DTYP_DATA
725                  *   - IXGBE_ADVTXD_DCMD_DEXT
726                  *
727                  * The following bits must be set in the first Data Descriptor
728                  * and are ignored in the other ones:
729                  *   - IXGBE_ADVTXD_DCMD_IFCS
730                  *   - IXGBE_ADVTXD_MAC_1588
731                  *   - IXGBE_ADVTXD_DCMD_VLE
732                  *
733                  * The following bits must only be set in the last Data
734                  * Descriptor:
735                  *   - IXGBE_TXD_CMD_EOP
736                  *
737                  * The following bits can be set in any Data Descriptor, but
738                  * are only set in the last Data Descriptor:
739                  *   - IXGBE_TXD_CMD_RS
740                  */
741                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
742                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
743
744 #ifdef RTE_LIBRTE_IEEE1588
745                 if (ol_flags & PKT_TX_IEEE1588_TMST)
746                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
747 #endif
748
749                 olinfo_status = 0;
750                 if (tx_ol_req) {
751
752                         if (ol_flags & PKT_TX_TCP_SEG) {
753                                 /* when TSO is on, the paylen in the descriptor is
754                                  * not the packet len but the TCP payload len */
755                                 pkt_len -= (tx_offload.l2_len +
756                                         tx_offload.l3_len + tx_offload.l4_len);
757                         }
758
759                         /*
760                          * Setup the TX Advanced Context Descriptor if required
761                          */
762                         if (new_ctx) {
763                                 volatile struct ixgbe_adv_tx_context_desc *
764                                     ctx_txd;
765
766                                 ctx_txd = (volatile struct
767                                     ixgbe_adv_tx_context_desc *)
768                                     &txr[tx_id];
769
770                                 txn = &sw_ring[txe->next_id];
771                                 rte_prefetch0(&txn->mbuf->pool);
772
773                                 if (txe->mbuf != NULL) {
774                                         rte_pktmbuf_free_seg(txe->mbuf);
775                                         txe->mbuf = NULL;
776                                 }
777
778                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
779                                         tx_offload);
780
781                                 txe->last_id = tx_last;
782                                 tx_id = txe->next_id;
783                                 txe = txn;
784                         }
785
786                         /*
787                          * Setup the TX Advanced Data Descriptor.
788                          * This path is taken whether a new context
789                          * descriptor was built or an existing one is reused.
790                          */
791                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
792                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
793                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
794                 }
795
796                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
797
798                 m_seg = tx_pkt;
799                 do {
800                         txd = &txr[tx_id];
801                         txn = &sw_ring[txe->next_id];
802                         rte_prefetch0(&txn->mbuf->pool);
803
804                         if (txe->mbuf != NULL)
805                                 rte_pktmbuf_free_seg(txe->mbuf);
806                         txe->mbuf = m_seg;
807
808                         /*
809                          * Set up Transmit Data Descriptor.
810                          */
811                         slen = m_seg->data_len;
812                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
813                         txd->read.buffer_addr =
814                                 rte_cpu_to_le_64(buf_dma_addr);
815                         txd->read.cmd_type_len =
816                                 rte_cpu_to_le_32(cmd_type_len | slen);
817                         txd->read.olinfo_status =
818                                 rte_cpu_to_le_32(olinfo_status);
819                         txe->last_id = tx_last;
820                         tx_id = txe->next_id;
821                         txe = txn;
822                         m_seg = m_seg->next;
823                 } while (m_seg != NULL);
824
825                 /*
826                  * The last packet data descriptor needs End Of Packet (EOP)
827                  */
828                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
829                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
830                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
831
832                 /* Set RS bit only on threshold packets' last descriptor */
833                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
834                         PMD_TX_FREE_LOG(DEBUG,
835                                         "Setting RS bit on TXD id="
836                                         "%4u (port=%d queue=%d)",
837                                         tx_last, txq->port_id, txq->queue_id);
838
839                         cmd_type_len |= IXGBE_TXD_CMD_RS;
840
841                         /* Update txq RS bit counters */
842                         txq->nb_tx_used = 0;
843                 }
844                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
845         }
846 end_of_tx:
847         rte_wmb();
848
849         /*
850          * Set the Transmit Descriptor Tail (TDT)
851          */
852         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
853                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
854                    (unsigned) tx_id, (unsigned) nb_tx);
855         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
856         txq->tx_tail = tx_id;
857
858         return (nb_tx);
859 }
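/*
 * Worked example (editor's note, illustrative): a 3-segment TSO packet
 * that misses both cached contexts consumes nb_used = 3 + 1 = 4
 * descriptors: one context descriptor followed by three data
 * descriptors, with IXGBE_TXD_CMD_EOP set only on the last data
 * descriptor and IXGBE_TXD_CMD_RS added once nb_tx_used reaches
 * tx_rs_thresh.
 */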
860
861 /*********************************************************************
862  *
863  *  RX functions
864  *
865  **********************************************************************/
866 #define IXGBE_PACKET_TYPE_IPV4              0X01
867 #define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
868 #define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
869 #define IXGBE_PACKET_TYPE_IPV4_SCTP         0X41
870 #define IXGBE_PACKET_TYPE_IPV4_EXT          0X03
871 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP     0X43
872 #define IXGBE_PACKET_TYPE_IPV6              0X04
873 #define IXGBE_PACKET_TYPE_IPV6_TCP          0X14
874 #define IXGBE_PACKET_TYPE_IPV6_UDP          0X24
875 #define IXGBE_PACKET_TYPE_IPV6_EXT          0X0C
876 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP      0X1C
877 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP      0X2C
878 #define IXGBE_PACKET_TYPE_IPV4_IPV6         0X05
879 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP     0X15
880 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP     0X25
881 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
882 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
883 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
884 #define IXGBE_PACKET_TYPE_MAX               0X80
885 #define IXGBE_PACKET_TYPE_MASK              0X7F
886 #define IXGBE_PACKET_TYPE_SHIFT             0X04
887 static inline uint32_t
888 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
889 {
890         static const uint32_t
891                 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
892                 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
893                         RTE_PTYPE_L3_IPV4,
894                 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
895                         RTE_PTYPE_L3_IPV4_EXT,
896                 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
897                         RTE_PTYPE_L3_IPV6,
898                 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
899                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
900                         RTE_PTYPE_INNER_L3_IPV6,
901                 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
902                         RTE_PTYPE_L3_IPV6_EXT,
903                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
904                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
905                         RTE_PTYPE_INNER_L3_IPV6_EXT,
906                 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
907                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
908                 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
909                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
910                 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
911                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
912                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
913                 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
914                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
915                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
916                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
917                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
918                 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
919                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
920                 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
921                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
922                 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
923                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
924                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
925                 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
926                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
927                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
928                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
929                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
930                 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
931                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
932                 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
933                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
934         };
935         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
936                 return RTE_PTYPE_UNKNOWN;
937
938         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
939                                 IXGBE_PACKET_TYPE_MASK;
940
941         return ptype_table[pkt_info];
942 }
943
944 static inline uint64_t
945 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
946 {
947         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
948                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
949                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
950                 PKT_RX_RSS_HASH, 0, 0, 0,
951                 0, 0, 0,  PKT_RX_FDIR,
952         };
953 #ifdef RTE_LIBRTE_IEEE1588
954         static uint64_t ip_pkt_etqf_map[8] = {
955                 0, 0, 0, PKT_RX_IEEE1588_PTP,
956                 0, 0, 0, 0,
957         };
958
959         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
960                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
961                                 ip_rss_types_map[pkt_info & 0XF];
962         else
963                 return ip_rss_types_map[pkt_info & 0XF];
964 #else
965         return ip_rss_types_map[pkt_info & 0XF];
966 #endif
967 }
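/*
 * Worked example (editor's note, hypothetical descriptor value): a
 * pkt_info word of 0x0112 has the ETQF bit clear, so its packet type
 * index is (0x0112 >> IXGBE_PACKET_TYPE_SHIFT) & IXGBE_PACKET_TYPE_MASK
 * = 0x11, i.e. RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
 * while its low RSS nibble 0x2 maps to PKT_RX_RSS_HASH via
 * ip_rss_types_map above.
 */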
968
969 static inline uint64_t
970 rx_desc_status_to_pkt_flags(uint32_t rx_status)
971 {
972         uint64_t pkt_flags;
973
974         /*
975          * Check only whether a VLAN is present.
976          * Do not check whether the L3/L4 RX checksum was verified by the NIC;
977          * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
978          */
979         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
980
981 #ifdef RTE_LIBRTE_IEEE1588
982         if (rx_status & IXGBE_RXD_STAT_TMST)
983                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
984 #endif
985         return pkt_flags;
986 }
987
988 static inline uint64_t
989 rx_desc_error_to_pkt_flags(uint32_t rx_status)
990 {
991         /*
992          * Bit 31: IPE, IPv4 checksum error
993          * Bit 30: L4I, L4 integrity error
994          */
995         static uint64_t error_to_pkt_flags_map[4] = {
996                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
997                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
998         };
999         return error_to_pkt_flags_map[(rx_status >>
1000                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1001 }
1002
1003 /*
1004  * LOOK_AHEAD defines how many desc statuses to check beyond the
1005  * current descriptor.
1006  * It must be a pound define for optimal performance.
1007  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1008  * function only works with LOOK_AHEAD=8.
1009  */
1010 #define LOOK_AHEAD 8
1011 #if (LOOK_AHEAD != 8)
1012 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1013 #endif
1014 static inline int
1015 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1016 {
1017         volatile union ixgbe_adv_rx_desc *rxdp;
1018         struct ixgbe_rx_entry *rxep;
1019         struct rte_mbuf *mb;
1020         uint16_t pkt_len;
1021         uint64_t pkt_flags;
1022         int nb_dd;
1023         uint32_t s[LOOK_AHEAD];
1024         uint16_t pkt_info[LOOK_AHEAD];
1025         int i, j, nb_rx = 0;
1026         uint32_t status;
1027
1028         /* get references to current descriptor and S/W ring entry */
1029         rxdp = &rxq->rx_ring[rxq->rx_tail];
1030         rxep = &rxq->sw_ring[rxq->rx_tail];
1031
1032         status = rxdp->wb.upper.status_error;
1033         /* check to make sure there is at least 1 packet to receive */
1034         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1035                 return 0;
1036
1037         /*
1038          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1039          * reference packets that are ready to be received.
1040          */
1041         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1042              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1043         {
1044                 /* Read desc statuses backwards to avoid race condition */
1045                 for (j = LOOK_AHEAD-1; j >= 0; --j)
1046                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1047
1048                 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1049                         pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1050                                                 hs_rss.pkt_info;
1051
1052                 /* Compute how many status bits were set */
1053                 nb_dd = 0;
1054                 for (j = 0; j < LOOK_AHEAD; ++j)
1055                         nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
1056
1057                 nb_rx += nb_dd;
1058
1059                 /* Translate descriptor info to mbuf format */
1060                 for (j = 0; j < nb_dd; ++j) {
1061                         mb = rxep[j].mbuf;
1062                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1063                                   rxq->crc_len;
1064                         mb->data_len = pkt_len;
1065                         mb->pkt_len = pkt_len;
1066                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1067
1068                         /* convert descriptor fields to rte mbuf flags */
1069                         pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1070                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1071                         pkt_flags |=
1072                                 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1073                         mb->ol_flags = pkt_flags;
1074                         mb->packet_type =
1075                                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1076
1077                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1078                                 mb->hash.rss = rte_le_to_cpu_32(
1079                                     rxdp[j].wb.lower.hi_dword.rss);
1080                         else if (pkt_flags & PKT_RX_FDIR) {
1081                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1082                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1083                                     IXGBE_ATR_HASH_MASK;
1084                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1085                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1086                         }
1087                 }
1088
1089                 /* Move mbuf pointers from the S/W ring to the stage */
1090                 for (j = 0; j < LOOK_AHEAD; ++j) {
1091                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1092                 }
1093
1094                 /* stop if all requested packets could not be received */
1095                 if (nb_dd != LOOK_AHEAD)
1096                         break;
1097         }
1098
1099         /* clear software ring entries so we can cleanup correctly */
1100         for (i = 0; i < nb_rx; ++i) {
1101                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1102         }
1103
1104
1105         return nb_rx;
1106 }
1107
1108 static inline int
1109 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1110 {
1111         volatile union ixgbe_adv_rx_desc *rxdp;
1112         struct ixgbe_rx_entry *rxep;
1113         struct rte_mbuf *mb;
1114         uint16_t alloc_idx;
1115         __le64 dma_addr;
1116         int diag, i;
1117
1118         /* allocate buffers in bulk directly into the S/W ring */
1119         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1120         rxep = &rxq->sw_ring[alloc_idx];
1121         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1122                                     rxq->rx_free_thresh);
1123         if (unlikely(diag != 0))
1124                 return (-ENOMEM);
1125
1126         rxdp = &rxq->rx_ring[alloc_idx];
1127         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1128                 /* populate the static rte mbuf fields */
1129                 mb = rxep[i].mbuf;
1130                 if (reset_mbuf) {
1131                         mb->next = NULL;
1132                         mb->nb_segs = 1;
1133                         mb->port = rxq->port_id;
1134                 }
1135
1136                 rte_mbuf_refcnt_set(mb, 1);
1137                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1138
1139                 /* populate the descriptors */
1140                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
1141                 rxdp[i].read.hdr_addr = 0;
1142                 rxdp[i].read.pkt_addr = dma_addr;
1143         }
1144
1145         /* update state of internal queue structure */
1146         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1147         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1148                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1149
1150         /* no errors */
1151         return 0;
1152 }
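/*
 * Worked example (editor's note, illustrative numbers): with
 * rx_free_thresh = 32 and rx_free_trigger = 31, alloc_idx is
 * 31 - (32 - 1) = 0, so sw_ring[0] .. sw_ring[31] are refilled by a
 * single rte_mempool_get_bulk() call and rx_free_trigger advances to 63.
 */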
1153
1154 static inline uint16_t
1155 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1156                          uint16_t nb_pkts)
1157 {
1158         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1159         int i;
1160
1161         /* how many packets are ready to return? */
1162         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1163
1164         /* copy mbuf pointers to the application's packet list */
1165         for (i = 0; i < nb_pkts; ++i)
1166                 rx_pkts[i] = stage[i];
1167
1168         /* update internal queue state */
1169         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1170         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1171
1172         return nb_pkts;
1173 }
1174
1175 static inline uint16_t
1176 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1177              uint16_t nb_pkts)
1178 {
1179         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1180         uint16_t nb_rx = 0;
1181
1182         /* Any previously recv'd pkts will be returned from the Rx stage */
1183         if (rxq->rx_nb_avail)
1184                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1185
1186         /* Scan the H/W ring for packets to receive */
1187         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1188
1189         /* update internal queue state */
1190         rxq->rx_next_avail = 0;
1191         rxq->rx_nb_avail = nb_rx;
1192         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1193
1194         /* if required, allocate new buffers to replenish descriptors */
1195         if (rxq->rx_tail > rxq->rx_free_trigger) {
1196                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1197
1198                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1199                         int i, j;
1200                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1201                                    "queue_id=%u", (unsigned) rxq->port_id,
1202                                    (unsigned) rxq->queue_id);
1203
1204                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1205                                 rxq->rx_free_thresh;
1206
1207                         /*
1208                          * Need to rewind any previous receives if we cannot
1209                          * allocate new buffers to replenish the old ones.
1210                          */
1211                         rxq->rx_nb_avail = 0;
1212                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1213                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1214                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1215
1216                         return 0;
1217                 }
1218
1219                 /* update tail pointer */
1220                 rte_wmb();
1221                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1222         }
1223
1224         if (rxq->rx_tail >= rxq->nb_rx_desc)
1225                 rxq->rx_tail = 0;
1226
1227         /* received any packets this loop? */
1228         if (rxq->rx_nb_avail)
1229                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1230
1231         return 0;
1232 }
1233
1234 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1235 static uint16_t
1236 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1237                            uint16_t nb_pkts)
1238 {
1239         uint16_t nb_rx;
1240
1241         if (unlikely(nb_pkts == 0))
1242                 return 0;
1243
1244         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1245                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1246
1247         /* request is relatively large, chunk it up */
1248         nb_rx = 0;
1249         while (nb_pkts) {
1250                 uint16_t ret, n;
1251                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1252                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1253                 nb_rx = (uint16_t)(nb_rx + ret);
1254                 nb_pkts = (uint16_t)(nb_pkts - ret);
1255                 if (ret < n)
1256                         break;
1257         }
1258
1259         return nb_rx;
1260 }
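/*
 * Illustrative application-side usage (editor's note, not part of the
 * driver): once selected as the queue's RX handler, this function is
 * reached through the generic burst API; "port" and "queue" below are
 * hypothetical values.
 *
 *     struct rte_mbuf *burst[32];
 *     uint16_t nb = rte_eth_rx_burst(port, queue, burst, 32);
 *     uint16_t k;
 *
 *     for (k = 0; k < nb; k++)
 *             rte_pktmbuf_free(burst[k]);
 */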
1261
1262 uint16_t
1263 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1264                 uint16_t nb_pkts)
1265 {
1266         struct ixgbe_rx_queue *rxq;
1267         volatile union ixgbe_adv_rx_desc *rx_ring;
1268         volatile union ixgbe_adv_rx_desc *rxdp;
1269         struct ixgbe_rx_entry *sw_ring;
1270         struct ixgbe_rx_entry *rxe;
1271         struct rte_mbuf *rxm;
1272         struct rte_mbuf *nmb;
1273         union ixgbe_adv_rx_desc rxd;
1274         uint64_t dma_addr;
1275         uint32_t staterr;
1276         uint32_t pkt_info;
1277         uint16_t pkt_len;
1278         uint16_t rx_id;
1279         uint16_t nb_rx;
1280         uint16_t nb_hold;
1281         uint64_t pkt_flags;
1282
1283         nb_rx = 0;
1284         nb_hold = 0;
1285         rxq = rx_queue;
1286         rx_id = rxq->rx_tail;
1287         rx_ring = rxq->rx_ring;
1288         sw_ring = rxq->sw_ring;
1289         while (nb_rx < nb_pkts) {
1290                 /*
1291                  * The order of operations here is important as the DD status
1292                  * bit must not be read after any other descriptor fields.
1293                  * rx_ring and rxdp are pointing to volatile data so the order
1294                  * of accesses cannot be reordered by the compiler. If they were
1295                  * not volatile, they could be reordered which could lead to
1296                  * using invalid descriptor fields when read from rxd.
1297                  */
1298                 rxdp = &rx_ring[rx_id];
1299                 staterr = rxdp->wb.upper.status_error;
1300                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1301                         break;
1302                 rxd = *rxdp;
1303
1304                 /*
1305                  * End of packet.
1306                  *
1307                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1308                  * is likely to be invalid and to be dropped by the various
1309                  * validation checks performed by the network stack.
1310                  *
1311                  * Allocate a new mbuf to replenish the RX ring descriptor.
1312                  * If the allocation fails:
1313                  *    - arrange for that RX descriptor to be the first one
1314                  *      being parsed the next time the receive function is
1315                  *      invoked [on the same queue].
1316                  *
1317                  *    - Stop parsing the RX ring and return immediately.
1318                  *
1319                  * This policy does not drop the packet received in the RX
1320                  * descriptor for which the allocation of a new mbuf failed.
1321                  * Thus, it allows that packet to be retrieved later if
1322                  * mbufs have been freed in the meantime.
1323                  * As a side effect, holding RX descriptors instead of
1324                  * systematically giving them back to the NIC may lead to
1325                  * RX ring exhaustion situations.
1326                  * However, the NIC can gracefully prevent such situations
1327                  * from happening by sending specific "back-pressure" flow control
1328                  * frames to its peer(s).
1329                  */
1330                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1331                            "ext_err_stat=0x%08x pkt_len=%u",
1332                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1333                            (unsigned) rx_id, (unsigned) staterr,
1334                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1335
1336                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1337                 if (nmb == NULL) {
1338                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1339                                    "queue_id=%u", (unsigned) rxq->port_id,
1340                                    (unsigned) rxq->queue_id);
1341                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1342                         break;
1343                 }
1344
1345                 nb_hold++;
1346                 rxe = &sw_ring[rx_id];
1347                 rx_id++;
1348                 if (rx_id == rxq->nb_rx_desc)
1349                         rx_id = 0;
1350
1351                 /* Prefetch next mbuf while processing current one. */
1352                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1353
1354                 /*
1355                  * When the next RX descriptor is on a cache-line boundary,
1356                  * prefetch the next 4 RX descriptors and the next 8 pointers
1357                  * to mbufs.
1358                  */
1359                 if ((rx_id & 0x3) == 0) {
1360                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1361                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1362                 }
1363
1364                 rxm = rxe->mbuf;
1365                 rxe->mbuf = nmb;
1366                 dma_addr =
1367                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1368                 rxdp->read.hdr_addr = 0;
1369                 rxdp->read.pkt_addr = dma_addr;
1370
1371                 /*
1372                  * Initialize the returned mbuf.
1373                  * 1) setup generic mbuf fields:
1374                  *    - number of segments,
1375                  *    - next segment,
1376                  *    - packet length,
1377                  *    - RX port identifier.
1378                  * 2) integrate hardware offload data, if any:
1379                  *    - RSS flag & hash,
1380                  *    - IP checksum flag,
1381                  *    - VLAN TCI, if any,
1382                  *    - error flags.
1383                  */
1384                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1385                                       rxq->crc_len);
1386                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1387                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1388                 rxm->nb_segs = 1;
1389                 rxm->next = NULL;
1390                 rxm->pkt_len = pkt_len;
1391                 rxm->data_len = pkt_len;
1392                 rxm->port = rxq->port_id;
1393
1394                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1395                                                                 pkt_info);
1396                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1397                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1398
1399                 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1400                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1401                 pkt_flags = pkt_flags |
1402                         ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1403                 rxm->ol_flags = pkt_flags;
1404                 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1405
1406                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1407                         rxm->hash.rss = rte_le_to_cpu_32(
1408                                                 rxd.wb.lower.hi_dword.rss);
1409                 else if (pkt_flags & PKT_RX_FDIR) {
1410                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1411                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1412                                         IXGBE_ATR_HASH_MASK;
1413                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1414                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1415                 }
1416                 /*
1417                  * Store the mbuf address into the next entry of the array
1418                  * of returned packets.
1419                  */
1420                 rx_pkts[nb_rx++] = rxm;
1421         }
1422         rxq->rx_tail = rx_id;
1423
1424         /*
1425          * If the number of free RX descriptors is greater than the RX free
1426          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1427          * register.
1428          * Update the RDT with the value of the last processed RX descriptor
1429          * minus 1, to guarantee that the RDT register is never equal to the
1430          * RDH register, which creates a "full" ring situation from the
1431          * hardware point of view...
1432          */
1433         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1434         if (nb_hold > rxq->rx_free_thresh) {
1435                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1436                            "nb_hold=%u nb_rx=%u",
1437                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1438                            (unsigned) rx_id, (unsigned) nb_hold,
1439                            (unsigned) nb_rx);
1440                 rx_id = (uint16_t) ((rx_id == 0) ?
1441                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1442                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1443                 nb_hold = 0;
1444         }
1445         rxq->nb_rx_hold = nb_hold;
1446         return (nb_rx);
1447 }
1448
1449 /**
1450  * Detect an RSC descriptor.
1451  */
1452 static inline uint32_t
1453 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1454 {
1455         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1456                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1457 }
1458
1459 /**
1460  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1461  *
1462  * Fill the following info in the HEAD buffer of the Rx cluster:
1463  *    - RX port identifier
1464  *    - hardware offload data, if any:
1465  *      - RSS flag & hash
1466  *      - IP checksum flag
1467  *      - VLAN TCI, if any
1468  *      - error flags
1469  * @head HEAD of the packet cluster
1470  * @desc HW descriptor to get data from
1471  * @port_id Port ID of the Rx queue
1472  */
1473 static inline void
1474 ixgbe_fill_cluster_head_buf(
1475         struct rte_mbuf *head,
1476         union ixgbe_adv_rx_desc *desc,
1477         uint8_t port_id,
1478         uint32_t staterr)
1479 {
1480         uint16_t pkt_info;
1481         uint64_t pkt_flags;
1482
1483         head->port = port_id;
1484
1485         /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1486          * set in the pkt_flags field.
1487          */
1488         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1489         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1490         pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1491         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1492         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1493         head->ol_flags = pkt_flags;
1494         head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1495
1496         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1497                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1498         else if (pkt_flags & PKT_RX_FDIR) {
1499                 head->hash.fdir.hash =
1500                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1501                                                           & IXGBE_ATR_HASH_MASK;
1502                 head->hash.fdir.id =
1503                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1504         }
1505 }
1506
1507 /**
1508  * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1509  *
1510  * @rx_queue Rx queue handle
1511  * @rx_pkts table of received packets
1512  * @nb_pkts size of rx_pkts table
1513  * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1514  *
1515  * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1516  * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1517  *
1518  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1519  * 1) When non-EOP RSC completion arrives:
1520  *    a) Update the HEAD of the current RSC aggregation cluster with the new
1521  *       segment's data length.
1522  *    b) Set the "next" pointer of the current segment to point to the segment
1523  *       at the NEXTP index.
1524  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1525  *       in the sw_rsc_ring.
1526  * 2) When EOP arrives we just update the cluster's total length and offload
1527  *    flags and deliver the cluster up to the upper layers. In our case - put it
1528  *    in the rx_pkts table.
1529  *
1530  * Returns the number of received packets/clusters (according to the "bulk
1531  * receive" interface).
1532  */
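/*
 * ixgbe_recv_pkts_lro_single_alloc() and ixgbe_recv_pkts_lro_bulk_alloc()
 * defined below are thin wrappers around this routine; they differ only in
 * the bulk_alloc flag they pass in.
 */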
1533 static inline uint16_t
1534 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1535                     bool bulk_alloc)
1536 {
1537         struct ixgbe_rx_queue *rxq = rx_queue;
1538         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1539         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1540         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1541         uint16_t rx_id = rxq->rx_tail;
1542         uint16_t nb_rx = 0;
1543         uint16_t nb_hold = rxq->nb_rx_hold;
1544         uint16_t prev_id = rxq->rx_tail;
1545
1546         while (nb_rx < nb_pkts) {
1547                 bool eop;
1548                 struct ixgbe_rx_entry *rxe;
1549                 struct ixgbe_scattered_rx_entry *sc_entry;
1550                 struct ixgbe_scattered_rx_entry *next_sc_entry;
1551                 struct ixgbe_rx_entry *next_rxe;
1552                 struct rte_mbuf *first_seg;
1553                 struct rte_mbuf *rxm;
1554                 struct rte_mbuf *nmb;
1555                 union ixgbe_adv_rx_desc rxd;
1556                 uint16_t data_len;
1557                 uint16_t next_id;
1558                 volatile union ixgbe_adv_rx_desc *rxdp;
1559                 uint32_t staterr;
1560
1561 next_desc:
1562                 /*
1563                  * The code in this whole file uses the volatile pointer to
1564                  * ensure the read ordering of the status and the rest of the
1565                  * descriptor fields (on the compiler level only!!!). This is so
1566                  * UGLY - why not just use the compiler barrier instead? DPDK
1567                  * even has the rte_compiler_barrier() for that.
1568                  *
1569                  * But most importantly this is just wrong because this doesn't
1570                  * ensure memory ordering in a general case at all. For
1571                  * instance, DPDK is supposed to work on Power CPUs where
1572                  * a compiler barrier may just not be enough!
1573                  *
1574                  * I tried to write only this function properly to have a
1575                  * starting point (as a part of an LRO/RSC series) but the
1576                  * compiler cursed at me when I tried to cast away the
1577                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1578                  * keeping it the way it is for now.
1579                  *
1580                  * The code in this file is broken in so many other places and
1581                  * will just not work on a big endian CPU anyway therefore the
1582                  * lines below will have to be revisited together with the rest
1583                  * of the ixgbe PMD.
1584                  *
1585                  * TODO:
1586                  *    - Get rid of "volatile" crap and let the compiler do its
1587                  *      job.
1588                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
1589                  *      memory ordering below.
1590                  */
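                /*
                 * For illustration only - a minimal sketch of what the TODO
                 * above would amount to (not applied here): read the status
                 * word first, then issue a read barrier before copying the
                 * rest of the descriptor, e.g.:
                 *
                 *     staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
                 *     if (!(staterr & IXGBE_RXDADV_STAT_DD))
                 *             break;
                 *     rte_rmb();
                 *     rxd = *rxdp;
                 */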
1591                 rxdp = &rx_ring[rx_id];
1592                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1593
1594                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1595                         break;
1596
1597                 rxd = *rxdp;
1598
1599                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1600                                   "staterr=0x%x data_len=%u",
1601                            rxq->port_id, rxq->queue_id, rx_id, staterr,
1602                            rte_le_to_cpu_16(rxd.wb.upper.length));
1603
1604                 if (!bulk_alloc) {
1605                         nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1606                         if (nmb == NULL) {
1607                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1608                                                   "port_id=%u queue_id=%u",
1609                                            rxq->port_id, rxq->queue_id);
1610
1611                                 rte_eth_devices[rxq->port_id].data->
1612                                                         rx_mbuf_alloc_failed++;
1613                                 break;
1614                         }
1615                 }
1616                 else if (nb_hold > rxq->rx_free_thresh) {
1617                         uint16_t next_rdt = rxq->rx_free_trigger;
1618
1619                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1620                                 rte_wmb();
1621                                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1622                                                     next_rdt);
1623                                 nb_hold -= rxq->rx_free_thresh;
1624                         } else {
1625                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1626                                                   "port_id=%u queue_id=%u",
1627                                            rxq->port_id, rxq->queue_id);
1628
1629                                 rte_eth_devices[rxq->port_id].data->
1630                                                         rx_mbuf_alloc_failed++;
1631                                 break;
1632                         }
1633                 }
1634
1635                 nb_hold++;
1636                 rxe = &sw_ring[rx_id];
1637                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1638
1639                 next_id = rx_id + 1;
1640                 if (next_id == rxq->nb_rx_desc)
1641                         next_id = 0;
1642
1643                 /* Prefetch next mbuf while processing current one. */
1644                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1645
1646                 /*
1647                  * When the next RX descriptor is on a cache-line boundary,
1648                  * prefetch the next 4 RX descriptors and the next 8 pointers
1649                  * to mbufs.
1650                  */
1651                 if ((next_id & 0x3) == 0) {
1652                         rte_ixgbe_prefetch(&rx_ring[next_id]);
1653                         rte_ixgbe_prefetch(&sw_ring[next_id]);
1654                 }
1655
1656                 rxm = rxe->mbuf;
1657
1658                 if (!bulk_alloc) {
1659                         __le64 dma =
1660                           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1661                         /*
1662                          * Update RX descriptor with the physical address of the
1663                          * new data buffer of the newly allocated mbuf.
1664                          */
1665                         rxe->mbuf = nmb;
1666
1667                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
1668                         rxdp->read.hdr_addr = 0;
1669                         rxdp->read.pkt_addr = dma;
1670                 } else
1671                         rxe->mbuf = NULL;
1672
1673                 /*
1674                  * Set data length & data buffer address of mbuf.
1675                  */
1676                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1677                 rxm->data_len = data_len;
1678
1679                 if (!eop) {
1680                         uint16_t nextp_id;
1681                         /*
1682                          * Get next descriptor index:
1683                          *  - For RSC it's in the NEXTP field.
1684                          *  - For a scattered packet - it's just a following
1685                          *    descriptor.
1686                          */
1687                         if (ixgbe_rsc_count(&rxd))
1688                                 nextp_id =
1689                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1690                                                        IXGBE_RXDADV_NEXTP_SHIFT;
1691                         else
1692                                 nextp_id = next_id;
1693
1694                         next_sc_entry = &sw_sc_ring[nextp_id];
1695                         next_rxe = &sw_ring[nextp_id];
1696                         rte_ixgbe_prefetch(next_rxe);
1697                 }
1698
1699                 sc_entry = &sw_sc_ring[rx_id];
1700                 first_seg = sc_entry->fbuf;
1701                 sc_entry->fbuf = NULL;
1702
1703                 /*
1704                  * If this is the first buffer of the received packet,
1705                  * set the pointer to the first mbuf of the packet and
1706                  * initialize its context.
1707                  * Otherwise, update the total length and the number of segments
1708                  * of the current scattered packet, and update the pointer to
1709                  * the last mbuf of the current packet.
1710                  */
1711                 if (first_seg == NULL) {
1712                         first_seg = rxm;
1713                         first_seg->pkt_len = data_len;
1714                         first_seg->nb_segs = 1;
1715                 } else {
1716                         first_seg->pkt_len += data_len;
1717                         first_seg->nb_segs++;
1718                 }
1719
1720                 prev_id = rx_id;
1721                 rx_id = next_id;
1722
1723                 /*
1724                  * If this is not the last buffer of the received packet, update
1725                  * the pointer to the first mbuf at the NEXTP entry in the
1726                  * sw_sc_ring and continue to parse the RX ring.
1727                  */
1728                 if (!eop) {
1729                         rxm->next = next_rxe->mbuf;
1730                         next_sc_entry->fbuf = first_seg;
1731                         goto next_desc;
1732                 }
1733
1734                 /*
1735                  * This is the last buffer of the received packet - return
1736                  * the current cluster to the user.
1737                  */
1738                 rxm->next = NULL;
1739
1740                 /* Initialize the first mbuf of the returned packet */
1741                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1742                                             staterr);
1743
1744                 /*
1745                  * Deal with the case when HW CRC strip is disabled.
1746                  * That can't happen when LRO is enabled, but it could still
1747                  * happen in scattered Rx mode.
1748                  */
1749                 first_seg->pkt_len -= rxq->crc_len;
1750                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1751                         struct rte_mbuf *lp;
1752
1753                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
1754                                 ;
1755
1756                         first_seg->nb_segs--;
1757                         lp->data_len -= rxq->crc_len - rxm->data_len;
1758                         lp->next = NULL;
1759                         rte_pktmbuf_free_seg(rxm);
1760                 } else
1761                         rxm->data_len -= rxq->crc_len;
1762
1763                 /* Prefetch data of first segment, if configured to do so. */
1764                 rte_packet_prefetch((char *)first_seg->buf_addr +
1765                         first_seg->data_off);
1766
1767                 /*
1768                  * Store the mbuf address into the next entry of the array
1769                  * of returned packets.
1770                  */
1771                 rx_pkts[nb_rx++] = first_seg;
1772         }
1773
1774         /*
1775          * Record index of the next RX descriptor to probe.
1776          */
1777         rxq->rx_tail = rx_id;
1778
1779         /*
1780          * If the number of free RX descriptors is greater than the RX free
1781          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1782          * register.
1783          * Update the RDT with the value of the last processed RX descriptor
1784          * minus 1, to guarantee that the RDT register is never equal to the
1785          * RDH register, which creates a "full" ring situation from the
1786          * hardware point of view...
1787          */
1788         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1789                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1790                            "nb_hold=%u nb_rx=%u",
1791                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1792
1793                 rte_wmb();
1794                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1795                 nb_hold = 0;
1796         }
1797
1798         rxq->nb_rx_hold = nb_hold;
1799         return nb_rx;
1800 }
1801
1802 uint16_t
1803 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1804                                  uint16_t nb_pkts)
1805 {
1806         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1807 }
1808
1809 uint16_t
1810 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1811                                uint16_t nb_pkts)
1812 {
1813         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1814 }
1815
1816 /*********************************************************************
1817  *
1818  *  Queue management functions
1819  *
1820  **********************************************************************/
1821
1822 /*
1823  * Create memzone for HW rings. malloc can't be used as the physical address is
1824  * needed. If the memzone is already created, then this function returns a ptr
1825  * to the old one.
1826  */
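/*
 * For illustration (assuming the PMD registers itself under the name
 * "rte_ixgbe_pmd"): the Tx ring of queue 0 on port 0 would be looked up or
 * reserved under the memzone name "rte_ixgbe_pmd_tx_ring_0_0".
 */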
1827 static const struct rte_memzone * __attribute__((cold))
1828 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1829                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1830 {
1831         char z_name[RTE_MEMZONE_NAMESIZE];
1832         const struct rte_memzone *mz;
1833
1834         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1835                         dev->driver->pci_drv.name, ring_name,
1836                         dev->data->port_id, queue_id);
1837
1838         mz = rte_memzone_lookup(z_name);
1839         if (mz)
1840                 return mz;
1841
1842 #ifdef RTE_LIBRTE_XEN_DOM0
1843         return rte_memzone_reserve_bounded(z_name, ring_size,
1844                 socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
1845 #else
1846         return rte_memzone_reserve_aligned(z_name, ring_size,
1847                 socket_id, 0, IXGBE_ALIGN);
1848 #endif
1849 }
1850
1851 static void __attribute__((cold))
1852 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1853 {
1854         unsigned i;
1855
1856         if (txq->sw_ring != NULL) {
1857                 for (i = 0; i < txq->nb_tx_desc; i++) {
1858                         if (txq->sw_ring[i].mbuf != NULL) {
1859                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1860                                 txq->sw_ring[i].mbuf = NULL;
1861                         }
1862                 }
1863         }
1864 }
1865
1866 static void __attribute__((cold))
1867 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1868 {
1869         if (txq != NULL &&
1870             txq->sw_ring != NULL)
1871                 rte_free(txq->sw_ring);
1872 }
1873
1874 static void __attribute__((cold))
1875 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1876 {
1877         if (txq != NULL && txq->ops != NULL) {
1878                 txq->ops->release_mbufs(txq);
1879                 txq->ops->free_swring(txq);
1880                 rte_free(txq);
1881         }
1882 }
1883
1884 void __attribute__((cold))
1885 ixgbe_dev_tx_queue_release(void *txq)
1886 {
1887         ixgbe_tx_queue_release(txq);
1888 }
1889
1890 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1891 static void __attribute__((cold))
1892 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1893 {
1894         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1895         struct ixgbe_tx_entry *txe = txq->sw_ring;
1896         uint16_t prev, i;
1897
1898         /* Zero out HW ring memory */
1899         for (i = 0; i < txq->nb_tx_desc; i++) {
1900                 txq->tx_ring[i] = zeroed_desc;
1901         }
1902
1903         /* Initialize SW ring entries */
1904         prev = (uint16_t) (txq->nb_tx_desc - 1);
1905         for (i = 0; i < txq->nb_tx_desc; i++) {
1906                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1907                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1908                 txe[i].mbuf = NULL;
1909                 txe[i].last_id = i;
1910                 txe[prev].next_id = i;
1911                 prev = i;
1912         }
1913
1914         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1915         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1916
1917         txq->tx_tail = 0;
1918         txq->nb_tx_used = 0;
1919         /*
1920          * Always allow 1 descriptor to be un-allocated to avoid
1921          * a H/W race condition
1922          */
1923         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1924         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1925         txq->ctx_curr = 0;
1926         memset((void*)&txq->ctx_cache, 0,
1927                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1928 }
1929
1930 static const struct ixgbe_txq_ops def_txq_ops = {
1931         .release_mbufs = ixgbe_tx_queue_release_mbufs,
1932         .free_swring = ixgbe_tx_free_swring,
1933         .reset = ixgbe_reset_tx_queue,
1934 };
1935
1936 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1937  * the queue parameters. Used in tx_queue_setup by primary process and then
1938  * in dev_init by secondary process when attaching to an existing ethdev.
1939  */
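/*
 * Selection summary: the simple or vector Tx path is chosen only when the
 * queue's txq_flags include all bits of IXGBE_SIMPLE_FLAGS (no offloads,
 * no multi-segment mbufs) and tx_rs_thresh is at least
 * RTE_PMD_IXGBE_TX_MAX_BURST; any other combination falls back to the
 * full-featured ixgbe_xmit_pkts() path.
 */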
1940 void __attribute__((cold))
1941 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1942 {
1943         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1944         if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1945                         && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1946                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1947 #ifdef RTE_IXGBE_INC_VECTOR
1948                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1949                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1950                                         ixgbe_txq_vec_setup(txq) == 0)) {
1951                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1952                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1953                 } else
1954 #endif
1955                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1956         } else {
1957                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1958                 PMD_INIT_LOG(DEBUG,
1959                                 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1960                                 (unsigned long)txq->txq_flags,
1961                                 (unsigned long)IXGBE_SIMPLE_FLAGS);
1962                 PMD_INIT_LOG(DEBUG,
1963                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
1964                                 (unsigned long)txq->tx_rs_thresh,
1965                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
1966                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
1967         }
1968 }
1969
1970 int __attribute__((cold))
1971 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1972                          uint16_t queue_idx,
1973                          uint16_t nb_desc,
1974                          unsigned int socket_id,
1975                          const struct rte_eth_txconf *tx_conf)
1976 {
1977         const struct rte_memzone *tz;
1978         struct ixgbe_tx_queue *txq;
1979         struct ixgbe_hw     *hw;
1980         uint16_t tx_rs_thresh, tx_free_thresh;
1981
1982         PMD_INIT_FUNC_TRACE();
1983         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1984
1985         /*
1986          * Validate number of transmit descriptors.
1987          * It must lie between the hardware minimum and maximum, and must
1988          * be a multiple of IXGBE_TXD_ALIGN.
1989          */
1990         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
1991                         (nb_desc > IXGBE_MAX_RING_DESC) ||
1992                         (nb_desc < IXGBE_MIN_RING_DESC)) {
1993                 return -EINVAL;
1994         }
1995
1996         /*
1997          * The following two parameters control the setting of the RS bit on
1998          * transmit descriptors.
1999          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2000          * descriptors have been used.
2001          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2002          * descriptors are used or if the number of descriptors required
2003          * to transmit a packet is greater than the number of free TX
2004          * descriptors.
2005          * The following constraints must be satisfied:
2006          *  tx_rs_thresh must be greater than 0.
2007          *  tx_rs_thresh must be less than the size of the ring minus 2.
2008          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2009          *  tx_rs_thresh must be a divisor of the ring size.
2010          *  tx_free_thresh must be greater than 0.
2011          *  tx_free_thresh must be less than the size of the ring minus 3.
2012          * One descriptor in the TX ring is used as a sentinel to avoid a
2013          * H/W race condition, hence the maximum threshold constraints.
2014          * When set to zero use default values.
2015          */
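        /*
         * A hypothetical example satisfying all of the above constraints
         * (illustrative values only): with nb_desc = 512, tx_rs_thresh = 32
         * and tx_free_thresh = 64 are valid, since 32 <= 64, 32 < 510,
         * 64 < 509, 512 % 32 == 0 and tx_thresh.wthresh is left at 0.
         * An application could request this via rte_eth_txconf, e.g.:
         *
         *     struct rte_eth_txconf txconf = {
         *             .tx_rs_thresh = 32,
         *             .tx_free_thresh = 64,
         *     };
         *     rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
         *                            &txconf);
         */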
2016         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2017                         tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2018         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2019                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2020         if (tx_rs_thresh >= (nb_desc - 2)) {
2021                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2022                              "of TX descriptors minus 2. (tx_rs_thresh=%u "
2023                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2024                              (int)dev->data->port_id, (int)queue_idx);
2025                 return -(EINVAL);
2026         }
2027         if (tx_free_thresh >= (nb_desc - 3)) {
2028                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2029                              "number of TX descriptors minus 3. "
2030                              "(tx_free_thresh=%u "
2031                              "port=%d queue=%d)",
2032                              (unsigned int)tx_free_thresh,
2033                              (int)dev->data->port_id, (int)queue_idx);
2034                 return -(EINVAL);
2035         }
2036         if (tx_rs_thresh > tx_free_thresh) {
2037                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2038                              "tx_free_thresh. (tx_free_thresh=%u "
2039                              "tx_rs_thresh=%u port=%d queue=%d)",
2040                              (unsigned int)tx_free_thresh,
2041                              (unsigned int)tx_rs_thresh,
2042                              (int)dev->data->port_id,
2043                              (int)queue_idx);
2044                 return -(EINVAL);
2045         }
2046         if ((nb_desc % tx_rs_thresh) != 0) {
2047                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2048                              "number of TX descriptors. (tx_rs_thresh=%u "
2049                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2050                              (int)dev->data->port_id, (int)queue_idx);
2051                 return -(EINVAL);
2052         }
2053
2054         /*
2055          * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2056          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2057          * by the NIC and all descriptors are written back after the NIC
2058          * accumulates WTHRESH descriptors.
2059          */
2060         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2061                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2062                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2063                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2064                              (int)dev->data->port_id, (int)queue_idx);
2065                 return -(EINVAL);
2066         }
2067
2068         /* Free memory prior to re-allocation if needed... */
2069         if (dev->data->tx_queues[queue_idx] != NULL) {
2070                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2071                 dev->data->tx_queues[queue_idx] = NULL;
2072         }
2073
2074         /* First allocate the tx queue data structure */
2075         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2076                                  RTE_CACHE_LINE_SIZE, socket_id);
2077         if (txq == NULL)
2078                 return (-ENOMEM);
2079
2080         /*
2081          * Allocate TX ring hardware descriptors. A memzone large enough to
2082          * handle the maximum ring size is allocated in order to allow for
2083          * resizing in later calls to the queue setup function.
2084          */
2085         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
2086                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2087                         socket_id);
2088         if (tz == NULL) {
2089                 ixgbe_tx_queue_release(txq);
2090                 return (-ENOMEM);
2091         }
2092
2093         txq->nb_tx_desc = nb_desc;
2094         txq->tx_rs_thresh = tx_rs_thresh;
2095         txq->tx_free_thresh = tx_free_thresh;
2096         txq->pthresh = tx_conf->tx_thresh.pthresh;
2097         txq->hthresh = tx_conf->tx_thresh.hthresh;
2098         txq->wthresh = tx_conf->tx_thresh.wthresh;
2099         txq->queue_id = queue_idx;
2100         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2101                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2102         txq->port_id = dev->data->port_id;
2103         txq->txq_flags = tx_conf->txq_flags;
2104         txq->ops = &def_txq_ops;
2105         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2106
2107         /*
2108          * Set the TDT register address; use VFTDT when running as a virtual function.
2109          */
2110         if (hw->mac.type == ixgbe_mac_82599_vf ||
2111             hw->mac.type == ixgbe_mac_X540_vf ||
2112             hw->mac.type == ixgbe_mac_X550_vf ||
2113             hw->mac.type == ixgbe_mac_X550EM_x_vf)
2114                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2115         else
2116                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2117 #ifndef RTE_LIBRTE_XEN_DOM0
2118         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
2119 #else
2120         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2121 #endif
2122         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2123
2124         /* Allocate software ring */
2125         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2126                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2127                                 RTE_CACHE_LINE_SIZE, socket_id);
2128         if (txq->sw_ring == NULL) {
2129                 ixgbe_tx_queue_release(txq);
2130                 return (-ENOMEM);
2131         }
2132         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2133                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2134
2135         /* set up vector or scalar TX function as appropriate */
2136         ixgbe_set_tx_function(dev, txq);
2137
2138         txq->ops->reset(txq);
2139
2140         dev->data->tx_queues[queue_idx] = txq;
2141
2142
2143         return (0);
2144 }
2145
2146 /**
2147  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2148  *
2149  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2150  * in the sw_rsc_ring is not set to NULL but rather points to the next
2151  * mbuf of this RSC aggregation (that has not been completed yet and still
2152  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2153  * free only the first "nb_segs" segments of the cluster explicitly by calling
2154  * rte_pktmbuf_free_seg() on each of them.
2155  *
2156  * @m scattered cluster head
2157  */
2158 static void __attribute__((cold))
2159 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2160 {
2161         uint8_t i, nb_segs = m->nb_segs;
2162         struct rte_mbuf *next_seg;
2163
2164         for (i = 0; i < nb_segs; i++) {
2165                 next_seg = m->next;
2166                 rte_pktmbuf_free_seg(m);
2167                 m = next_seg;
2168         }
2169 }
2170
2171 static void __attribute__((cold))
2172 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2173 {
2174         unsigned i;
2175
2176 #ifdef RTE_IXGBE_INC_VECTOR
2177         /* SSE Vector driver has a different way of releasing mbufs. */
2178         if (rxq->rx_using_sse) {
2179                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2180                 return;
2181         }
2182 #endif
2183
2184         if (rxq->sw_ring != NULL) {
2185                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2186                         if (rxq->sw_ring[i].mbuf != NULL) {
2187                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2188                                 rxq->sw_ring[i].mbuf = NULL;
2189                         }
2190                 }
2191                 if (rxq->rx_nb_avail) {
2192                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2193                                 struct rte_mbuf *mb;
2194                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2195                                 rte_pktmbuf_free_seg(mb);
2196                         }
2197                         rxq->rx_nb_avail = 0;
2198                 }
2199         }
2200
2201         if (rxq->sw_sc_ring)
2202                 for (i = 0; i < rxq->nb_rx_desc; i++)
2203                         if (rxq->sw_sc_ring[i].fbuf) {
2204                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2205                                 rxq->sw_sc_ring[i].fbuf = NULL;
2206                         }
2207 }
2208
2209 static void __attribute__((cold))
2210 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2211 {
2212         if (rxq != NULL) {
2213                 ixgbe_rx_queue_release_mbufs(rxq);
2214                 rte_free(rxq->sw_ring);
2215                 rte_free(rxq->sw_sc_ring);
2216                 rte_free(rxq);
2217         }
2218 }
2219
2220 void __attribute__((cold))
2221 ixgbe_dev_rx_queue_release(void *rxq)
2222 {
2223         ixgbe_rx_queue_release(rxq);
2224 }
2225
2226 /*
2227  * Check if Rx Burst Bulk Alloc function can be used.
2228  * Return
2229  *        0: the preconditions are satisfied and the bulk allocation function
2230  *           can be used.
2231  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2232  *           function must be used.
2233  */
2234 static inline int __attribute__((cold))
2235 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2236 {
2237         int ret = 0;
2238
2239         /*
2240          * Make sure the following pre-conditions are satisfied:
2241          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2242          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2243          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2244          *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2245          * Scattered packets are not supported.  This should be checked
2246          * outside of this function.
2247          */
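        /*
         * For illustration (assuming the default RTE_PMD_IXGBE_RX_MAX_BURST
         * of 32 and IXGBE_MAX_RING_DESC of 4096): a queue with
         * nb_rx_desc = 128 and rx_free_thresh = 32 satisfies every condition
         * above, whereas rx_free_thresh = 16 or nb_rx_desc = 4096 would not.
         */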
2248         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2249                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2250                              "rxq->rx_free_thresh=%d, "
2251                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2252                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2253                 ret = -EINVAL;
2254         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2255                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2256                              "rxq->rx_free_thresh=%d, "
2257                              "rxq->nb_rx_desc=%d",
2258                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2259                 ret = -EINVAL;
2260         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2261                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2262                              "rxq->nb_rx_desc=%d, "
2263                              "rxq->rx_free_thresh=%d",
2264                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2265                 ret = -EINVAL;
2266         } else if (!(rxq->nb_rx_desc <
2267                (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2268                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2269                              "rxq->nb_rx_desc=%d, "
2270                              "IXGBE_MAX_RING_DESC=%d, "
2271                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2272                              rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2273                              RTE_PMD_IXGBE_RX_MAX_BURST);
2274                 ret = -EINVAL;
2275         }
2276
2277         return ret;
2278 }
2279
2280 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2281 static void __attribute__((cold))
2282 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2283 {
2284         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2285         unsigned i;
2286         uint16_t len = rxq->nb_rx_desc;
2287
2288         /*
2289          * By default, the Rx queue setup function allocates enough memory for
2290          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2291          * extra memory at the end of the descriptor ring to be zero'd out. A
2292          * pre-condition for using the Rx burst bulk alloc function is that the
2293          * number of descriptors is less than or equal to
2294          * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2295          * constraints here to see if we need to zero out memory after the end
2296          * of the H/W descriptor ring.
2297          */
2298         if (adapter->rx_bulk_alloc_allowed)
2299                 /* zero out extra memory */
2300                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2301
2302         /*
2303          * Zero out HW ring memory. Zero out extra memory at the end of
2304          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2305          * reads extra memory as zeros.
2306          */
2307         for (i = 0; i < len; i++) {
2308                 rxq->rx_ring[i] = zeroed_desc;
2309         }
2310
2311         /*
2312          * Initialize extra software ring entries. Space for these extra
2313          * entries is always allocated.
2314          */
2315         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2316         for (i = rxq->nb_rx_desc; i < len; ++i) {
2317                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2318         }
2319
2320         rxq->rx_nb_avail = 0;
2321         rxq->rx_next_avail = 0;
2322         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2323         rxq->rx_tail = 0;
2324         rxq->nb_rx_hold = 0;
2325         rxq->pkt_first_seg = NULL;
2326         rxq->pkt_last_seg = NULL;
2327
2328 #ifdef RTE_IXGBE_INC_VECTOR
2329         rxq->rxrearm_start = 0;
2330         rxq->rxrearm_nb = 0;
2331 #endif
2332 }
2333
2334 int __attribute__((cold))
2335 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2336                          uint16_t queue_idx,
2337                          uint16_t nb_desc,
2338                          unsigned int socket_id,
2339                          const struct rte_eth_rxconf *rx_conf,
2340                          struct rte_mempool *mp)
2341 {
2342         const struct rte_memzone *rz;
2343         struct ixgbe_rx_queue *rxq;
2344         struct ixgbe_hw     *hw;
2345         uint16_t len;
2346         struct ixgbe_adapter *adapter =
2347                 (struct ixgbe_adapter *)dev->data->dev_private;
2348
2349         PMD_INIT_FUNC_TRACE();
2350         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2351
2352         /*
2353          * Validate number of receive descriptors.
2354          * It must lie between the hardware minimum and maximum, and must
2355          * be a multiple of IXGBE_RXD_ALIGN.
2356          */
2357         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2358                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2359                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2360                 return (-EINVAL);
2361         }
2362
2363         /* Free memory prior to re-allocation if needed... */
2364         if (dev->data->rx_queues[queue_idx] != NULL) {
2365                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2366                 dev->data->rx_queues[queue_idx] = NULL;
2367         }
2368
2369         /* First allocate the rx queue data structure */
2370         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2371                                  RTE_CACHE_LINE_SIZE, socket_id);
2372         if (rxq == NULL)
2373                 return (-ENOMEM);
2374         rxq->mb_pool = mp;
2375         rxq->nb_rx_desc = nb_desc;
2376         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2377         rxq->queue_id = queue_idx;
2378         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2379                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2380         rxq->port_id = dev->data->port_id;
2381         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2382                                                         0 : ETHER_CRC_LEN);
2383         rxq->drop_en = rx_conf->rx_drop_en;
2384         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2385
2386         /*
2387          * Allocate RX ring hardware descriptors. A memzone large enough to
2388          * handle the maximum ring size is allocated in order to allow for
2389          * resizing in later calls to the queue setup function.
2390          */
2391         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2392                                    RX_RING_SZ, socket_id);
2393         if (rz == NULL) {
2394                 ixgbe_rx_queue_release(rxq);
2395                 return (-ENOMEM);
2396         }
2397
2398         /*
2399          * Zero init all the descriptors in the ring.
2400          */
2401         memset (rz->addr, 0, RX_RING_SZ);
2402
2403         /*
2404          * Set the RDT/RDH register addresses; use VFRDT/VFRDH when running as a virtual function.
2405          */
2406         if (hw->mac.type == ixgbe_mac_82599_vf ||
2407             hw->mac.type == ixgbe_mac_X540_vf ||
2408             hw->mac.type == ixgbe_mac_X550_vf ||
2409             hw->mac.type == ixgbe_mac_X550EM_x_vf) {
2410                 rxq->rdt_reg_addr =
2411                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2412                 rxq->rdh_reg_addr =
2413                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2414         }
2415         else {
2416                 rxq->rdt_reg_addr =
2417                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2418                 rxq->rdh_reg_addr =
2419                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2420         }
2421 #ifndef RTE_LIBRTE_XEN_DOM0
2422         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2423 #else
2424         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2425 #endif
2426         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2427
2428         /*
2429          * Certain constraints must be met in order to use the bulk buffer
2430          * allocation Rx burst function. If any of the Rx queues doesn't meet
2431          * them, the feature is disabled for the whole port.
2432          */
2433         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2434                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2435                                     "preconditions - canceling the feature for "
2436                                     "the whole port[%d]",
2437                              rxq->queue_id, rxq->port_id);
2438                 adapter->rx_bulk_alloc_allowed = false;
2439         }
2440
2441         /*
2442          * Allocate software ring. Allow for space at the end of the
2443          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2444          * function does not access an invalid memory region.
2445          */
2446         len = nb_desc;
2447         if (adapter->rx_bulk_alloc_allowed)
2448                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2449
2450         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2451                                           sizeof(struct ixgbe_rx_entry) * len,
2452                                           RTE_CACHE_LINE_SIZE, socket_id);
2453         if (!rxq->sw_ring) {
2454                 ixgbe_rx_queue_release(rxq);
2455                 return (-ENOMEM);
2456         }
2457
2458         /*
2459          * Always allocate even if it's not going to be needed in order to
2460          * simplify the code.
2461          *
2462          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2463          * be requested in ixgbe_dev_rx_init(), which is called later from
2464          * dev_start() flow.
2465          */
2466         rxq->sw_sc_ring =
2467                 rte_zmalloc_socket("rxq->sw_sc_ring",
2468                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
2469                                    RTE_CACHE_LINE_SIZE, socket_id);
2470         if (!rxq->sw_sc_ring) {
2471                 ixgbe_rx_queue_release(rxq);
2472                 return (-ENOMEM);
2473         }
2474
2475         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2476                             "dma_addr=0x%"PRIx64,
2477                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2478                      rxq->rx_ring_phys_addr);
2479
2480         if (!rte_is_power_of_2(nb_desc)) {
2481                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2482                                     "preconditions - canceling the feature for "
2483                                     "the whole port[%d]",
2484                              rxq->queue_id, rxq->port_id);
2485                 adapter->rx_vec_allowed = false;
2486         } else
2487                 ixgbe_rxq_vec_setup(rxq);
2488
2489         dev->data->rx_queues[queue_idx] = rxq;
2490
2491         ixgbe_reset_rx_queue(adapter, rxq);
2492
2493         return 0;
2494 }
2495
2496 uint32_t
2497 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2498 {
2499 #define IXGBE_RXQ_SCAN_INTERVAL 4
2500         volatile union ixgbe_adv_rx_desc *rxdp;
2501         struct ixgbe_rx_queue *rxq;
2502         uint32_t desc = 0;
2503
2504         if (rx_queue_id >= dev->data->nb_rx_queues) {
2505                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2506                 return 0;
2507         }
2508
2509         rxq = dev->data->rx_queues[rx_queue_id];
2510         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2511
2512         while ((desc < rxq->nb_rx_desc) &&
2513                 (rxdp->wb.upper.status_error &
2514                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2515                 desc += IXGBE_RXQ_SCAN_INTERVAL;
2516                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2517                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2518                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2519                                 desc - rxq->nb_rx_desc]);
2520         }
2521
2522         return desc;
2523 }
2524
2525 int
2526 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2527 {
2528         volatile union ixgbe_adv_rx_desc *rxdp;
2529         struct ixgbe_rx_queue *rxq = rx_queue;
2530         uint32_t desc;
2531
2532         if (unlikely(offset >= rxq->nb_rx_desc))
2533                 return 0;
2534         desc = rxq->rx_tail + offset;
2535         if (desc >= rxq->nb_rx_desc)
2536                 desc -= rxq->nb_rx_desc;
2537
2538         rxdp = &rxq->rx_ring[desc];
2539         return !!(rxdp->wb.upper.status_error &
2540                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2541 }
2542
2543 void __attribute__((cold))
2544 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2545 {
2546         unsigned i;
2547         struct ixgbe_adapter *adapter =
2548                 (struct ixgbe_adapter *)dev->data->dev_private;
2549
2550         PMD_INIT_FUNC_TRACE();
2551
2552         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2553                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2554                 if (txq != NULL) {
2555                         txq->ops->release_mbufs(txq);
2556                         txq->ops->reset(txq);
2557                 }
2558         }
2559
2560         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2561                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2562                 if (rxq != NULL) {
2563                         ixgbe_rx_queue_release_mbufs(rxq);
2564                         ixgbe_reset_rx_queue(adapter, rxq);
2565                 }
2566         }
2567 }
2568
2569 void
2570 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2571 {
2572         unsigned i;
2573
2574         PMD_INIT_FUNC_TRACE();
2575
2576         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2577                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2578                 dev->data->rx_queues[i] = NULL;
2579         }
2580         dev->data->nb_rx_queues = 0;
2581
2582         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2583                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2584                 dev->data->tx_queues[i] = NULL;
2585         }
2586         dev->data->nb_tx_queues = 0;
2587 }
2588
2589 /*********************************************************************
2590  *
2591  *  Device RX/TX init functions
2592  *
2593  **********************************************************************/
2594
2595 /**
2596  * Receive Side Scaling (RSS)
2597  * See section 7.1.2.8 in the following document:
2598  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2599  *
2600  * Principles:
2601  * The source and destination IP addresses of the IP header and the source
2602  * and destination ports of TCP/UDP headers, if any, of received packets are
2603  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2604  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2605  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 4-bit
2606  * RSS output index which is used as the RX queue index where to store the
2607  * received packets.
2608  * The following output is supplied in the RX write-back descriptor:
2609  *     - 32-bit result of the Microsoft RSS hash function,
2610  *     - 4-bit RSS type field.
2611  */
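/*
 * Illustrative sketch (not part of the driver): an application typically
 * requests RSS from this PMD through the generic ethdev configuration, e.g.
 * roughly as follows for a hypothetical port_id before rte_eth_dev_start():
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *             .rx_adv_conf = {
 *                     .rss_conf = {
 *                             .rss_key = NULL,
 *                             .rss_hf  = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *                     },
 *             },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &port_conf);
 *
 * A NULL rss_key selects the default rss_intel_key below; ixgbe_rss_configure()
 * then programs RETA, the hash key and the MRQC hash fields from this.
 */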
2612
2613 /*
2614  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2615  * Used as the default key.
2616  */
2617 static uint8_t rss_intel_key[40] = {
2618         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2619         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2620         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2621         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2622         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2623 };
2624
2625 static void
2626 ixgbe_rss_disable(struct rte_eth_dev *dev)
2627 {
2628         struct ixgbe_hw *hw;
2629         uint32_t mrqc;
2630         uint32_t mrqc_reg;
2631
2632         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2633         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2634         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2635         mrqc &= ~IXGBE_MRQC_RSSEN;
2636         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2637 }
2638
2639 static void
2640 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2641 {
2642         uint8_t  *hash_key;
2643         uint32_t mrqc;
2644         uint32_t rss_key;
2645         uint64_t rss_hf;
2646         uint16_t i;
2647         uint32_t mrqc_reg;
2648         uint32_t rssrk_reg;
2649
2650         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2651         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2652
2653         hash_key = rss_conf->rss_key;
2654         if (hash_key != NULL) {
2655                 /* Fill in RSS hash key */
2656                 for (i = 0; i < 10; i++) {
2657                         rss_key  = hash_key[(i * 4)];
2658                         rss_key |= hash_key[(i * 4) + 1] << 8;
2659                         rss_key |= hash_key[(i * 4) + 2] << 16;
2660                         rss_key |= hash_key[(i * 4) + 3] << 24;
2661                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2662                 }
2663         }
2664
2665         /* Set configured hashing protocols in MRQC register */
2666         rss_hf = rss_conf->rss_hf;
2667         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2668         if (rss_hf & ETH_RSS_IPV4)
2669                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2670         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2671                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2672         if (rss_hf & ETH_RSS_IPV6)
2673                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2674         if (rss_hf & ETH_RSS_IPV6_EX)
2675                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2676         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2677                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2678         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2679                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2680         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2681                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2682         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2683                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2684         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2685                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2686         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2687 }
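/*
 * Worked example (illustrative only): the key is written 4 bytes at a time,
 * least-significant byte first.  With the default rss_intel_key above, the
 * first four bytes 0x6D 0x5A 0x56 0xDA are packed by the loop in
 * ixgbe_hw_rss_hash_set() as
 *
 *     RSSRK[0] = 0x6D | (0x5A << 8) | (0x56 << 16) | (0xDA << 24) = 0xDA565A6D
 *
 * i.e. the 40-byte key ends up in the ten RSSRK registers in little-endian
 * byte order.
 */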
2688
2689 int
2690 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2691                           struct rte_eth_rss_conf *rss_conf)
2692 {
2693         struct ixgbe_hw *hw;
2694         uint32_t mrqc;
2695         uint64_t rss_hf;
2696         uint32_t mrqc_reg;
2697
2698         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2699
2700         if (!ixgbe_rss_update_sp(hw->mac.type)) {
2701                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2702                         "NIC.");
2703                 return -ENOTSUP;
2704         }
2705         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2706
2707         /*
2708          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2709          *     "RSS enabling cannot be done dynamically while it must be
2710          *      preceded by a software reset"
2711          * Before changing anything, first check that the update RSS operation
2712          * does not attempt to disable RSS, if RSS was enabled at
2713          * initialization time, or does not attempt to enable RSS, if RSS was
2714          * disabled at initialization time.
2715          */
2716         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2717         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2718         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2719                 if (rss_hf != 0) /* Enable RSS */
2720                         return -(EINVAL);
2721                 return 0; /* Nothing to do */
2722         }
2723         /* RSS enabled */
2724         if (rss_hf == 0) /* Disable RSS */
2725                 return -(EINVAL);
2726         ixgbe_hw_rss_hash_set(hw, rss_conf);
2727         return 0;
2728 }
2729
2730 int
2731 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2732                             struct rte_eth_rss_conf *rss_conf)
2733 {
2734         struct ixgbe_hw *hw;
2735         uint8_t *hash_key;
2736         uint32_t mrqc;
2737         uint32_t rss_key;
2738         uint64_t rss_hf;
2739         uint16_t i;
2740         uint32_t mrqc_reg;
2741         uint32_t rssrk_reg;
2742
2743         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2744         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2745         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2746         hash_key = rss_conf->rss_key;
2747         if (hash_key != NULL) {
2748                 /* Return RSS hash key */
2749                 for (i = 0; i < 10; i++) {
2750                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2751                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2752                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2753                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2754                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2755                 }
2756         }
2757
2758         /* Get RSS functions configured in MRQC register */
2759         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2760         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2761                 rss_conf->rss_hf = 0;
2762                 return 0;
2763         }
2764         rss_hf = 0;
2765         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2766                 rss_hf |= ETH_RSS_IPV4;
2767         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2768                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2769         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2770                 rss_hf |= ETH_RSS_IPV6;
2771         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2772                 rss_hf |= ETH_RSS_IPV6_EX;
2773         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2774                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2775         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2776                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2777         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2778                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2779         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2780                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2781         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2782                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2783         rss_conf->rss_hf = rss_hf;
2784         return 0;
2785 }
2786
2787 static void
2788 ixgbe_rss_configure(struct rte_eth_dev *dev)
2789 {
2790         struct rte_eth_rss_conf rss_conf;
2791         struct ixgbe_hw *hw;
2792         uint32_t reta;
2793         uint16_t i;
2794         uint16_t j;
2795         uint16_t sp_reta_size;
2796         uint32_t reta_reg;
2797
2798         PMD_INIT_FUNC_TRACE();
2799         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2800
2801         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2802
2803         /*
2804          * Fill in redirection table
2805          * The byte-swap is needed because NIC registers are in
2806          * little-endian order.
2807          */
2808         reta = 0;
2809         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2810                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2811
2812                 if (j == dev->data->nb_rx_queues)
2813                         j = 0;
2814                 reta = (reta << 8) | j;
2815                 if ((i & 3) == 3)
2816                         IXGBE_WRITE_REG(hw, reta_reg,
2817                                         rte_bswap32(reta));
2818         }
2819
2820         /*
2821          * Configure the RSS key and the RSS protocols used to compute
2822          * the RSS hash of input packets.
2823          */
2824         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2825         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2826                 ixgbe_rss_disable(dev);
2827                 return;
2828         }
2829         if (rss_conf.rss_key == NULL)
2830                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2831         ixgbe_hw_rss_hash_set(hw, &rss_conf);
2832 }
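/*
 * Worked example (illustrative only): with 4 RX queues the loop above fills
 * the redirection table with the repeating pattern 0,1,2,3,...  A received
 * packet whose 32-bit RSS hash is 0x1234ABCD is steered using the 7 LSBs of
 * that hash:
 *
 *     0x1234ABCD & 0x7F = 77   ->   RETA[77] = 77 % 4 = 1
 *
 * so the packet lands in RX queue 1.  Entries are accumulated four at a time
 * into a 32-bit value, hence the rte_bswap32() before each register write.
 */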
2833
2834 #define NUM_VFTA_REGISTERS 128
2835 #define NIC_RX_BUFFER_SIZE 0x200
2836 #define X550_RX_BUFFER_SIZE 0x180
2837
2838 static void
2839 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2840 {
2841         struct rte_eth_vmdq_dcb_conf *cfg;
2842         struct ixgbe_hw *hw;
2843         enum rte_eth_nb_pools num_pools;
2844         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2845         uint16_t pbsize;
2846         uint8_t nb_tcs; /* number of traffic classes */
2847         int i;
2848
2849         PMD_INIT_FUNC_TRACE();
2850         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2851         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2852         num_pools = cfg->nb_queue_pools;
2853         /* Check we have a valid number of pools */
2854         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2855                 ixgbe_rss_disable(dev);
2856                 return;
2857         }
2858         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2859         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2860
2861         /*
2862          * RXPBSIZE
2863          * split rx buffer up into sections, each for 1 traffic class
2864          */
2865         switch (hw->mac.type) {
2866         case ixgbe_mac_X550:
2867         case ixgbe_mac_X550EM_x:
2868                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2869                 break;
2870         default:
2871                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2872                 break;
2873         }
2874         for (i = 0; i < nb_tcs; i++) {
2875                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2876                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2877                 /* clear 10 bits. */
2878                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2879                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2880         }
2881         /* zero alloc all unused TCs */
2882         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2883                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2884                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2885                 /* clear 10 bits. */
2886                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2887         }
2888
2889         /* MRQC: enable vmdq and dcb */
2890         mrqc = (num_pools == ETH_16_POOLS) ?
2891                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
2892         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2893
2894         /* PFVTCTL: turn on virtualisation and set the default pool */
2895         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2896         if (cfg->enable_default_pool) {
2897                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2898         } else {
2899                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2900         }
2901
2902         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2903
2904         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2905         queue_mapping = 0;
2906         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2907                 /*
2908                  * mapping is done with 3 bits per priority,
2909                  * so shift by i*3 each time
2910                  */
2911                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2912
2913         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
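        /*
         * Worked example (illustrative only): with dcb_tc[] mapping user
         * priorities 0-3 to TC0 and 4-7 to TC1, i.e. {0,0,0,0,1,1,1,1},
         * the value written above is
         *
         *     (1 << 12) | (1 << 15) | (1 << 18) | (1 << 21) = 0x00249000
         *
         * since each user priority occupies a 3-bit field at offset i * 3.
         */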
2914
2915         /* RTRPCS: DCB related */
2916         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2917
2918         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2919         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2920         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2921         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2922
2923         /* VFTA - enable all vlan filters */
2924         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2925                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2926         }
2927
2928         /* VFRE: pool enabling for receive - 16 or 32 */
2929         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2930                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2931
2932         /*
2933          * MPSAR - allow pools to read specific mac addresses
2934          * In this case, all pools should be able to read from mac addr 0
2935          */
2936         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2937         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2938
2939         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2940         for (i = 0; i < cfg->nb_pool_maps; i++) {
2941                 /* set vlan id in VF register and set the valid bit */
2942                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2943                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
2944                 /*
2945                  * Put the allowed pools in VFB reg. As we only have 16 or 32
2946                  * pools, we only need to use the first half of the register
2947                  * i.e. bits 0-31
2948                  */
2949                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2950         }
2951 }
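/*
 * Illustrative sketch (not part of the driver): the rte_eth_vmdq_dcb_conf
 * consumed above is supplied by the application, e.g. roughly:
 *
 *     struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf = {
 *             .nb_queue_pools = ETH_16_POOLS,
 *             .enable_default_pool = 0,
 *             .nb_pool_maps = 1,
 *             .pool_map = { { .vlan_id = 100, .pools = 0x1 } },
 *             .dcb_tc = { 0, 1, 2, 3, 4, 5, 6, 7 },
 *     };
 *
 * placed in dev_conf.rx_adv_conf.vmdq_dcb_conf with mq_mode set to
 * ETH_MQ_RX_VMDQ_DCB.  With 16 pools each pool carries 8 traffic classes,
 * and frames tagged with VLAN 100 are accepted by pool 0 only.
 */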
2952
2953 /**
2954  * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2955  * @hw: pointer to hardware structure
2956  * @dcb_config: pointer to ixgbe_dcb_config structure
2957  */
2958 static void
2959 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2960                struct ixgbe_dcb_config *dcb_config)
2961 {
2962         uint32_t reg;
2963         uint32_t q;
2964
2965         PMD_INIT_FUNC_TRACE();
2966         if (hw->mac.type != ixgbe_mac_82598EB) {
2967                 /* Disable the Tx desc arbiter so that MTQC can be changed */
2968                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2969                 reg |= IXGBE_RTTDCS_ARBDIS;
2970                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2971
2972                 /* Enable DCB for Tx with 8 TCs */
2973                 if (dcb_config->num_tcs.pg_tcs == 8) {
2974                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2975                 }
2976                 else {
2977                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2978                 }
2979                 if (dcb_config->vt_mode)
2980                     reg |= IXGBE_MTQC_VT_ENA;
2981                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2982
2983                 /* Disable drop for all queues */
2984                 for (q = 0; q < 128; q++)
2985                         IXGBE_WRITE_REG(hw, IXGBE_QDE,
2986                      (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2987
2988                 /* Enable the Tx desc arbiter */
2989                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2990                 reg &= ~IXGBE_RTTDCS_ARBDIS;
2991                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2992
2993                 /* Enable Security TX Buffer IFG for DCB */
2994                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2995                 reg |= IXGBE_SECTX_DCB;
2996                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2997         }
2998         return;
2999 }
3000
3001 /**
3002  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3003  * @dev: pointer to rte_eth_dev structure
3004  * @dcb_config: pointer to ixgbe_dcb_config structure
3005  */
3006 static void
3007 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3008                         struct ixgbe_dcb_config *dcb_config)
3009 {
3010         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3011                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3012         struct ixgbe_hw *hw =
3013                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3014
3015         PMD_INIT_FUNC_TRACE();
3016         if (hw->mac.type != ixgbe_mac_82598EB)
3017                 /* PF VF Transmit Enable */
3018                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3019                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3020
3021         /* Configure general DCB TX parameters */
3022         ixgbe_dcb_tx_hw_config(hw, dcb_config);
3023         return;
3024 }
3025
3026 static void
3027 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3028                         struct ixgbe_dcb_config *dcb_config)
3029 {
3030         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3031                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3032         struct ixgbe_dcb_tc_config *tc;
3033         uint8_t i,j;
3034
3035         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3036         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3037                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3038                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3039         }
3040         else {
3041                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3042                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3043         }
3044         /* User Priority to Traffic Class mapping */
3045         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3046                 j = vmdq_rx_conf->dcb_tc[i];
3047                 tc = &dcb_config->tc_config[j];
3048                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3049                                                 (uint8_t)(1 << j);
3050         }
3051 }
3052
3053 static void
3054 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3055                         struct ixgbe_dcb_config *dcb_config)
3056 {
3057         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3058                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3059         struct ixgbe_dcb_tc_config *tc;
3060         uint8_t i,j;
3061
3062         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3063         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3064                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3065                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3066         }
3067         else {
3068                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3069                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3070         }
3071
3072         /* User Priority to Traffic Class mapping */
3073         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3074                 j = vmdq_tx_conf->dcb_tc[i];
3075                 tc = &dcb_config->tc_config[j];
3076                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3077                                                 (uint8_t)(1 << j);
3078         }
3079         return;
3080 }
3081
3082 static void
3083 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3084                 struct ixgbe_dcb_config *dcb_config)
3085 {
3086         struct rte_eth_dcb_rx_conf *rx_conf =
3087                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3088         struct ixgbe_dcb_tc_config *tc;
3089         uint8_t i,j;
3090
3091         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3092         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3093
3094         /* User Priority to Traffic Class mapping */
3095         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3096                 j = rx_conf->dcb_tc[i];
3097                 tc = &dcb_config->tc_config[j];
3098                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3099                                                 (uint8_t)(1 << j);
3100         }
3101 }
3102
3103 static void
3104 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3105                 struct ixgbe_dcb_config *dcb_config)
3106 {
3107         struct rte_eth_dcb_tx_conf *tx_conf =
3108                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3109         struct ixgbe_dcb_tc_config *tc;
3110         uint8_t i,j;
3111
3112         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3113         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3114
3115         /* User Priority to Traffic Class mapping */
3116         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3117                 j = tx_conf->dcb_tc[i];
3118                 tc = &dcb_config->tc_config[j];
3119                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3120                                                 (uint8_t)(1 << j);
3121         }
3122 }
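/*
 * Worked example (illustrative only) for the dcb_tc[] translation performed
 * by the helpers above: with nb_tcs = 4 and
 *
 *     dcb_tc[] = { 0, 0, 1, 1, 2, 2, 3, 3 }
 *
 * user priorities 0-1 ride on TC0, 2-3 on TC1, 4-5 on TC2 and 6-7 on TC3,
 * and tc_config[j].path[...].up_to_tc_bitmap ends up with bit j set for every
 * traffic class j that is referenced.
 */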
3123
3124 /**
3125  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3126  * @hw: pointer to hardware structure
3127  * @dcb_config: pointer to ixgbe_dcb_config structure
3128  */
3129 static void
3130 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3131                struct ixgbe_dcb_config *dcb_config)
3132 {
3133         uint32_t reg;
3134         uint32_t vlanctrl;
3135         uint8_t i;
3136
3137         PMD_INIT_FUNC_TRACE();
3138         /*
3139          * Disable the arbiter before changing parameters
3140          * (always enable recycle mode; WSP)
3141          */
3142         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3143         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3144
3145         if (hw->mac.type != ixgbe_mac_82598EB) {
3146                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3147                 if (dcb_config->num_tcs.pg_tcs == 4) {
3148                         if (dcb_config->vt_mode)
3149                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3150                                         IXGBE_MRQC_VMDQRT4TCEN;
3151                         else {
3152                                 /* no matter the mode is DCB or DCB_RSS, just
3153                                  * set the MRQE to RSSXTCEN. RSS is controlled
3154                                  * by RSS_FIELD
3155                                  */
3156                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3157                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3158                                         IXGBE_MRQC_RTRSS4TCEN;
3159                         }
3160                 }
3161                 if (dcb_config->num_tcs.pg_tcs == 8) {
3162                         if (dcb_config->vt_mode)
3163                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3164                                         IXGBE_MRQC_VMDQRT8TCEN;
3165                         else {
3166                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3167                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3168                                         IXGBE_MRQC_RTRSS8TCEN;
3169                         }
3170                 }
3171
3172                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3173         }
3174
3175         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3176         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3177         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3178         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3179
3180         /* VFTA - enable all vlan filters */
3181         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3182                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3183         }
3184
3185         /*
3186          * Configure Rx packet plane (recycle mode; WSP) and
3187          * enable arbiter
3188          */
3189         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3190         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3191
3192         return;
3193 }
3194
3195 static void
3196 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3197                         uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3198 {
3199         switch (hw->mac.type) {
3200         case ixgbe_mac_82598EB:
3201                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3202                 break;
3203         case ixgbe_mac_82599EB:
3204         case ixgbe_mac_X540:
3205         case ixgbe_mac_X550:
3206         case ixgbe_mac_X550EM_x:
3207                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3208                                                   tsa, map);
3209                 break;
3210         default:
3211                 break;
3212         }
3213 }
3214
3215 static void
3216 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3217                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3218 {
3219         switch (hw->mac.type) {
3220         case ixgbe_mac_82598EB:
3221                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3222                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3223                 break;
3224         case ixgbe_mac_82599EB:
3225         case ixgbe_mac_X540:
3226         case ixgbe_mac_X550:
3227         case ixgbe_mac_X550EM_x:
3228                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3229                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3230                 break;
3231         default:
3232                 break;
3233         }
3234 }
3235
3236 #define DCB_RX_CONFIG  1
3237 #define DCB_TX_CONFIG  1
3238 #define DCB_TX_PB      1024
3239 /**
3240  * ixgbe_dcb_hw_configure - Enable DCB and configure
3241  * general DCB in VT mode and non-VT mode parameters
3242  * @dev: pointer to rte_eth_dev structure
3243  * @dcb_config: pointer to ixgbe_dcb_config structure
3244  */
3245 static int
3246 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3247                         struct ixgbe_dcb_config *dcb_config)
3248 {
3249         int     ret = 0;
3250         uint8_t i, pfc_en, nb_tcs;
3251         uint16_t pbsize, rx_buffer_size;
3252         uint8_t config_dcb_rx = 0;
3253         uint8_t config_dcb_tx = 0;
3254         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3255         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3256         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3257         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3258         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3259         struct ixgbe_dcb_tc_config *tc;
3260         uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3261         struct ixgbe_hw *hw =
3262                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3263
3264         switch (dev->data->dev_conf.rxmode.mq_mode) {
3265         case ETH_MQ_RX_VMDQ_DCB:
3266                 dcb_config->vt_mode = true;
3267                 if (hw->mac.type != ixgbe_mac_82598EB) {
3268                         config_dcb_rx = DCB_RX_CONFIG;
3269                         /*
3270                          * get DCB and VT RX configuration parameters
3271                          * from rte_eth_conf
3272                          */
3273                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3274                         /* Configure general VMDQ and DCB RX parameters */
3275                         ixgbe_vmdq_dcb_configure(dev);
3276                 }
3277                 break;
3278         case ETH_MQ_RX_DCB:
3279         case ETH_MQ_RX_DCB_RSS:
3280                 dcb_config->vt_mode = false;
3281                 config_dcb_rx = DCB_RX_CONFIG;
3282                 /* Get DCB RX configuration parameters from rte_eth_conf */
3283                 ixgbe_dcb_rx_config(dev, dcb_config);
3284                 /* Configure general DCB RX parameters */
3285                 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3286                 break;
3287         default:
3288                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3289                 break;
3290         }
3291         switch (dev->data->dev_conf.txmode.mq_mode) {
3292         case ETH_MQ_TX_VMDQ_DCB:
3293                 dcb_config->vt_mode = true;
3294                 config_dcb_tx = DCB_TX_CONFIG;
3295                 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3296                 ixgbe_dcb_vt_tx_config(dev, dcb_config);
3297                 /* Configure general VMDQ and DCB TX parameters */
3298                 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3299                 break;
3300
3301         case ETH_MQ_TX_DCB:
3302                 dcb_config->vt_mode = false;
3303                 config_dcb_tx = DCB_TX_CONFIG;
3304                 /* Get DCB TX configuration parameters from rte_eth_conf */
3305                 ixgbe_dcb_tx_config(dev, dcb_config);
3306                 /* Configure general DCB TX parameters */
3307                 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3308                 break;
3309         default:
3310                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3311                 break;
3312         }
3313
3314         nb_tcs = dcb_config->num_tcs.pfc_tcs;
3315         /* Unpack map */
3316         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3317         if (nb_tcs == ETH_4_TCS) {
3318                 /* Avoid un-configured priority mapping to TC0 */
3319                 uint8_t j = 4;
3320                 uint8_t mask = 0xFF;
3321                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3322                         mask = (uint8_t)(mask & (~ (1 << map[i])));
3323                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3324                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3325                                 map[j++] = i;
3326                         mask >>= 1;
3327                 }
3328                 /* Re-configure 4 TCs BW */
3329                 for (i = 0; i < nb_tcs; i++) {
3330                         tc = &dcb_config->tc_config[i];
3331                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3332                                                 (uint8_t)(100 / nb_tcs);
3333                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3334                                                 (uint8_t)(100 / nb_tcs);
3335                 }
3336                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3337                         tc = &dcb_config->tc_config[i];
3338                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3339                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3340                 }
3341         }
3342
3343         switch (hw->mac.type) {
3344         case ixgbe_mac_X550:
3345         case ixgbe_mac_X550EM_x:
3346                 rx_buffer_size = X550_RX_BUFFER_SIZE;
3347                 break;
3348         default:
3349                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3350                 break;
3351         }
3352
3353         if (config_dcb_rx) {
3354                 /* Set RX buffer size */
3355                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3356                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3357                 for (i = 0; i < nb_tcs; i++) {
3358                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3359                 }
3360                 /* zero alloc all unused TCs */
3361                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3362                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3363                 }
3364         }
3365         if (config_dcb_tx) {
3366                 /* Only support an equally distributed Tx packet buffer strategy. */
3367                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3368                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3369                 for (i = 0; i < nb_tcs; i++) {
3370                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3371                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3372                 }
3373                 /* Clear unused TCs, if any, to zero buffer size */
3374                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3375                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3376                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3377                 }
3378         }
3379
3380         /* Calculate traffic class credits */
3381         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3382                                 IXGBE_DCB_TX_CONFIG);
3383         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3384                                 IXGBE_DCB_RX_CONFIG);
3385
3386         if (config_dcb_rx) {
3387                 /* Unpack CEE standard containers */
3388                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3389                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3390                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3391                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3392                 /* Configure PG(ETS) RX */
3393                 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3394         }
3395
3396         if (config_dcb_tx) {
3397                 /* Unpack CEE standard containers */
3398                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3399                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3400                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3401                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3402                 /* Configure PG(ETS) TX */
3403                 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
3404         }
3405
3406         /* Configure queue statistics registers */
3407         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3408
3409         /* Check if the PFC is supported */
3410         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3411                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3412                 for (i = 0; i < nb_tcs; i++) {
3413                         /*
3414                          * If the TC count is 8, the default high_water is 48
3415                          * and the default low_water is 16.
3416                          */
3417                         hw->fc.high_water[i] = (pbsize * 3) / 4;
3418                         hw->fc.low_water[i] = pbsize / 4;
3419                         /* Enable pfc for this TC */
3420                         tc = &dcb_config->tc_config[i];
3421                         tc->pfc = ixgbe_dcb_pfc_enabled;
3422                 }
3423                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3424                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3425                         pfc_en &= 0x0F;
3426                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3427         }
3428
3429         return ret;
3430 }
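/*
 * Worked example (illustrative only) for the PFC thresholds programmed in
 * ixgbe_dcb_hw_configure() above: on a non-X550 part rx_buffer_size is
 * NIC_RX_BUFFER_SIZE = 0x200 (RXPBSIZE is expressed in KB), so with 8
 * traffic classes
 *
 *     pbsize     = 0x200 / 8    = 64
 *     high_water = (64 * 3) / 4 = 48
 *     low_water  = 64 / 4       = 16
 *
 * which matches the "high_water is 48 ... low_water is 16" default mentioned
 * in the comment inside the PFC loop.
 */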
3431
3432 /**
3433  * ixgbe_configure_dcb - Configure DCB  Hardware
3434  * @dev: pointer to rte_eth_dev
3435  */
3436 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3437 {
3438         struct ixgbe_dcb_config *dcb_cfg =
3439                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3440         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3441
3442         PMD_INIT_FUNC_TRACE();
3443
3444         /* check support mq_mode for DCB */
3445         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3446             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3447             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3448                 return;
3449
3450         if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3451                 return;
3452
3453         /* Configure DCB hardware */
3454         ixgbe_dcb_hw_configure(dev, dcb_cfg);
3455
3456         return;
3457 }
3458
3459 /*
3460  * VMDq is only supported on 10 GbE NICs.
3461  */
3462 static void
3463 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3464 {
3465         struct rte_eth_vmdq_rx_conf *cfg;
3466         struct ixgbe_hw *hw;
3467         enum rte_eth_nb_pools num_pools;
3468         uint32_t mrqc, vt_ctl, vlanctrl;
3469         uint32_t vmolr = 0;
3470         int i;
3471
3472         PMD_INIT_FUNC_TRACE();
3473         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3474         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3475         num_pools = cfg->nb_queue_pools;
3476
3477         ixgbe_rss_disable(dev);
3478
3479         /* MRQC: enable vmdq */
3480         mrqc = IXGBE_MRQC_VMDQEN;
3481         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3482
3483         /* PFVTCTL: turn on virtualisation and set the default pool */
3484         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3485         if (cfg->enable_default_pool)
3486                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3487         else
3488                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3489
3490         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3491
3492         for (i = 0; i < (int)num_pools; i++) {
3493                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3494                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3495         }
3496
3497         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3498         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3499         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3500         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3501
3502         /* VFTA - enable all vlan filters */
3503         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3504                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3505
3506         /* VFRE: pool enabling for receive - 64 */
3507         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3508         if (num_pools == ETH_64_POOLS)
3509                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3510
3511         /*
3512          * MPSAR - allow pools to read specific mac addresses
3513          * In this case, all pools should be able to read from mac addr 0
3514          */
3515         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3516         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3517
3518         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3519         for (i = 0; i < cfg->nb_pool_maps; i++) {
3520                 /* set vlan id in VF register and set the valid bit */
3521                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3522                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3523                 /*
3524                  * Put the allowed pools in VFB reg. As we only have 16 or 64
3525                  * pools, we only need to use the first half of the register
3526                  * i.e. bits 0-31
3527                  */
3528                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3529                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3530                                         (cfg->pool_map[i].pools & UINT32_MAX));
3531                 else
3532                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3533                                         ((cfg->pool_map[i].pools >> 32) \
3534                                         & UINT32_MAX));
3535
3536         }
3537
3538         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3539         if (cfg->enable_loop_back) {
3540                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3541                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3542                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3543         }
3544
3545         IXGBE_WRITE_FLUSH(hw);
3546 }
3547
3548 /*
3549  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3550  * @hw: pointer to hardware structure
3551  */
3552 static void
3553 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3554 {
3555         uint32_t reg;
3556         uint32_t q;
3557
3558         PMD_INIT_FUNC_TRACE();
3559         /* PF VF Transmit Enable */
3560         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3561         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3562
3563         /* Disable the Tx desc arbiter so that MTQC can be changed */
3564         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3565         reg |= IXGBE_RTTDCS_ARBDIS;
3566         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3567
3568         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3569         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3570
3571         /* Disable drop for all queues */
3572         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3573                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3574                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3575
3576         /* Enable the Tx desc arbiter */
3577         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3578         reg &= ~IXGBE_RTTDCS_ARBDIS;
3579         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3580
3581         IXGBE_WRITE_FLUSH(hw);
3582
3583         return;
3584 }
3585
3586 static int __attribute__((cold))
3587 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3588 {
3589         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3590         uint64_t dma_addr;
3591         unsigned i;
3592
3593         /* Initialize software ring entries */
3594         for (i = 0; i < rxq->nb_rx_desc; i++) {
3595                 volatile union ixgbe_adv_rx_desc *rxd;
3596                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3597                 if (mbuf == NULL) {
3598                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3599                                      (unsigned) rxq->queue_id);
3600                         return (-ENOMEM);
3601                 }
3602
3603                 rte_mbuf_refcnt_set(mbuf, 1);
3604                 mbuf->next = NULL;
3605                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3606                 mbuf->nb_segs = 1;
3607                 mbuf->port = rxq->port_id;
3608
3609                 dma_addr =
3610                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3611                 rxd = &rxq->rx_ring[i];
3612                 rxd->read.hdr_addr = 0;
3613                 rxd->read.pkt_addr = dma_addr;
3614                 rxe[i].mbuf = mbuf;
3615         }
3616
3617         return 0;
3618 }
3619
3620 static int
3621 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3622 {
3623         struct ixgbe_hw *hw;
3624         uint32_t mrqc;
3625
3626         ixgbe_rss_configure(dev);
3627
3628         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3629
3630         /* MRQC: enable VF RSS */
3631         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3632         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3633         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3634         case ETH_64_POOLS:
3635                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3636                 break;
3637
3638         case ETH_32_POOLS:
3639                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3640                 break;
3641
3642         default:
3643                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3644                 return -EINVAL;
3645         }
3646
3647         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3648
3649         return 0;
3650 }
3651
3652 static int
3653 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3654 {
3655         struct ixgbe_hw *hw =
3656                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3657
3658         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3659         case ETH_64_POOLS:
3660                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3661                         IXGBE_MRQC_VMDQEN);
3662                 break;
3663
3664         case ETH_32_POOLS:
3665                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3666                         IXGBE_MRQC_VMDQRT4TCEN);
3667                 break;
3668
3669         case ETH_16_POOLS:
3670                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3671                         IXGBE_MRQC_VMDQRT8TCEN);
3672                 break;
3673         default:
3674                 PMD_INIT_LOG(ERR,
3675                         "invalid pool number in IOV mode");
3676                 break;
3677         }
3678         return 0;
3679 }
3680
3681 static int
3682 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3683 {
3684         struct ixgbe_hw *hw =
3685                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3686
3687         if (hw->mac.type == ixgbe_mac_82598EB)
3688                 return 0;
3689
3690         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3691                 /*
3692                  * SRIOV inactive scheme
3693                  * any DCB/RSS w/o VMDq multi-queue setting
3694                  */
3695                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3696                 case ETH_MQ_RX_RSS:
3697                 case ETH_MQ_RX_DCB_RSS:
3698                 case ETH_MQ_RX_VMDQ_RSS:
3699                         ixgbe_rss_configure(dev);
3700                         break;
3701
3702                 case ETH_MQ_RX_VMDQ_DCB:
3703                         ixgbe_vmdq_dcb_configure(dev);
3704                         break;
3705
3706                 case ETH_MQ_RX_VMDQ_ONLY:
3707                         ixgbe_vmdq_rx_hw_configure(dev);
3708                         break;
3709
3710                 case ETH_MQ_RX_NONE:
3711                 default:
3712                         /* if mq_mode is none, disable rss mode.*/
3713                         ixgbe_rss_disable(dev);
3714                         break;
3715                 }
3716         } else {
3717                 /*
3718                  * SRIOV active scheme
3719                  * Support RSS together with VMDq & SRIOV
3720                  */
3721                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3722                 case ETH_MQ_RX_RSS:
3723                 case ETH_MQ_RX_VMDQ_RSS:
3724                         ixgbe_config_vf_rss(dev);
3725                         break;
3726
3727                 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3728                 case ETH_MQ_RX_VMDQ_DCB:
3729                 case ETH_MQ_RX_VMDQ_DCB_RSS:
3730                         PMD_INIT_LOG(ERR,
3731                                 "DCB is not supported with VMDq & SRIOV");
3732                         return -1;
3733                 default:
3734                         ixgbe_config_vf_default(dev);
3735                         break;
3736                 }
3737         }
3738
3739         return 0;
3740 }
3741
3742 static int
3743 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3744 {
3745         struct ixgbe_hw *hw =
3746                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3747         uint32_t mtqc;
3748         uint32_t rttdcs;
3749
3750         if (hw->mac.type == ixgbe_mac_82598EB)
3751                 return 0;
3752
3753         /* disable arbiter before setting MTQC */
3754         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3755         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3756         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3757
3758         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3759                 /*
3760                  * SRIOV inactive scheme
3761                  * any DCB w/o VMDq multi-queue setting
3762                  */
3763                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3764                         ixgbe_vmdq_tx_hw_configure(hw);
3765                 else {
3766                         mtqc = IXGBE_MTQC_64Q_1PB;
3767                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3768                 }
3769         } else {
3770                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3771
3772                 /*
3773                  * SRIOV active scheme
3774                  * FIXME if support DCB together with VMDq & SRIOV
3775                  */
3776                 case ETH_64_POOLS:
3777                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3778                         break;
3779                 case ETH_32_POOLS:
3780                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3781                         break;
3782                 case ETH_16_POOLS:
3783                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3784                                 IXGBE_MTQC_8TC_8TQ;
3785                         break;
3786                 default:
3787                         mtqc = IXGBE_MTQC_64Q_1PB;
3788                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3789                 }
3790                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3791         }
3792
3793         /* re-enable arbiter */
3794         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3795         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3796
3797         return 0;
3798 }
3799
3800 /**
3801  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3802  *
3803  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3804  * spec rev. 3.0 chapter 8.2.3.8.13.
3805  *
3806  * @pool Memory pool of the Rx queue
3807  */
3808 static inline uint32_t
3809 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3810 {
3811         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3812
3813         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3814         uint16_t maxdesc =
3815                 IPV4_MAX_PKT_LEN /
3816                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3817
3818         if (maxdesc >= 16)
3819                 return IXGBE_RSCCTL_MAXDESC_16;
3820         else if (maxdesc >= 8)
3821                 return IXGBE_RSCCTL_MAXDESC_8;
3822         else if (maxdesc >= 4)
3823                 return IXGBE_RSCCTL_MAXDESC_4;
3824         else
3825                 return IXGBE_RSCCTL_MAXDESC_1;
3826 }
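/*
 * Worked example (illustrative only), assuming the common mbuf configuration
 * of a 2048-byte data buffer plus RTE_PKTMBUF_HEADROOM and an IPV4_MAX_PKT_LEN
 * of 65535 bytes:
 *
 *     maxdesc = 65535 / 2048 = 31   ->   IXGBE_RSCCTL_MAXDESC_16
 *
 * i.e. an RSC aggregation is capped at 16 descriptors, which keeps
 * MAXDESC * SRRCTL.BSIZEPKT (16 * 2 KB = 32 KB) safely under the 64 KB limit.
 */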
3827
3828 /**
3829  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3830  * interrupt
3831  *
3832  * (Taken from FreeBSD tree)
3833  * (yes this is all very magic and confusing :)
3834  *
3835  * @dev port handle
3836  * @entry the register array entry
3837  * @vector the MSIX vector for this queue
3838  * @type RX/TX/MISC
3839  */
3840 static void
3841 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3842 {
3843         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3844         u32 ivar, index;
3845
3846         vector |= IXGBE_IVAR_ALLOC_VAL;
3847
3848         switch (hw->mac.type) {
3849
3850         case ixgbe_mac_82598EB:
3851                 if (type == -1)
3852                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3853                 else
3854                         entry += (type * 64);
3855                 index = (entry >> 2) & 0x1F;
3856                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3857                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3858                 ivar |= (vector << (8 * (entry & 0x3)));
3859                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3860                 break;
3861
3862         case ixgbe_mac_82599EB:
3863         case ixgbe_mac_X540:
3864                 if (type == -1) { /* MISC IVAR */
3865                         index = (entry & 1) * 8;
3866                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3867                         ivar &= ~(0xFF << index);
3868                         ivar |= (vector << index);
3869                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3870                 } else {        /* RX/TX IVARS */
3871                         index = (16 * (entry & 1)) + (8 * type);
3872                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3873                         ivar &= ~(0xFF << index);
3874                         ivar |= (vector << index);
3875                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3876                 }
3877
3878                 break;
3879
3880         default:
3881                 break;
3882         }
3883 }
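/*
 * Worked example (illustrative only): on an 82599, mapping RX queue 5
 * (entry = 5, type = 0) to MSI-X vector 2 takes the 82599/X540 branch above
 * with
 *
 *     index = 16 * (5 & 1) + 8 * 0 = 16
 *
 * so the byte (2 | IXGBE_IVAR_ALLOC_VAL) is written into bits 23:16 of
 * IVAR(5 >> 1), i.e. IVAR(2).
 */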
3884
3885 void __attribute__((cold))
3886 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3887 {
3888         uint16_t i, rx_using_sse;
3889         struct ixgbe_adapter *adapter =
3890                 (struct ixgbe_adapter *)dev->data->dev_private;
3891
3892         /*
3893          * In order to allow Vector Rx there are a few configuration
3894          * conditions to be met and Rx Bulk Allocation should be allowed.
3895          */
3896         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3897             !adapter->rx_bulk_alloc_allowed) {
3898                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3899                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
3900                                     "not enabled",
3901                              dev->data->port_id);
3902
3903                 adapter->rx_vec_allowed = false;
3904         }
3905
3906         /*
3907          * Initialize the appropriate LRO callback.
3908          *
3909          * If all queues satisfy the bulk allocation preconditions
3910          * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3911          * Otherwise use a single allocation version.
3912          */
3913         if (dev->data->lro) {
3914                 if (adapter->rx_bulk_alloc_allowed) {
3915                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3916                                            "allocation version");
3917                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3918                 } else {
3919                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3920                                            "allocation version");
3921                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3922                 }
3923         } else if (dev->data->scattered_rx) {
3924                 /*
3925                  * Set the non-LRO scattered callback: there are Vector and
3926                  * single allocation versions.
3927                  */
3928                 if (adapter->rx_vec_allowed) {
3929                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3930                                             "callback (port=%d).",
3931                                      dev->data->port_id);
3932
3933                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3934                 } else if (adapter->rx_bulk_alloc_allowed) {
3935                         PMD_INIT_LOG(DEBUG, "Using a Scattered Rx with bulk "
3936                                            "allocation callback (port=%d).",
3937                                      dev->data->port_id);
3938                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3939                 } else {
3940                         PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
3941                                             "single allocation) "
3942                                             "Scattered Rx callback "
3943                                             "(port=%d).",
3944                                      dev->data->port_id);
3945
3946                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3947                 }
3948         /*
3949          * Below we set "simple" callbacks according to port/queues parameters.
3950          * If parameters allow we are going to choose between the following
3951          * callbacks:
3952          *    - Vector
3953          *    - Bulk Allocation
3954          *    - Single buffer allocation (the simplest one)
3955          */
3956         } else if (adapter->rx_vec_allowed) {
3957                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
3958                                     "burst size is no less than %d (port=%d).",
3959                              RTE_IXGBE_DESCS_PER_LOOP,
3960                              dev->data->port_id);
3961
3962                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
3963         } else if (adapter->rx_bulk_alloc_allowed) {
3964                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
3965                                     "satisfied. Rx Burst Bulk Alloc function "
3966                                     "will be used on port=%d.",
3967                              dev->data->port_id);
3968
3969                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
3970         } else {
3971                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
3972                                     "satisfied, or Scattered Rx is requested "
3973                                     "(port=%d).",
3974                              dev->data->port_id);
3975
3976                 dev->rx_pkt_burst = ixgbe_recv_pkts;
3977         }
3978
3979         /* Propagate information about RX function choice through all queues. */
3980
3981         rx_using_sse =
3982                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
3983                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
3984
3985         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3986                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3987                 rxq->rx_using_sse = rx_using_sse;
3988         }
3989 }
3990
3991 /**
3992  * ixgbe_set_rsc - configure RSC related port HW registers
3993  *
3994  * Configures the port's RSC related registers according to chapter 4.6.7.2
3995  * of the 82599 Spec (x540 configuration is virtually the same).
3996  *
3997  * @dev port handle
3998  *
3999  * Returns 0 in case of success or a non-zero error code
4000  */
4001 static int
4002 ixgbe_set_rsc(struct rte_eth_dev *dev)
4003 {
4004         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4005         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4006         struct rte_eth_dev_info dev_info = { 0 };
4007         bool rsc_capable = false;
4008         uint16_t i;
4009         uint32_t rdrxctl;
4010
4011         /* Sanity check */
4012         dev->dev_ops->dev_infos_get(dev, &dev_info);
4013         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4014                 rsc_capable = true;
4015
4016         if (!rsc_capable && rx_conf->enable_lro) {
4017                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4018                                    "support it");
4019                 return -EINVAL;
4020         }
4021
4022         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4023
4024         if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4025                 /*
4026                  * According to chapter 4.6.7.2.1 of the Spec Rev. 3.0,
4027                  * RSC configuration requires HW CRC stripping to be
4028                  * enabled. If the user requested both HW CRC stripping
4029                  * off and RSC on, return an error.
4030                  */
4031                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4032                                     "is disabled");
4033                 return -EINVAL;
4034         }
4035
4036         /* RFCTL configuration  */
4037         if (rsc_capable) {
4038                 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4039                 if (rx_conf->enable_lro)
4040                         /*
4041                          * Since NFS packet coalescing is not supported, clear
4042                          * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4043                          * enabled.
4044                          */
4045                         rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4046                                    IXGBE_RFCTL_NFSR_DIS);
4047                 else
4048                         rfctl |= IXGBE_RFCTL_RSC_DIS;
4049
4050                 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4051         }
4052
4053         /* If LRO hasn't been requested - we are done here. */
4054         if (!rx_conf->enable_lro)
4055                 return 0;
4056
4057         /* Set RDRXCTL.RSCACKC bit */
4058         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4059         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4060         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4061
4062         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4063         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4064                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4065                 uint32_t srrctl =
4066                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4067                 uint32_t rscctl =
4068                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4069                 uint32_t psrtype =
4070                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4071                 uint32_t eitr =
4072                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4073
4074                 /*
4075                  * ixgbe PMD doesn't support header-split at the moment.
4076                  *
4077                  * Following chapter 4.6.7.2.1 of the 82599/x540 Spec,
4078                  * if RSC is enabled the SRRCTL[n].BSIZEHEADER field
4079                  * should be configured even if header split is not
4080                  * enabled. We configure it to 128 bytes, following the
4081                  * recommendation in the spec.
4082                  */
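                /*
                 * Note: the BSIZEHEADER field is in 64-byte units, so the
                 * shift/mask below encodes a field value of 2, i.e.
                 * 2 * 64 = 128 bytes.
                 */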
4083                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4084                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4085                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4086
4087                 /*
4088                  * TODO: Consider setting the Receive Descriptor Minimum
4089                  * Threshold Size for an RSC case. This is not an obviously
4090                  * beneficial option, but one worth considering...
4091                  */
4092
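                /*
                 * Enable RSC on the queue; MAXDESC (derived from the mbuf
                 * buffer size) bounds how many descriptors one aggregation
                 * may span so that it cannot exceed the maximum RSC length.
                 */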
4093                 rscctl |= IXGBE_RSCCTL_RSCEN;
4094                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4095                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4096
4097                 /*
4098                  * RSC: Set ITR interval corresponding to 2K ints/s.
4099                  *
4100                  * Full-sized RSC aggregations for a 10Gb/s link will
4101                  * arrive at about 20K aggregation/s rate.
4102                  *
4103                  * A 2K ints/s rate will cause only 10% of the
4104                  * aggregations to be closed due to the interrupt timer
4105                  * expiration for a streaming at wire-speed case.
4106                  *
4107                  * For a sparse streaming case this setting will yield
4108                  * at most 500us latency for a single RSC aggregation.
4109                  */
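                /*
                 * In other words: a 500us interval is 1/500us = 2K ints/s,
                 * and 2K / 20K = 10% of full-sized aggregations are closed
                 * by the timer at wire speed.
                 */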
4110                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4111                 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4112
4113                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4114                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4115                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4116                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4117
4118                 /*
4119                  * RSC requires the mapping of the queue to the
4120                  * interrupt vector.
4121                  */
4122                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4123         }
4124
4125         dev->data->lro = 1;
4126
4127         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4128
4129         return 0;
4130 }
4131
4132 /*
4133  * Initializes Receive Unit.
4134  */
4135 int __attribute__((cold))
4136 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4137 {
4138         struct ixgbe_hw     *hw;
4139         struct ixgbe_rx_queue *rxq;
4140         uint64_t bus_addr;
4141         uint32_t rxctrl;
4142         uint32_t fctrl;
4143         uint32_t hlreg0;
4144         uint32_t maxfrs;
4145         uint32_t srrctl;
4146         uint32_t rdrxctl;
4147         uint32_t rxcsum;
4148         uint16_t buf_size;
4149         uint16_t i;
4150         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4151         int rc;
4152
4153         PMD_INIT_FUNC_TRACE();
4154         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4155
4156         /*
4157          * Make sure receives are disabled while setting
4158          * up the RX context (registers, descriptor rings, etc.).
4159          */
4160         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4161         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4162
4163         /* Enable receipt of broadcast frames */
4164         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4165         fctrl |= IXGBE_FCTRL_BAM;
4166         fctrl |= IXGBE_FCTRL_DPF;
4167         fctrl |= IXGBE_FCTRL_PMCF;
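        /*
         * BAM accepts broadcast frames, DPF discards pause frames, and PMCF
         * passes other MAC control frames up to the host.
         */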
4168         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4169
4170         /*
4171          * Configure CRC stripping, if any.
4172          */
4173         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4174         if (rx_conf->hw_strip_crc)
4175                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4176         else
4177                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4178
4179         /*
4180          * Configure jumbo frame support, if any.
4181          */
4182         if (rx_conf->jumbo_frame == 1) {
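                /*
                 * The maximum frame size (MFS) field occupies the upper
                 * 16 bits of MAXFRS, hence the shift by 16 below.
                 */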
4183                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4184                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4185                 maxfrs &= 0x0000FFFF;
4186                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4187                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4188         } else
4189                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4190
4191         /*
4192          * If loopback mode is configured for 82599, set LPBK bit.
4193          */
4194         if (hw->mac.type == ixgbe_mac_82599EB &&
4195                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4196                 hlreg0 |= IXGBE_HLREG0_LPBK;
4197         else
4198                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4199
4200         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4201
4202         /* Setup RX queues */
4203         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4204                 rxq = dev->data->rx_queues[i];
4205
4206                 /*
4207                  * Reset crc_len in case it was changed after queue setup by a
4208                  * call to configure.
4209                  */
4210                 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4211
4212                 /* Setup the Base and Length of the Rx Descriptor Rings */
4213                 bus_addr = rxq->rx_ring_phys_addr;
4214                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4215                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4216                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4217                                 (uint32_t)(bus_addr >> 32));
4218                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4219                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4220                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4221                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4222
4223                 /* Configure the SRRCTL register */
4224 #ifdef RTE_HEADER_SPLIT_ENABLE
4225                 /*
4226                  * Configure Header Split
4227                  */
4228                 if (rx_conf->header_split) {
4229                         if (hw->mac.type == ixgbe_mac_82599EB) {
4230                                 /* Must setup the PSRTYPE register */
4231                                 uint32_t psrtype;
4232                                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4233                                         IXGBE_PSRTYPE_UDPHDR   |
4234                                         IXGBE_PSRTYPE_IPV4HDR  |
4235                                         IXGBE_PSRTYPE_IPV6HDR;
4236                                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4237                         }
4238                         srrctl = ((rx_conf->split_hdr_size <<
4239                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4240                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4241                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4242                 } else
4243 #endif
4244                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4245
4246                 /* Set if packets are dropped when no descriptors available */
4247                 if (rxq->drop_en)
4248                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4249
4250                 /*
4251                  * Configure the RX buffer size in the BSIZEPACKET field of
4252                  * the SRRCTL register of the queue.
4253                  * The value is in 1 KB resolution. Valid values can be from
4254                  * 1 KB to 16 KB.
4255                  */
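                /*
                 * For example, with a mempool element providing a 2 KB data
                 * room on top of RTE_PKTMBUF_HEADROOM (a common setting, not
                 * a requirement), buf_size is 2048 and the BSIZEPKT encoding
                 * below selects 2 KB Rx buffers.
                 */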
4256                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4257                         RTE_PKTMBUF_HEADROOM);
4258                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4259                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4260
4261                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4262
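                /*
                 * Re-read the effective buffer size from the value just
                 * programmed: the 1 KB granularity of BSIZEPACKET may have
                 * rounded the mbuf data room down.
                 */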
4263                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4264                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4265
4266                 /* Account for dual VLAN tag length when checking if scattered Rx is needed */
4267                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4268                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4269                         dev->data->scattered_rx = 1;
4270         }
4271
4272         if (rx_conf->enable_scatter)
4273                 dev->data->scattered_rx = 1;
4274
4275         /*
4276          * Device configured with multiple RX queues.
4277          */
4278         ixgbe_dev_mq_rx_configure(dev);
4279
4280         /*
4281          * Setup the Checksum Register.
4282          * Disable Full-Packet Checksum, which is mutually exclusive with RSS.
4283          * Enable IP/L4 checksum computation by hardware if requested to do so.
4284          */
4285         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4286         rxcsum |= IXGBE_RXCSUM_PCSD;
4287         if (rx_conf->hw_ip_checksum)
4288                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4289         else
4290                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4291
4292         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4293
4294         if (hw->mac.type == ixgbe_mac_82599EB ||
4295             hw->mac.type == ixgbe_mac_X540) {
4296                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4297                 if (rx_conf->hw_strip_crc)
4298                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4299                 else
4300                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4301                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4302                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4303         }
4304
4305         rc = ixgbe_set_rsc(dev);
4306         if (rc)
4307                 return rc;
4308
4309         ixgbe_set_rx_function(dev);
4310
4311         return 0;
4312 }
4313
4314 /*
4315  * Initializes Transmit Unit.
4316  */
4317 void __attribute__((cold))
4318 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4319 {
4320         struct ixgbe_hw     *hw;
4321         struct ixgbe_tx_queue *txq;
4322         uint64_t bus_addr;
4323         uint32_t hlreg0;
4324         uint32_t txctrl;
4325         uint16_t i;
4326
4327         PMD_INIT_FUNC_TRACE();
4328         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4329
4330         /* Enable TX CRC (checksum offload requirement) and hw padding
4331          * (TSO requirement) */
4332         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4333         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4334         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4335
4336         /* Setup the Base and Length of the Tx Descriptor Rings */
4337         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4338                 txq = dev->data->tx_queues[i];
4339
4340                 bus_addr = txq->tx_ring_phys_addr;
4341                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4342                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4343                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4344                                 (uint32_t)(bus_addr >> 32));
4345                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4346                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4347                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4348                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4349                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4350
4351                 /*
4352                  * Disable Tx Head Writeback RO bit, since this hoses
4353                  * bookkeeping if things aren't delivered in order.
4354                  */
4355                 switch (hw->mac.type) {
4356                         case ixgbe_mac_82598EB:
4357                                 txctrl = IXGBE_READ_REG(hw,
4358                                                         IXGBE_DCA_TXCTRL(txq->reg_idx));
4359                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4360                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4361                                                 txctrl);
4362                                 break;
4363
4364                         case ixgbe_mac_82599EB:
4365                         case ixgbe_mac_X540:
4366                         case ixgbe_mac_X550:
4367                         case ixgbe_mac_X550EM_x:
4368                         default:
4369                                 txctrl = IXGBE_READ_REG(hw,
4370                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4371                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4372                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4373                                                 txctrl);
4374                                 break;
4375                 }
4376         }
4377
4378         /* Device configured with multiple TX queues. */
4379         ixgbe_dev_mq_tx_configure(dev);
4380 }
4381
4382 /*
4383  * Set up link for 82599 loopback mode Tx->Rx.
4384  */
4385 static inline void __attribute__((cold))
4386 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4387 {
4388         PMD_INIT_FUNC_TRACE();
4389
4390         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4391                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4392                                 IXGBE_SUCCESS) {
4393                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4394                         /* ignore error */
4395                         return;
4396                 }
4397         }
4398
4399         /* Restart link */
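        /*
         * Force link up (FLU) in 10G, no-autonegotiation mode so that
         * transmitted frames are looped back to the receive path.
         */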
4400         IXGBE_WRITE_REG(hw,
4401                         IXGBE_AUTOC,
4402                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4403         ixgbe_reset_pipeline_82599(hw);
4404
4405         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4406         msec_delay(50);
4407 }
4408
4409
4410 /*
4411  * Start Transmit and Receive Units.
4412  */
4413 int __attribute__((cold))
4414 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4415 {
4416         struct ixgbe_hw     *hw;
4417         struct ixgbe_tx_queue *txq;
4418         struct ixgbe_rx_queue *rxq;
4419         uint32_t txdctl;
4420         uint32_t dmatxctl;
4421         uint32_t rxctrl;
4422         uint16_t i;
4423         int ret = 0;
4424
4425         PMD_INIT_FUNC_TRACE();
4426         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427
4428         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4429                 txq = dev->data->tx_queues[i];
4430                 /* Setup Transmit Threshold Registers */
4431                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4432                 txdctl |= txq->pthresh & 0x7F;
4433                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4434                 txdctl |= ((txq->wthresh & 0x7F) << 16);
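                /*
                 * TXDCTL packs the prefetch (PTHRESH), host (HTHRESH) and
                 * write-back (WTHRESH) thresholds into bits 6:0, 14:8 and
                 * 22:16 respectively.
                 */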
4435                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4436         }
4437
4438         if (hw->mac.type != ixgbe_mac_82598EB) {
4439                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4440                 dmatxctl |= IXGBE_DMATXCTL_TE;
4441                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4442         }
4443
4444         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4445                 txq = dev->data->tx_queues[i];
4446                 if (!txq->tx_deferred_start) {
4447                         ret = ixgbe_dev_tx_queue_start(dev, i);
4448                         if (ret < 0)
4449                                 return ret;
4450                 }
4451         }
4452
4453         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4454                 rxq = dev->data->rx_queues[i];
4455                 if (!rxq->rx_deferred_start) {
4456                         ret = ixgbe_dev_rx_queue_start(dev, i);
4457                         if (ret < 0)
4458                                 return ret;
4459                 }
4460         }
4461
4462         /* Enable Receive engine */
4463         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4464         if (hw->mac.type == ixgbe_mac_82598EB)
4465                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4466         rxctrl |= IXGBE_RXCTRL_RXEN;
4467         hw->mac.ops.enable_rx_dma(hw, rxctrl);
4468
4469         /* If loopback mode is enabled for 82599, set up the link accordingly */
4470         if (hw->mac.type == ixgbe_mac_82599EB &&
4471                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4472                 ixgbe_setup_loopback_link_82599(hw);
4473
4474         return 0;
4475 }
4476
4477 /*
4478  * Start Receive Units for specified queue.
4479  */
4480 int __attribute__((cold))
4481 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4482 {
4483         struct ixgbe_hw     *hw;
4484         struct ixgbe_rx_queue *rxq;
4485         uint32_t rxdctl;
4486         int poll_ms;
4487
4488         PMD_INIT_FUNC_TRACE();
4489         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4490
4491         if (rx_queue_id < dev->data->nb_rx_queues) {
4492                 rxq = dev->data->rx_queues[rx_queue_id];
4493
4494                 /* Allocate buffers for descriptor rings */
4495                 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4496                         PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4497                                      rx_queue_id);
4498                         return -1;
4499                 }
4500                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4501                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4502                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4503
4504                 /* Wait until RX Enable ready */
4505                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4506                 do {
4507                         rte_delay_ms(1);
4508                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4509                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4510                 if (!poll_ms)
4511                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4512                                      rx_queue_id);
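                /*
                 * Make the descriptor ring writes globally visible before
                 * handing the ring to HW: head at 0, tail at the last
                 * descriptor.
                 */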
4513                 rte_wmb();
4514                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4515                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
4516                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4517         } else
4518                 return -1;
4519
4520         return 0;
4521 }
4522
4523 /*
4524  * Stop Receive Units for specified queue.
4525  */
4526 int __attribute__((cold))
4527 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4528 {
4529         struct ixgbe_hw     *hw;
4530         struct ixgbe_adapter *adapter =
4531                 (struct ixgbe_adapter *)dev->data->dev_private;
4532         struct ixgbe_rx_queue *rxq;
4533         uint32_t rxdctl;
4534         int poll_ms;
4535
4536         PMD_INIT_FUNC_TRACE();
4537         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4538
4539         if (rx_queue_id < dev->data->nb_rx_queues) {
4540                 rxq = dev->data->rx_queues[rx_queue_id];
4541
4542                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4543                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4544                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4545
4546                 /* Wait until the RX Enable bit clears */
4547                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4548                 do {
4549                         rte_delay_ms(1);
4550                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4551                 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4552                 if (!poll_ms)
4553                         PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4554                                      rx_queue_id);
4555
4556                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4557
4558                 ixgbe_rx_queue_release_mbufs(rxq);
4559                 ixgbe_reset_rx_queue(adapter, rxq);
4560                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4561         } else
4562                 return -1;
4563
4564         return 0;
4565 }
4566
4567
4568 /*
4569  * Start Transmit Units for specified queue.
4570  */
4571 int __attribute__((cold))
4572 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4573 {
4574         struct ixgbe_hw     *hw;
4575         struct ixgbe_tx_queue *txq;
4576         uint32_t txdctl;
4577         int poll_ms;
4578
4579         PMD_INIT_FUNC_TRACE();
4580         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4581
4582         if (tx_queue_id < dev->data->nb_tx_queues) {
4583                 txq = dev->data->tx_queues[tx_queue_id];
4584                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4585                 txdctl |= IXGBE_TXDCTL_ENABLE;
4586                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4587
4588                 /* Wait until TX Enable ready */
4589                 if (hw->mac.type == ixgbe_mac_82599EB) {
4590                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4591                         do {
4592                                 rte_delay_ms(1);
4593                                 txdctl = IXGBE_READ_REG(hw,
4594                                         IXGBE_TXDCTL(txq->reg_idx));
4595                         } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4596                         if (!poll_ms)
4597                                 PMD_INIT_LOG(ERR, "Could not enable "
4598                                              "Tx Queue %d", tx_queue_id);
4599                 }
4600                 rte_wmb();
4601                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4602                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4603                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4604         } else
4605                 return -1;
4606
4607         return 0;
4608 }
4609
4610 /*
4611  * Stop Transmit Units for specified queue.
4612  */
4613 int __attribute__((cold))
4614 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4615 {
4616         struct ixgbe_hw     *hw;
4617         struct ixgbe_tx_queue *txq;
4618         uint32_t txdctl;
4619         uint32_t txtdh, txtdt;
4620         int poll_ms;
4621
4622         PMD_INIT_FUNC_TRACE();
4623         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4624
4625         if (tx_queue_id < dev->data->nb_tx_queues) {
4626                 txq = dev->data->tx_queues[tx_queue_id];
4627
4628                 /* Wait until TX queue is empty */
4629                 if (hw->mac.type == ixgbe_mac_82599EB) {
4630                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4631                         do {
4632                                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4633                                 txtdh = IXGBE_READ_REG(hw,
4634                                                 IXGBE_TDH(txq->reg_idx));
4635                                 txtdt = IXGBE_READ_REG(hw,
4636                                                 IXGBE_TDT(txq->reg_idx));
4637                         } while (--poll_ms && (txtdh != txtdt));
4638                         if (!poll_ms)
4639                                 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4640                                              "when stopping.", tx_queue_id);
4641                 }
4642
4643                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4644                 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4645                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4646
4647                 /* Wait until the TX Enable bit clears */
4648                 if (hw->mac.type == ixgbe_mac_82599EB) {
4649                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4650                         do {
4651                                 rte_delay_ms(1);
4652                                 txdctl = IXGBE_READ_REG(hw,
4653                                                 IXGBE_TXDCTL(txq->reg_idx));
4654                         } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4655                         if (!poll_ms)
4656                                 PMD_INIT_LOG(ERR, "Could not disable "
4657                                              "Tx Queue %d", tx_queue_id);
4658                 }
4659
4660                 if (txq->ops != NULL) {
4661                         txq->ops->release_mbufs(txq);
4662                         txq->ops->reset(txq);
4663                 }
4664                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4665         } else
4666                 return -1;
4667
4668         return 0;
4669 }
4670
4671 void
4672 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4673         struct rte_eth_rxq_info *qinfo)
4674 {
4675         struct ixgbe_rx_queue *rxq;
4676
4677         rxq = dev->data->rx_queues[queue_id];
4678
4679         qinfo->mp = rxq->mb_pool;
4680         qinfo->scattered_rx = dev->data->scattered_rx;
4681         qinfo->nb_desc = rxq->nb_rx_desc;
4682
4683         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4684         qinfo->conf.rx_drop_en = rxq->drop_en;
4685         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4686 }
4687
4688 void
4689 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4690         struct rte_eth_txq_info *qinfo)
4691 {
4692         struct ixgbe_tx_queue *txq;
4693
4694         txq = dev->data->tx_queues[queue_id];
4695
4696         qinfo->nb_desc = txq->nb_tx_desc;
4697
4698         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4699         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4700         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4701
4702         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4703         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4704         qinfo->conf.txq_flags = txq->txq_flags;
4705         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4706 }
4707
4708 /*
4709  * [VF] Initializes Receive Unit.
4710  */
4711 int __attribute__((cold))
4712 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4713 {
4714         struct ixgbe_hw     *hw;
4715         struct ixgbe_rx_queue *rxq;
4716         uint64_t bus_addr;
4717         uint32_t srrctl, psrtype = 0;
4718         uint16_t buf_size;
4719         uint16_t i;
4720         int ret;
4721
4722         PMD_INIT_FUNC_TRACE();
4723         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4724
4725         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4726                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
4727                         "it must be a power of 2");
4728                 return -1;
4729         }
4730
4731         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4732                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
4733                         "it must be less than or equal to %d",
4734                         hw->mac.max_rx_queues);
4735                 return -1;
4736         }
4737
4738         /*
4739          * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4740          * disables packet receipt for the VF if the PF MTU is > 1500.
4741          * This is done to deal with the 82599 limitation that forces
4742          * the PF and all VFs to share the same MTU.
4743          * The PF driver then re-enables packet receipt for the VF when
4744          * the VF driver issues an IXGBE_VF_SET_LPE request.
4745          * In the meantime, the VF device cannot be used, even if the VF driver
4746          * and the Guest VM network stack are ready to accept packets with a
4747          * size up to the PF MTU.
4748          * As a work-around to this PF behaviour, force the call to
4749          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4750          * packet receipt on the VF works in all cases.
4751          */
4752         ixgbevf_rlpml_set_vf(hw,
4753                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4754
4755         /* Setup RX queues */
4756         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4757                 rxq = dev->data->rx_queues[i];
4758
4759                 /* Allocate buffers for descriptor rings */
4760                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4761                 if (ret)
4762                         return ret;
4763
4764                 /* Setup the Base and Length of the Rx Descriptor Rings */
4765                 bus_addr = rxq->rx_ring_phys_addr;
4766
4767                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4768                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4769                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4770                                 (uint32_t)(bus_addr >> 32));
4771                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4772                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4773                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4774                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4775
4776
4777                 /* Configure the SRRCTL register */
4778 #ifdef RTE_HEADER_SPLIT_ENABLE
4779                 /*
4780                  * Configure Header Split
4781                  */
4782                 if (dev->data->dev_conf.rxmode.header_split) {
4783                         srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4784                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4785                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4786                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4787                 } else
4788 #endif
4789                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4790
4791                 /* Set if packets are dropped when no descriptors available */
4792                 if (rxq->drop_en)
4793                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4794
4795                 /*
4796                  * Configure the RX buffer size in the BSIZEPACKET field of
4797                  * the SRRCTL register of the queue.
4798                  * The value is in 1 KB resolution. Valid values can be from
4799                  * 1 KB to 16 KB.
4800                  */
4801                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4802                         RTE_PKTMBUF_HEADROOM);
4803                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4804                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4805
4806                 /*
4807                  * VF modification to write virtual function SRRCTL register
4808                  */
4809                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4810
4811                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4812                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4813
4814                 if (dev->data->dev_conf.rxmode.enable_scatter ||
4815                     /* It adds dual VLAN length for supporting dual VLAN */
4816                     (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4817                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4818                         if (!dev->data->scattered_rx)
4819                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4820                         dev->data->scattered_rx = 1;
4821                 }
4822         }
4823
4824 #ifdef RTE_HEADER_SPLIT_ENABLE
4825         if (dev->data->dev_conf.rxmode.header_split)
4826                 /* Must setup the PSRTYPE register */
4827                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4828                         IXGBE_PSRTYPE_UDPHDR   |
4829                         IXGBE_PSRTYPE_IPV4HDR  |
4830                         IXGBE_PSRTYPE_IPV6HDR;
4831 #endif
4832
4833         /* Set RQPL for VF RSS according to the maximum number of Rx queues */
4834         psrtype |= (dev->data->nb_rx_queues >> 1) <<
4835                 IXGBE_PSRTYPE_RQPL_SHIFT;
4836         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4837
4838         ixgbe_set_rx_function(dev);
4839
4840         return 0;
4841 }
4842
4843 /*
4844  * [VF] Initializes Transmit Unit.
4845  */
4846 void __attribute__((cold))
4847 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4848 {
4849         struct ixgbe_hw     *hw;
4850         struct ixgbe_tx_queue *txq;
4851         uint64_t bus_addr;
4852         uint32_t txctrl;
4853         uint16_t i;
4854
4855         PMD_INIT_FUNC_TRACE();
4856         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4857
4858         /* Setup the Base and Length of the Tx Descriptor Rings */
4859         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4860                 txq = dev->data->tx_queues[i];
4861                 bus_addr = txq->tx_ring_phys_addr;
4862                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4863                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4864                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4865                                 (uint32_t)(bus_addr >> 32));
4866                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4867                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4868                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4869                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4870                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4871
4872                 /*
4873                  * Disable Tx Head Writeback RO bit, since this hoses
4874                  * bookkeeping if things aren't delivered in order.
4875                  */
4876                 txctrl = IXGBE_READ_REG(hw,
4877                                 IXGBE_VFDCA_TXCTRL(i));
4878                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4879                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4880                                 txctrl);
4881         }
4882 }
4883
4884 /*
4885  * [VF] Start Transmit and Receive Units.
4886  */
4887 void __attribute__((cold))
4888 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4889 {
4890         struct ixgbe_hw     *hw;
4891         struct ixgbe_tx_queue *txq;
4892         struct ixgbe_rx_queue *rxq;
4893         uint32_t txdctl;
4894         uint32_t rxdctl;
4895         uint16_t i;
4896         int poll_ms;
4897
4898         PMD_INIT_FUNC_TRACE();
4899         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4900
4901         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4902                 txq = dev->data->tx_queues[i];
4903                 /* Setup Transmit Threshold Registers */
4904                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4905                 txdctl |= txq->pthresh & 0x7F;
4906                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4907                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4908                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4909         }
4910
4911         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4912
4913                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4914                 txdctl |= IXGBE_TXDCTL_ENABLE;
4915                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4916
4917                 poll_ms = 10;
4918                 /* Wait until TX Enable ready */
4919                 do {
4920                         rte_delay_ms(1);
4921                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4922                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4923                 if (!poll_ms)
4924                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4925         }
4926         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4927
4928                 rxq = dev->data->rx_queues[i];
4929
4930                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4931                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4932                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4933
4934                 /* Wait until RX Enable ready */
4935                 poll_ms = 10;
4936                 do {
4937                         rte_delay_ms(1);
4938                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4939                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4940                 if (!poll_ms)
4941                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4942                 rte_wmb();
4943                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4944
4945         }
4946 }
4947
4948 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4949 int __attribute__((weak))
4950 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
4951 {
4952         return -1;
4953 }
4954
4955 uint16_t __attribute__((weak))
4956 ixgbe_recv_pkts_vec(
4957         void __rte_unused *rx_queue,
4958         struct rte_mbuf __rte_unused **rx_pkts,
4959         uint16_t __rte_unused nb_pkts)
4960 {
4961         return 0;
4962 }
4963
4964 uint16_t __attribute__((weak))
4965 ixgbe_recv_scattered_pkts_vec(
4966         void __rte_unused *rx_queue,
4967         struct rte_mbuf __rte_unused **rx_pkts,
4968         uint16_t __rte_unused nb_pkts)
4969 {
4970         return 0;
4971 }
4972
4973 int __attribute__((weak))
4974 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
4975 {
4976         return -1;
4977 }