ixgbe: get queue info and descriptor limits
[dpdk.git] drivers/net/ixgbe/ixgbe_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   Copyright 2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <sys/queue.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <errno.h>
41 #include <stdint.h>
42 #include <stdarg.h>
43 #include <unistd.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
49 #include <rte_log.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
52 #include <rte_pci.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
68 #include <rte_udp.h>
69 #include <rte_tcp.h>
70 #include <rte_sctp.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
73 #include <rte_ip.h>
74
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
82
83 /* Bit mask to indicate which bits are required for building a TX context */
84 #define IXGBE_TX_OFFLOAD_MASK (                  \
85                 PKT_TX_VLAN_PKT |                \
86                 PKT_TX_IP_CKSUM |                \
87                 PKT_TX_L4_MASK |                 \
88                 PKT_TX_TCP_SEG)
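/*
 * ol_flags bits outside this mask never cause a context descriptor to be
 * built in ixgbe_xmit_pkts() below; only the masked flags are compared
 * against the per-queue context cache.
 */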
89
90 static inline struct rte_mbuf *
91 rte_rxmbuf_alloc(struct rte_mempool *mp)
92 {
93         struct rte_mbuf *m;
94
95         m = __rte_mbuf_raw_alloc(mp);
96         __rte_mbuf_sanity_check_raw(m, 0);
97         return (m);
98 }
99
100
101 #if 1
102 #define RTE_PMD_USE_PREFETCH
103 #endif
104
105 #ifdef RTE_PMD_USE_PREFETCH
106 /*
107  * Prefetch a cache line into all cache levels.
108  */
109 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
110 #else
111 #define rte_ixgbe_prefetch(p)   do {} while(0)
112 #endif
113
114 /*********************************************************************
115  *
116  *  TX functions
117  *
118  **********************************************************************/
119
120 /*
121  * Check for descriptors with their DD bit set and free mbufs.
122  * Return the total number of buffers freed.
123  */
124 static inline int __attribute__((always_inline))
125 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
126 {
127         struct ixgbe_tx_entry *txep;
128         uint32_t status;
129         int i;
130
131         /* check DD bit on threshold descriptor */
132         status = txq->tx_ring[txq->tx_next_dd].wb.status;
133         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
134                 return 0;
135
136         /*
137          * first buffer to free from S/W ring is at index
138          * tx_next_dd - (tx_rs_thresh-1)
139          */
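        /*
         * e.g. with the default tx_rs_thresh of 32 and tx_next_dd == 31,
         * txep points at sw_ring[0] and entries 0..31 are freed below.
         */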
140         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
141
142         /* free buffers one at a time */
143         if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
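                /*
                 * ETH_TXQ_FLAGS_NOREFCOUNT guarantees refcnt == 1, so each mbuf
                 * can be returned straight to its pool, skipping the reference
                 * count handling that rte_pktmbuf_free_seg() does in the else path.
                 */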
144                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145                         txep->mbuf->next = NULL;
146                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
147                         txep->mbuf = NULL;
148                 }
149         } else {
150                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
151                         rte_pktmbuf_free_seg(txep->mbuf);
152                         txep->mbuf = NULL;
153                 }
154         }
155
156         /* buffers were freed, update counters */
157         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
158         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
159         if (txq->tx_next_dd >= txq->nb_tx_desc)
160                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
161
162         return txq->tx_rs_thresh;
163 }
164
165 /* Populate 4 descriptors with data from 4 mbufs */
166 static inline void
167 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
168 {
169         uint64_t buf_dma_addr;
170         uint32_t pkt_len;
171         int i;
172
173         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
174                 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
175                 pkt_len = (*pkts)->data_len;
176
177                 /* write data to descriptor */
178                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
179
180                 txdp->read.cmd_type_len =
181                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
182
183                 txdp->read.olinfo_status =
184                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
185
186                 rte_prefetch0(&(*pkts)->pool);
187         }
188 }
189
190 /* Populate 1 descriptor with data from 1 mbuf */
191 static inline void
192 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
193 {
194         uint64_t buf_dma_addr;
195         uint32_t pkt_len;
196
197         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
198         pkt_len = (*pkts)->data_len;
199
200         /* write data to descriptor */
201         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
202         txdp->read.cmd_type_len =
203                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
204         txdp->read.olinfo_status =
205                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
206         rte_prefetch0(&(*pkts)->pool);
207 }
208
209 /*
210  * Fill H/W descriptor ring with mbuf data.
211  * Copy mbuf pointers to the S/W ring.
212  */
213 static inline void
214 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
215                       uint16_t nb_pkts)
216 {
217         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
218         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
219         const int N_PER_LOOP = 4;
220         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
221         int mainpart, leftover;
222         int i, j;
223
224         /*
225          * Process most of the packets in chunks of N pkts.  Any
226          * leftover packets will get processed one at a time.
227          */
228         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
229         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
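        /* e.g. nb_pkts == 10 gives mainpart == 8 and leftover == 2 */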
230         for (i = 0; i < mainpart; i += N_PER_LOOP) {
231                 /* Copy N mbuf pointers to the S/W ring */
232                 for (j = 0; j < N_PER_LOOP; ++j) {
233                         (txep + i + j)->mbuf = *(pkts + i + j);
234                 }
235                 tx4(txdp + i, pkts + i);
236         }
237
238         if (unlikely(leftover > 0)) {
239                 for (i = 0; i < leftover; ++i) {
240                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
241                         tx1(txdp + mainpart + i, pkts + mainpart + i);
242                 }
243         }
244 }
245
246 static inline uint16_t
247 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
248              uint16_t nb_pkts)
249 {
250         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
251         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
252         uint16_t n = 0;
253
254         /*
255          * Begin scanning the H/W ring for done descriptors when the
256          * number of available descriptors drops below tx_free_thresh.  For
257          * each done descriptor, free the associated buffer.
258          */
259         if (txq->nb_tx_free < txq->tx_free_thresh)
260                 ixgbe_tx_free_bufs(txq);
261
262         /* Only use descriptors that are available */
263         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
264         if (unlikely(nb_pkts == 0))
265                 return 0;
266
267         /* Use exactly nb_pkts descriptors */
268         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
269
270         /*
271          * At this point, we know there are enough descriptors in the
272          * ring to transmit all the packets.  This assumes that each
273          * mbuf contains a single segment, and that no new offloads
274          * are expected, which would require a new context descriptor.
275          */
276
277         /*
278          * See if we're going to wrap-around. If so, handle the top
279          * of the descriptor ring first, then do the bottom.  If not,
280          * the processing looks just like the "bottom" part anyway...
281          */
282         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
283                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
284                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
285
286                 /*
287                  * We know that the last descriptor in the ring will need to
288                  * have its RS bit set because tx_rs_thresh has to be
289                  * a divisor of the ring size
290                  */
291                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
292                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
293                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
294
295                 txq->tx_tail = 0;
296         }
297
298         /* Fill H/W descriptor ring with mbuf data */
299         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
300         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
301
302         /*
303          * Determine if RS bit should be set
304          * This is what we actually want:
305          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
306          * but instead of subtracting 1 and doing >=, we can just do
307          * greater than without subtracting.
308          */
309         if (txq->tx_tail > txq->tx_next_rs) {
310                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
311                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
312                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
313                                                 txq->tx_rs_thresh);
314                 if (txq->tx_next_rs >= txq->nb_tx_desc)
315                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
316         }
317
318         /*
319          * Check for wrap-around. This would only happen if we used
320          * up to the last descriptor in the ring, no more, no less.
321          */
322         if (txq->tx_tail >= txq->nb_tx_desc)
323                 txq->tx_tail = 0;
324
325         /* update tail pointer */
326         rte_wmb();
327         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
328
329         return nb_pkts;
330 }
331
332 uint16_t
333 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
334                        uint16_t nb_pkts)
335 {
336         uint16_t nb_tx;
337
338         /* Transmit the whole burst in one call if it fits within TX_MAX_BURST pkts */
339         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
340                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
341
342         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
343         nb_tx = 0;
344         while (nb_pkts) {
345                 uint16_t ret, n;
346                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
347                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
348                 nb_tx = (uint16_t)(nb_tx + ret);
349                 nb_pkts = (uint16_t)(nb_pkts - ret);
350                 if (ret < n)
351                         break;
352         }
353
354         return nb_tx;
355 }
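/*
 * Applications reach this function through rte_eth_tx_burst(), which invokes
 * whichever tx_pkt_burst handler the driver selected at setup time. A minimal
 * sketch, assuming the simple TX path was chosen for port_id/queue_id and
 * that pkts[] has been filled with single-segment mbufs carrying no offload
 * flags:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, 32);
 *
 * Bursts larger than RTE_PMD_IXGBE_TX_MAX_BURST are split into chunks above,
 * and transmission stops early as soon as a chunk cannot be sent completely.
 */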
356
357 static inline void
358 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
359                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
360                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
361 {
362         uint32_t type_tucmd_mlhl;
363         uint32_t mss_l4len_idx = 0;
364         uint32_t ctx_idx;
365         uint32_t vlan_macip_lens;
366         union ixgbe_tx_offload tx_offload_mask;
367
368         ctx_idx = txq->ctx_curr;
369         tx_offload_mask.data = 0;
370         type_tucmd_mlhl = 0;
371
372         /* Specify which HW CTX to upload. */
373         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
374
375         if (ol_flags & PKT_TX_VLAN_PKT) {
376                 tx_offload_mask.vlan_tci |= ~0;
377         }
378
379         /* check if TCP segmentation is required for this packet */
380         if (ol_flags & PKT_TX_TCP_SEG) {
381                 /* implies IP cksum in IPv4 */
382                 if (ol_flags & PKT_TX_IP_CKSUM)
383                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
384                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
385                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
386                 else
387                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
388                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
389                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
390
391                 tx_offload_mask.l2_len |= ~0;
392                 tx_offload_mask.l3_len |= ~0;
393                 tx_offload_mask.l4_len |= ~0;
394                 tx_offload_mask.tso_segsz |= ~0;
395                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
396                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
397         } else { /* no TSO, check if hardware checksum is needed */
398                 if (ol_flags & PKT_TX_IP_CKSUM) {
399                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
400                         tx_offload_mask.l2_len |= ~0;
401                         tx_offload_mask.l3_len |= ~0;
402                 }
403
404                 switch (ol_flags & PKT_TX_L4_MASK) {
405                 case PKT_TX_UDP_CKSUM:
406                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
407                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
409                         tx_offload_mask.l2_len |= ~0;
410                         tx_offload_mask.l3_len |= ~0;
411                         break;
412                 case PKT_TX_TCP_CKSUM:
413                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
414                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
415                         mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
416                         tx_offload_mask.l2_len |= ~0;
417                         tx_offload_mask.l3_len |= ~0;
418                         tx_offload_mask.l4_len |= ~0;
419                         break;
420                 case PKT_TX_SCTP_CKSUM:
421                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
422                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
423                         mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
424                         tx_offload_mask.l2_len |= ~0;
425                         tx_offload_mask.l3_len |= ~0;
426                         break;
427                 default:
428                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
429                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
430                         break;
431                 }
432         }
433
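        /*
         * Cache the flags and the masked offload fields so that
         * what_advctx_update() can detect when a later packet can reuse
         * this context descriptor instead of building a new one.
         */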
434         txq->ctx_cache[ctx_idx].flags = ol_flags;
435         txq->ctx_cache[ctx_idx].tx_offload.data  =
436                 tx_offload_mask.data & tx_offload.data;
437         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
438
439         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
440         vlan_macip_lens = tx_offload.l3_len;
441         vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
442         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
443         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
444         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
445         ctx_txd->seqnum_seed     = 0;
446 }
447
448 /*
449  * Check which hardware context can be used. Use the existing match
450  * or create a new context descriptor.
451  */
452 static inline uint32_t
453 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
454                 union ixgbe_tx_offload tx_offload)
455 {
456         /* If match with the current used context */
457         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
458                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
459                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
460                         return txq->ctx_curr;
461         }
462
463         /* Otherwise, check whether the other cached context matches */
464         txq->ctx_curr ^= 1;
465         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
466                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
467                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
468                         return txq->ctx_curr;
469         }
470
471         /* No match: a new context descriptor must be built */
472         return (IXGBE_CTX_NUM);
473 }
474
475 static inline uint32_t
476 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
477 {
478         uint32_t tmp = 0;
479         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
480                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
481         if (ol_flags & PKT_TX_IP_CKSUM)
482                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
483         if (ol_flags & PKT_TX_TCP_SEG)
484                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
485         return tmp;
486 }
487
488 static inline uint32_t
489 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
490 {
491         uint32_t cmdtype = 0;
492         if (ol_flags & PKT_TX_VLAN_PKT)
493                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
494         if (ol_flags & PKT_TX_TCP_SEG)
495                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
496         return cmdtype;
497 }
498
499 /* Default RS bit threshold values */
500 #ifndef DEFAULT_TX_RS_THRESH
501 #define DEFAULT_TX_RS_THRESH   32
502 #endif
503 #ifndef DEFAULT_TX_FREE_THRESH
504 #define DEFAULT_TX_FREE_THRESH 32
505 #endif
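/*
 * A minimal sketch (port_id, queue_id and nb_txd are illustrative): these
 * defaults are picked up at TX queue setup when the application leaves the
 * corresponding rte_eth_txconf thresholds at 0, e.g.
 *
 *     struct rte_eth_txconf txconf = { .tx_free_thresh = 0, .tx_rs_thresh = 0 };
 *     rte_eth_tx_queue_setup(port_id, queue_id, nb_txd, rte_socket_id(), &txconf);
 */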
506
507 /* Reset transmit descriptors after they have been used */
508 static inline int
509 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
510 {
511         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
512         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
513         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
514         uint16_t nb_tx_desc = txq->nb_tx_desc;
515         uint16_t desc_to_clean_to;
516         uint16_t nb_tx_to_clean;
517         uint32_t status;
518
519         /* Determine the last descriptor needing to be cleaned */
520         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
521         if (desc_to_clean_to >= nb_tx_desc)
522                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
523
524         /* Check to make sure the last descriptor to clean is done */
525         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
526         status = txr[desc_to_clean_to].wb.status;
527         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
528         {
529                 PMD_TX_FREE_LOG(DEBUG,
530                                 "TX descriptor %4u is not done "
531                                 "(port=%d queue=%d)",
532                                 desc_to_clean_to,
533                                 txq->port_id, txq->queue_id);
534                 /* Failed to clean any descriptors, better luck next time */
535                 return -(1);
536         }
537
538         /* Figure out how many descriptors will be cleaned */
539         if (last_desc_cleaned > desc_to_clean_to)
540                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
541                                                         desc_to_clean_to);
542         else
543                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
544                                                 last_desc_cleaned);
545
546         PMD_TX_FREE_LOG(DEBUG,
547                         "Cleaning %4u TX descriptors: %4u to %4u "
548                         "(port=%d queue=%d)",
549                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
550                         txq->port_id, txq->queue_id);
551
552         /*
553          * The last descriptor to clean is done, so that means all the
554          * descriptors from the last descriptor that was cleaned
555          * up to the last descriptor with the RS bit set
556          * are done. Only reset the threshold descriptor.
557          */
558         txr[desc_to_clean_to].wb.status = 0;
559
560         /* Update the txq to reflect the last descriptor that was cleaned */
561         txq->last_desc_cleaned = desc_to_clean_to;
562         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
563
564         /* No Error */
565         return (0);
566 }
567
568 uint16_t
569 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
570                 uint16_t nb_pkts)
571 {
572         struct ixgbe_tx_queue *txq;
573         struct ixgbe_tx_entry *sw_ring;
574         struct ixgbe_tx_entry *txe, *txn;
575         volatile union ixgbe_adv_tx_desc *txr;
576         volatile union ixgbe_adv_tx_desc *txd;
577         struct rte_mbuf     *tx_pkt;
578         struct rte_mbuf     *m_seg;
579         uint64_t buf_dma_addr;
580         uint32_t olinfo_status;
581         uint32_t cmd_type_len;
582         uint32_t pkt_len;
583         uint16_t slen;
584         uint64_t ol_flags;
585         uint16_t tx_id;
586         uint16_t tx_last;
587         uint16_t nb_tx;
588         uint16_t nb_used;
589         uint64_t tx_ol_req;
590         uint32_t ctx = 0;
591         uint32_t new_ctx;
592         union ixgbe_tx_offload tx_offload = {0};
593
594         txq = tx_queue;
595         sw_ring = txq->sw_ring;
596         txr     = txq->tx_ring;
597         tx_id   = txq->tx_tail;
598         txe = &sw_ring[tx_id];
599
600         /* Determine if the descriptor ring needs to be cleaned. */
601         if (txq->nb_tx_free < txq->tx_free_thresh)
602                 ixgbe_xmit_cleanup(txq);
603
604         rte_prefetch0(&txe->mbuf->pool);
605
606         /* TX loop */
607         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
608                 new_ctx = 0;
609                 tx_pkt = *tx_pkts++;
610                 pkt_len = tx_pkt->pkt_len;
611
612                 /*
613                  * Determine how many (if any) context descriptors
614                  * are needed for offload functionality.
615                  */
616                 ol_flags = tx_pkt->ol_flags;
617
618                 /* If hardware offload required */
619                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
620                 if (tx_ol_req) {
621                         tx_offload.l2_len = tx_pkt->l2_len;
622                         tx_offload.l3_len = tx_pkt->l3_len;
623                         tx_offload.l4_len = tx_pkt->l4_len;
624                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
625                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
626
627                         /* Decide whether a new context must be built or an existing one reused. */
628                         ctx = what_advctx_update(txq, tx_ol_req,
629                                 tx_offload);
630                         /* Only allocate a context descriptor if required */
631                         new_ctx = (ctx == IXGBE_CTX_NUM);
632                         ctx = txq->ctx_curr;
633                 }
634
635                 /*
636                  * Keep track of how many descriptors are used this loop
637                  * This will always be the number of segments + the number of
638                  * Context descriptors required to transmit the packet
639                  */
640                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
641
642                 /*
643                  * The number of descriptors that must be allocated for a
644                  * packet is the number of segments of that packet, plus 1
645                  * Context Descriptor for the hardware offload, if any.
646                  * Determine the last TX descriptor to allocate in the TX ring
647                  * for the packet, starting from the current position (tx_id)
648                  * in the ring.
649                  */
650                 tx_last = (uint16_t) (tx_id + nb_used - 1);
651
652                 /* Circular ring */
653                 if (tx_last >= txq->nb_tx_desc)
654                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
655
656                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
657                            " tx_first=%u tx_last=%u",
658                            (unsigned) txq->port_id,
659                            (unsigned) txq->queue_id,
660                            (unsigned) pkt_len,
661                            (unsigned) tx_id,
662                            (unsigned) tx_last);
663
664                 /*
665                  * Make sure there are enough TX descriptors available to
666                  * transmit the entire packet.
667                  * nb_used better be less than or equal to txq->tx_rs_thresh
668                  */
669                 if (nb_used > txq->nb_tx_free) {
670                         PMD_TX_FREE_LOG(DEBUG,
671                                         "Not enough free TX descriptors "
672                                         "nb_used=%4u nb_free=%4u "
673                                         "(port=%d queue=%d)",
674                                         nb_used, txq->nb_tx_free,
675                                         txq->port_id, txq->queue_id);
676
677                         if (ixgbe_xmit_cleanup(txq) != 0) {
678                                 /* Could not clean any descriptors */
679                                 if (nb_tx == 0)
680                                         return (0);
681                                 goto end_of_tx;
682                         }
683
684                         /* nb_used better be <= txq->tx_rs_thresh */
685                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
686                                 PMD_TX_FREE_LOG(DEBUG,
687                                         "The number of descriptors needed to "
688                                         "transmit the packet exceeds the "
689                                         "RS bit threshold. This will impact "
690                                         "performance. "
691                                         "nb_used=%4u nb_free=%4u "
692                                         "tx_rs_thresh=%4u. "
693                                         "(port=%d queue=%d)",
694                                         nb_used, txq->nb_tx_free,
695                                         txq->tx_rs_thresh,
696                                         txq->port_id, txq->queue_id);
697                                 /*
698                                  * Loop here until there are enough TX
699                                  * descriptors or until the ring cannot be
700                                  * cleaned.
701                                  */
702                                 while (nb_used > txq->nb_tx_free) {
703                                         if (ixgbe_xmit_cleanup(txq) != 0) {
704                                                 /*
705                                                  * Could not clean any
706                                                  * descriptors
707                                                  */
708                                                 if (nb_tx == 0)
709                                                         return (0);
710                                                 goto end_of_tx;
711                                         }
712                                 }
713                         }
714                 }
715
716                 /*
717                  * By now there are enough free TX descriptors to transmit
718                  * the packet.
719                  */
720
721                 /*
722                  * Set common flags of all TX Data Descriptors.
723                  *
724                  * The following bits must be set in all Data Descriptors:
725                  *   - IXGBE_ADVTXD_DTYP_DATA
726                  *   - IXGBE_ADVTXD_DCMD_DEXT
727                  *
728                  * The following bits must be set in the first Data Descriptor
729                  * and are ignored in the other ones:
730                  *   - IXGBE_ADVTXD_DCMD_IFCS
731                  *   - IXGBE_ADVTXD_MAC_1588
732                  *   - IXGBE_ADVTXD_DCMD_VLE
733                  *
734                  * The following bits must only be set in the last Data
735                  * Descriptor:
736                  *   - IXGBE_TXD_CMD_EOP
737                  *
738                  * The following bits can be set in any Data Descriptor, but
739                  * are only set in the last Data Descriptor:
740                  *   - IXGBE_TXD_CMD_RS
741                  */
742                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
743                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
744
745 #ifdef RTE_LIBRTE_IEEE1588
746                 if (ol_flags & PKT_TX_IEEE1588_TMST)
747                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
748 #endif
749
750                 olinfo_status = 0;
751                 if (tx_ol_req) {
752
753                         if (ol_flags & PKT_TX_TCP_SEG) {
754                                 /* when TSO is on, the paylen in the descriptor is
755                                  * not the packet len but the TCP payload len */
756                                 pkt_len -= (tx_offload.l2_len +
757                                         tx_offload.l3_len + tx_offload.l4_len);
758                         }
759
760                         /*
761                          * Setup the TX Advanced Context Descriptor if required
762                          */
763                         if (new_ctx) {
764                                 volatile struct ixgbe_adv_tx_context_desc *
765                                     ctx_txd;
766
767                                 ctx_txd = (volatile struct
768                                     ixgbe_adv_tx_context_desc *)
769                                     &txr[tx_id];
770
771                                 txn = &sw_ring[txe->next_id];
772                                 rte_prefetch0(&txn->mbuf->pool);
773
774                                 if (txe->mbuf != NULL) {
775                                         rte_pktmbuf_free_seg(txe->mbuf);
776                                         txe->mbuf = NULL;
777                                 }
778
779                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
780                                         tx_offload);
781
782                                 txe->last_id = tx_last;
783                                 tx_id = txe->next_id;
784                                 txe = txn;
785                         }
786
787                         /*
788                          * Setup the TX Advanced Data Descriptor,
789                          * This path will go through
790                          * whatever new/reuse the context descriptor
791                          */
792                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
793                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
794                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
795                 }
796
797                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
798
799                 m_seg = tx_pkt;
800                 do {
801                         txd = &txr[tx_id];
802                         txn = &sw_ring[txe->next_id];
803                         rte_prefetch0(&txn->mbuf->pool);
804
805                         if (txe->mbuf != NULL)
806                                 rte_pktmbuf_free_seg(txe->mbuf);
807                         txe->mbuf = m_seg;
808
809                         /*
810                          * Set up Transmit Data Descriptor.
811                          */
812                         slen = m_seg->data_len;
813                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
814                         txd->read.buffer_addr =
815                                 rte_cpu_to_le_64(buf_dma_addr);
816                         txd->read.cmd_type_len =
817                                 rte_cpu_to_le_32(cmd_type_len | slen);
818                         txd->read.olinfo_status =
819                                 rte_cpu_to_le_32(olinfo_status);
820                         txe->last_id = tx_last;
821                         tx_id = txe->next_id;
822                         txe = txn;
823                         m_seg = m_seg->next;
824                 } while (m_seg != NULL);
825
826                 /*
827                  * The last packet data descriptor needs End Of Packet (EOP)
828                  */
829                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
830                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
831                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
832
833                 /* Set RS bit only on threshold packets' last descriptor */
834                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
835                         PMD_TX_FREE_LOG(DEBUG,
836                                         "Setting RS bit on TXD id="
837                                         "%4u (port=%d queue=%d)",
838                                         tx_last, txq->port_id, txq->queue_id);
839
840                         cmd_type_len |= IXGBE_TXD_CMD_RS;
841
842                         /* Update txq RS bit counters */
843                         txq->nb_tx_used = 0;
844                 }
845                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
846         }
847 end_of_tx:
848         rte_wmb();
849
850         /*
851          * Set the Transmit Descriptor Tail (TDT)
852          */
853         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
854                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
855                    (unsigned) tx_id, (unsigned) nb_tx);
856         IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
857         txq->tx_tail = tx_id;
858
859         return (nb_tx);
860 }
861
862 /*********************************************************************
863  *
864  *  RX functions
865  *
866  **********************************************************************/
867 #define IXGBE_PACKET_TYPE_IPV4              0X01
868 #define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
869 #define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
870 #define IXGBE_PACKET_TYPE_IPV4_SCTP         0X41
871 #define IXGBE_PACKET_TYPE_IPV4_EXT          0X03
872 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP     0X43
873 #define IXGBE_PACKET_TYPE_IPV6              0X04
874 #define IXGBE_PACKET_TYPE_IPV6_TCP          0X14
875 #define IXGBE_PACKET_TYPE_IPV6_UDP          0X24
876 #define IXGBE_PACKET_TYPE_IPV6_EXT          0X0C
877 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP      0X1C
878 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP      0X2C
879 #define IXGBE_PACKET_TYPE_IPV4_IPV6         0X05
880 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP     0X15
881 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP     0X25
882 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
883 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
884 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
885 #define IXGBE_PACKET_TYPE_MAX               0X80
886 #define IXGBE_PACKET_TYPE_MASK              0X7F
887 #define IXGBE_PACKET_TYPE_SHIFT             0X04
888 static inline uint32_t
889 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
890 {
891         static const uint32_t
892                 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
893                 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
894                         RTE_PTYPE_L3_IPV4,
895                 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
896                         RTE_PTYPE_L3_IPV4_EXT,
897                 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
898                         RTE_PTYPE_L3_IPV6,
899                 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
900                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
901                         RTE_PTYPE_INNER_L3_IPV6,
902                 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
903                         RTE_PTYPE_L3_IPV6_EXT,
904                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
905                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
906                         RTE_PTYPE_INNER_L3_IPV6_EXT,
907                 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
908                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
909                 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
910                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
911                 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
912                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
913                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
914                 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
915                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
916                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
917                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
918                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
919                 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
920                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
921                 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
922                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
923                 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
924                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
925                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
926                 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
927                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
928                 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
929                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
930                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
931                 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
932                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
933                 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
934                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
935         };
936         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
937                 return RTE_PTYPE_UNKNOWN;
938
939         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
940                                 IXGBE_PACKET_TYPE_MASK;
941
942         return ptype_table[pkt_info];
943 }
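/*
 * Example: a pkt_info for which ((pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
 * IXGBE_PACKET_TYPE_MASK) == IXGBE_PACKET_TYPE_IPV4_TCP resolves to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, while anything
 * with the ETQF bit set is reported as RTE_PTYPE_UNKNOWN.
 */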
944
945 static inline uint64_t
946 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
947 {
948         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
949                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
950                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
951                 PKT_RX_RSS_HASH, 0, 0, 0,
952                 0, 0, 0,  PKT_RX_FDIR,
953         };
954 #ifdef RTE_LIBRTE_IEEE1588
955         static uint64_t ip_pkt_etqf_map[8] = {
956                 0, 0, 0, PKT_RX_IEEE1588_PTP,
957                 0, 0, 0, 0,
958         };
959
960         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
961                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
962                                 ip_rss_types_map[pkt_info & 0XF];
963         else
964                 return ip_rss_types_map[pkt_info & 0XF];
965 #else
966         return ip_rss_types_map[pkt_info & 0XF];
967 #endif
968 }
969
970 static inline uint64_t
971 rx_desc_status_to_pkt_flags(uint32_t rx_status)
972 {
973         uint64_t pkt_flags;
974
975         /*
976           * Only check whether a VLAN is present.
977           * Do not check whether the L3/L4 rx checksum was done by the NIC;
978           * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
979          */
980         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
981
982 #ifdef RTE_LIBRTE_IEEE1588
983         if (rx_status & IXGBE_RXD_STAT_TMST)
984                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
985 #endif
986         return pkt_flags;
987 }
988
989 static inline uint64_t
990 rx_desc_error_to_pkt_flags(uint32_t rx_status)
991 {
992         /*
993          * Bit 31: IPE, IPv4 checksum error
994          * Bit 30: L4I, L4I integrity error
995          */
996         static uint64_t error_to_pkt_flags_map[4] = {
997                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
998                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
999         };
1000         return error_to_pkt_flags_map[(rx_status >>
1001                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1002 }
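/*
 * e.g. a status word with both the IPE and L4I error bits set indexes entry 3
 * of the map and reports PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD.
 */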
1003
1004 /*
1005  * LOOK_AHEAD defines how many desc statuses to check beyond the
1006  * current descriptor.
1007  * It must be a pound define for optimal performance.
1008  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1009  * function only works with LOOK_AHEAD=8.
1010  */
1011 #define LOOK_AHEAD 8
1012 #if (LOOK_AHEAD != 8)
1013 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1014 #endif
1015 static inline int
1016 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1017 {
1018         volatile union ixgbe_adv_rx_desc *rxdp;
1019         struct ixgbe_rx_entry *rxep;
1020         struct rte_mbuf *mb;
1021         uint16_t pkt_len;
1022         uint64_t pkt_flags;
1023         int nb_dd;
1024         uint32_t s[LOOK_AHEAD];
1025         uint16_t pkt_info[LOOK_AHEAD];
1026         int i, j, nb_rx = 0;
1027         uint32_t status;
1028
1029         /* get references to current descriptor and S/W ring entry */
1030         rxdp = &rxq->rx_ring[rxq->rx_tail];
1031         rxep = &rxq->sw_ring[rxq->rx_tail];
1032
1033         status = rxdp->wb.upper.status_error;
1034         /* check to make sure there is at least 1 packet to receive */
1035         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1036                 return 0;
1037
1038         /*
1039          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1040          * reference packets that are ready to be received.
1041          */
1042         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1043              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1044         {
1045                 /* Read desc statuses backwards to avoid race condition */
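                /*
                 * The NIC writes descriptors back in order, so if s[j] shows DD
                 * set, every lower-indexed status read afterwards shows DD too;
                 * the DD count below therefore always describes a contiguous
                 * block of completed descriptors.
                 */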
1046                 for (j = LOOK_AHEAD-1; j >= 0; --j)
1047                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1048
1049                 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1050                         pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1051                                                 hs_rss.pkt_info;
1052
1053                 /* Compute how many status bits were set */
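                /* IXGBE_RXDADV_STAT_DD is bit 0, so each term adds either 0 or 1 */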
1054                 nb_dd = 0;
1055                 for (j = 0; j < LOOK_AHEAD; ++j)
1056                         nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
1057
1058                 nb_rx += nb_dd;
1059
1060                 /* Translate descriptor info to mbuf format */
1061                 for (j = 0; j < nb_dd; ++j) {
1062                         mb = rxep[j].mbuf;
1063                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1064                                   rxq->crc_len;
1065                         mb->data_len = pkt_len;
1066                         mb->pkt_len = pkt_len;
1067                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1068
1069                         /* convert descriptor fields to rte mbuf flags */
1070                         pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1071                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1072                         pkt_flags |=
1073                                 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1074                         mb->ol_flags = pkt_flags;
1075                         mb->packet_type =
1076                                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1077
1078                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1079                                 mb->hash.rss = rte_le_to_cpu_32(
1080                                     rxdp[j].wb.lower.hi_dword.rss);
1081                         else if (pkt_flags & PKT_RX_FDIR) {
1082                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1083                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1084                                     IXGBE_ATR_HASH_MASK;
1085                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1086                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1087                         }
1088                 }
1089
1090                 /* Move mbuf pointers from the S/W ring to the stage */
1091                 for (j = 0; j < LOOK_AHEAD; ++j) {
1092                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1093                 }
1094
1095                 /* stop scanning if this group of LOOK_AHEAD descriptors was not fully done */
1096                 if (nb_dd != LOOK_AHEAD)
1097                         break;
1098         }
1099
1100         /* clear software ring entries so we can cleanup correctly */
1101         for (i = 0; i < nb_rx; ++i) {
1102                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1103         }
1104
1105
1106         return nb_rx;
1107 }
1108
1109 static inline int
1110 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1111 {
1112         volatile union ixgbe_adv_rx_desc *rxdp;
1113         struct ixgbe_rx_entry *rxep;
1114         struct rte_mbuf *mb;
1115         uint16_t alloc_idx;
1116         __le64 dma_addr;
1117         int diag, i;
1118
1119         /* allocate buffers in bulk directly into the S/W ring */
1120         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
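        /* e.g. rx_free_trigger == 31 with rx_free_thresh == 32 refills sw_ring[0..31] */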
1121         rxep = &rxq->sw_ring[alloc_idx];
1122         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1123                                     rxq->rx_free_thresh);
1124         if (unlikely(diag != 0))
1125                 return (-ENOMEM);
1126
1127         rxdp = &rxq->rx_ring[alloc_idx];
1128         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1129                 /* populate the static rte mbuf fields */
1130                 mb = rxep[i].mbuf;
1131                 if (reset_mbuf) {
1132                         mb->next = NULL;
1133                         mb->nb_segs = 1;
1134                         mb->port = rxq->port_id;
1135                 }
1136
1137                 rte_mbuf_refcnt_set(mb, 1);
1138                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1139
1140                 /* populate the descriptors */
1141                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
1142                 rxdp[i].read.hdr_addr = 0;
1143                 rxdp[i].read.pkt_addr = dma_addr;
1144         }
1145
1146         /* update state of internal queue structure */
1147         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1148         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1149                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1150
1151         /* no errors */
1152         return 0;
1153 }
1154
1155 static inline uint16_t
1156 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1157                          uint16_t nb_pkts)
1158 {
1159         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1160         int i;
1161
1162         /* how many packets are ready to return? */
1163         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1164
1165         /* copy mbuf pointers to the application's packet list */
1166         for (i = 0; i < nb_pkts; ++i)
1167                 rx_pkts[i] = stage[i];
1168
1169         /* update internal queue state */
1170         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1171         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1172
1173         return nb_pkts;
1174 }
1175
1176 static inline uint16_t
1177 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1178              uint16_t nb_pkts)
1179 {
1180         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1181         uint16_t nb_rx = 0;
1182
1183         /* Any previously recv'd pkts will be returned from the Rx stage */
1184         if (rxq->rx_nb_avail)
1185                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1186
1187         /* Scan the H/W ring for packets to receive */
1188         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1189
1190         /* update internal queue state */
1191         rxq->rx_next_avail = 0;
1192         rxq->rx_nb_avail = nb_rx;
1193         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1194
1195         /* if required, allocate new buffers to replenish descriptors */
1196         if (rxq->rx_tail > rxq->rx_free_trigger) {
1197                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1198
1199                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1200                         int i, j;
1201                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1202                                    "queue_id=%u", (unsigned) rxq->port_id,
1203                                    (unsigned) rxq->queue_id);
1204
1205                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1206                                 rxq->rx_free_thresh;
1207
1208                         /*
1209                          * Need to rewind any previous receives if we cannot
1210                          * allocate new buffers to replenish the old ones.
1211                          */
1212                         rxq->rx_nb_avail = 0;
1213                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1214                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1215                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1216
1217                         return 0;
1218                 }
1219
1220                 /* update tail pointer */
1221                 rte_wmb();
1222                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1223         }
1224
1225         if (rxq->rx_tail >= rxq->nb_rx_desc)
1226                 rxq->rx_tail = 0;
1227
1228         /* received any packets this loop? */
1229         if (rxq->rx_nb_avail)
1230                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1231
1232         return 0;
1233 }
1234
1235 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1236 static uint16_t
1237 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1238                            uint16_t nb_pkts)
1239 {
1240         uint16_t nb_rx;
1241
1242         if (unlikely(nb_pkts == 0))
1243                 return 0;
1244
1245         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1246                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1247
1248         /* request is relatively large, chunk it up */
1249         nb_rx = 0;
1250         while (nb_pkts) {
1251                 uint16_t ret, n;
1252                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1253                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1254                 nb_rx = (uint16_t)(nb_rx + ret);
1255                 nb_pkts = (uint16_t)(nb_pkts - ret);
1256                 if (ret < n)
1257                         break;
1258         }
1259
1260         return nb_rx;
1261 }
1262
1263 uint16_t
1264 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1265                 uint16_t nb_pkts)
1266 {
1267         struct ixgbe_rx_queue *rxq;
1268         volatile union ixgbe_adv_rx_desc *rx_ring;
1269         volatile union ixgbe_adv_rx_desc *rxdp;
1270         struct ixgbe_rx_entry *sw_ring;
1271         struct ixgbe_rx_entry *rxe;
1272         struct rte_mbuf *rxm;
1273         struct rte_mbuf *nmb;
1274         union ixgbe_adv_rx_desc rxd;
1275         uint64_t dma_addr;
1276         uint32_t staterr;
1277         uint32_t pkt_info;
1278         uint16_t pkt_len;
1279         uint16_t rx_id;
1280         uint16_t nb_rx;
1281         uint16_t nb_hold;
1282         uint64_t pkt_flags;
1283
1284         nb_rx = 0;
1285         nb_hold = 0;
1286         rxq = rx_queue;
1287         rx_id = rxq->rx_tail;
1288         rx_ring = rxq->rx_ring;
1289         sw_ring = rxq->sw_ring;
1290         while (nb_rx < nb_pkts) {
1291                 /*
1292                  * The order of operations here is important as the DD status
1293                  * bit must not be read after any other descriptor fields.
1294                  * rx_ring and rxdp are pointing to volatile data so the order
1295                  * of accesses cannot be reordered by the compiler. If they were
1296                  * not volatile, they could be reordered which could lead to
1297                  * using invalid descriptor fields when read from rxd.
1298                  */
1299                 rxdp = &rx_ring[rx_id];
1300                 staterr = rxdp->wb.upper.status_error;
1301                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1302                         break;
1303                 rxd = *rxdp;
1304
1305                 /*
1306                  * End of packet.
1307                  *
1308                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1309                  * is likely to be invalid and to be dropped by the various
1310                  * validation checks performed by the network stack.
1311                  *
1312                  * Allocate a new mbuf to replenish the RX ring descriptor.
1313                  * If the allocation fails:
1314                  *    - arrange for that RX descriptor to be the first one
1315                  *      being parsed the next time the receive function is
1316                  *      invoked [on the same queue].
1317                  *
1318                  *    - Stop parsing the RX ring and return immediately.
1319                  *
1320                 * This policy does not drop the packet received in the RX
1321                 * descriptor for which the allocation of a new mbuf failed.
1322                 * Thus, it allows that packet to be retrieved later once
1323                 * mbufs have been freed in the meantime.
1324                 * As a side effect, holding RX descriptors instead of
1325                 * systematically giving them back to the NIC may lead to
1326                 * RX ring exhaustion situations.
1327                 * However, the NIC can gracefully prevent such situations
1328                 * from happening by sending specific "back-pressure" flow
1329                 * control frames to its peer(s).
1330                  */
1331                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1332                            "ext_err_stat=0x%08x pkt_len=%u",
1333                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1334                            (unsigned) rx_id, (unsigned) staterr,
1335                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1336
1337                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1338                 if (nmb == NULL) {
1339                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1340                                    "queue_id=%u", (unsigned) rxq->port_id,
1341                                    (unsigned) rxq->queue_id);
1342                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1343                         break;
1344                 }
1345
1346                 nb_hold++;
1347                 rxe = &sw_ring[rx_id];
1348                 rx_id++;
1349                 if (rx_id == rxq->nb_rx_desc)
1350                         rx_id = 0;
1351
1352                 /* Prefetch next mbuf while processing current one. */
1353                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1354
1355                 /*
1356                  * When next RX descriptor is on a cache-line boundary,
1357                  * prefetch the next 4 RX descriptors and the next 8 pointers
1358                  * to mbufs.
1359                  */
1360                 if ((rx_id & 0x3) == 0) {
1361                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1362                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1363                 }
1364
1365                 rxm = rxe->mbuf;
1366                 rxe->mbuf = nmb;
1367                 dma_addr =
1368                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1369                 rxdp->read.hdr_addr = 0;
1370                 rxdp->read.pkt_addr = dma_addr;
1371
1372                 /*
1373                  * Initialize the returned mbuf.
1374                  * 1) setup generic mbuf fields:
1375                  *    - number of segments,
1376                  *    - next segment,
1377                  *    - packet length,
1378                  *    - RX port identifier.
1379                  * 2) integrate hardware offload data, if any:
1380                  *    - RSS flag & hash,
1381                  *    - IP checksum flag,
1382                  *    - VLAN TCI, if any,
1383                  *    - error flags.
1384                  */
1385                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1386                                       rxq->crc_len);
1387                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1388                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1389                 rxm->nb_segs = 1;
1390                 rxm->next = NULL;
1391                 rxm->pkt_len = pkt_len;
1392                 rxm->data_len = pkt_len;
1393                 rxm->port = rxq->port_id;
1394
1395                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1396                                                                 pkt_info);
1397                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1398                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1399
1400                 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1401                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1402                 pkt_flags = pkt_flags |
1403                         ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1404                 rxm->ol_flags = pkt_flags;
1405                 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1406
1407                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1408                         rxm->hash.rss = rte_le_to_cpu_32(
1409                                                 rxd.wb.lower.hi_dword.rss);
1410                 else if (pkt_flags & PKT_RX_FDIR) {
1411                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1412                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1413                                         IXGBE_ATR_HASH_MASK;
1414                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1415                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1416                 }
1417                 /*
1418                  * Store the mbuf address into the next entry of the array
1419                  * of returned packets.
1420                  */
1421                 rx_pkts[nb_rx++] = rxm;
1422         }
1423         rxq->rx_tail = rx_id;
1424
1425         /*
1426          * If the number of free RX descriptors is greater than the RX free
1427          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1428          * register.
1429          * Update the RDT with the value of the last processed RX descriptor
1430          * minus 1, to guarantee that the RDT register is never equal to the
1431          * RDH register, which creates a "full" ring situation from the
1432          * hardware point of view...
1433          */
1434         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1435         if (nb_hold > rxq->rx_free_thresh) {
1436                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1437                            "nb_hold=%u nb_rx=%u",
1438                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1439                            (unsigned) rx_id, (unsigned) nb_hold,
1440                            (unsigned) nb_rx);
1441                 rx_id = (uint16_t) ((rx_id == 0) ?
1442                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1443                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1444                 nb_hold = 0;
1445         }
1446         rxq->nb_rx_hold = nb_hold;
1447         return (nb_rx);
1448 }
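/*
 * Sketch only: the RDT value programmed above is "index of the last
 * processed descriptor minus one", with wrap-around, so that RDT never
 * becomes equal to RDH (which the comment above describes as a "full"
 * ring from the hardware point of view).  A hypothetical helper
 * expressing that computation:
 */
static inline uint16_t
example_rdt_from_rx_id(uint16_t rx_id, uint16_t nb_rx_desc)
{
        /* previous descriptor index, modulo the ring size */
        return (uint16_t)((rx_id == 0) ? (nb_rx_desc - 1) : (rx_id - 1));
}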
1449
1450 /**
1451  * Detect an RSC descriptor.
1452  */
1453 static inline uint32_t
1454 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1455 {
1456         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1457                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1458 }
1459
1460 /**
1461  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1462  *
1463  * Fill the following info in the HEAD buffer of the Rx cluster:
1464  *    - RX port identifier
1465  *    - hardware offload data, if any:
1466  *      - RSS flag & hash
1467  *      - IP checksum flag
1468  *      - VLAN TCI, if any
1469  *      - error flags
1470  * @head HEAD of the packet cluster
1471  * @desc HW descriptor to get data from
1472  * @port_id Port ID of the Rx queue
1473  */
1474 static inline void
1475 ixgbe_fill_cluster_head_buf(
1476         struct rte_mbuf *head,
1477         union ixgbe_adv_rx_desc *desc,
1478         uint8_t port_id,
1479         uint32_t staterr)
1480 {
1481         uint16_t pkt_info;
1482         uint64_t pkt_flags;
1483
1484         head->port = port_id;
1485
1486         /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1487          * set in the pkt_flags field.
1488          */
1489         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1490         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1491         pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1492         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1493         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1494         head->ol_flags = pkt_flags;
1495         head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1496
1497         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1498                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1499         else if (pkt_flags & PKT_RX_FDIR) {
1500                 head->hash.fdir.hash =
1501                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1502                                                           & IXGBE_ATR_HASH_MASK;
1503                 head->hash.fdir.id =
1504                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1505         }
1506 }
1507
1508 /**
1509  * ixgbe_recv_pkts_lro - receive handler for the LRO (RSC) and scattered Rx cases.
1510  *
1511  * @rx_queue Rx queue handle
1512  * @rx_pkts table of received packets
1513  * @nb_pkts size of rx_pkts table
1514  * @bulk_alloc if TRUE, bulk allocation is used to refill the HW ring
1515  *
1516  * Handles the Rx HW ring completions when the RSC feature is configured. Uses
1517  * an additional ring of ixgbe_scattered_rx_entry's (sw_sc_ring) for the RSC info.
1518  *
1519  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1520  * 1) When non-EOP RSC completion arrives:
1521  *    a) Update the HEAD of the current RSC aggregation cluster with the new
1522  *       segment's data length.
1523  *    b) Set the "next" pointer of the current segment to point to the segment
1524  *       at the NEXTP index.
1525  *    c) Pass the HEAD of the RSC aggregation cluster on to the next NEXTP entry
1526  *       in the sw_sc_ring.
1527  * 2) When EOP arrives we just update the cluster's total length and offload
1528  *    flags and deliver the cluster up to the upper layers. In our case - put it
1529  *    in the rx_pkts table.
1530  *
1531  * Returns the number of received packets/clusters (according to the "bulk
1532  * receive" interface).
1533  */
1534 static inline uint16_t
1535 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1536                     bool bulk_alloc)
1537 {
1538         struct ixgbe_rx_queue *rxq = rx_queue;
1539         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1540         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1541         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1542         uint16_t rx_id = rxq->rx_tail;
1543         uint16_t nb_rx = 0;
1544         uint16_t nb_hold = rxq->nb_rx_hold;
1545         uint16_t prev_id = rxq->rx_tail;
1546
1547         while (nb_rx < nb_pkts) {
1548                 bool eop;
1549                 struct ixgbe_rx_entry *rxe;
1550                 struct ixgbe_scattered_rx_entry *sc_entry;
1551                 struct ixgbe_scattered_rx_entry *next_sc_entry;
1552                 struct ixgbe_rx_entry *next_rxe;
1553                 struct rte_mbuf *first_seg;
1554                 struct rte_mbuf *rxm;
1555                 struct rte_mbuf *nmb;
1556                 union ixgbe_adv_rx_desc rxd;
1557                 uint16_t data_len;
1558                 uint16_t next_id;
1559                 volatile union ixgbe_adv_rx_desc *rxdp;
1560                 uint32_t staterr;
1561
1562 next_desc:
1563                 /*
1564                  * The code in this whole file uses the volatile pointer to
1565                  * ensure the read ordering of the status and the rest of the
1566                  * descriptor fields (on the compiler level only!!!). This is so
1567                  * UGLY - why not just use the compiler barrier instead? DPDK
1568                  * even has the rte_compiler_barrier() for that.
1569                  *
1570                  * But most importantly this is just wrong because this doesn't
1571                  * ensure memory ordering in a general case at all. For
1572                  * instance, DPDK is supposed to work on Power CPUs where
1573                  * compiler barrier may just not be enough!
1574                  *
1575                  * I tried to write only this function properly to have a
1576                  * starting point (as a part of an LRO/RSC series) but the
1577                  * compiler cursed at me when I tried to cast away the
1578                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1579                  * keeping it the way it is for now.
1580                  *
1581                  * The code in this file is broken in so many other places and
1582                  * will just not work on a big endian CPU anyway therefore the
1583                  * lines below will have to be revisited together with the rest
1584                  * of the ixgbe PMD.
1585                  *
1586                  * TODO:
1587                  *    - Get rid of "volatile" crap and let the compiler do its
1588                  *      job.
1589                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
1590                  *      memory ordering below.
1591                  */
1592                 rxdp = &rx_ring[rx_id];
1593                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1594
1595                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1596                         break;
1597
1598                 rxd = *rxdp;
1599
1600                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1601                                   "staterr=0x%x data_len=%u",
1602                            rxq->port_id, rxq->queue_id, rx_id, staterr,
1603                            rte_le_to_cpu_16(rxd.wb.upper.length));
1604
1605                 if (!bulk_alloc) {
1606                         nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1607                         if (nmb == NULL) {
1608                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1609                                                   "port_id=%u queue_id=%u",
1610                                            rxq->port_id, rxq->queue_id);
1611
1612                                 rte_eth_devices[rxq->port_id].data->
1613                                                         rx_mbuf_alloc_failed++;
1614                                 break;
1615                         }
1616                 }
1617                 else if (nb_hold > rxq->rx_free_thresh) {
1618                         uint16_t next_rdt = rxq->rx_free_trigger;
1619
1620                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1621                                 rte_wmb();
1622                                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1623                                                     next_rdt);
1624                                 nb_hold -= rxq->rx_free_thresh;
1625                         } else {
1626                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1627                                                   "port_id=%u queue_id=%u",
1628                                            rxq->port_id, rxq->queue_id);
1629
1630                                 rte_eth_devices[rxq->port_id].data->
1631                                                         rx_mbuf_alloc_failed++;
1632                                 break;
1633                         }
1634                 }
1635
1636                 nb_hold++;
1637                 rxe = &sw_ring[rx_id];
1638                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1639
1640                 next_id = rx_id + 1;
1641                 if (next_id == rxq->nb_rx_desc)
1642                         next_id = 0;
1643
1644                 /* Prefetch next mbuf while processing current one. */
1645                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1646
1647                 /*
1648                  * When next RX descriptor is on a cache-line boundary,
1649                  * prefetch the next 4 RX descriptors and the next 4 pointers
1650                  * to mbufs.
1651                  */
1652                 if ((next_id & 0x3) == 0) {
1653                         rte_ixgbe_prefetch(&rx_ring[next_id]);
1654                         rte_ixgbe_prefetch(&sw_ring[next_id]);
1655                 }
1656
1657                 rxm = rxe->mbuf;
1658
1659                 if (!bulk_alloc) {
1660                         __le64 dma =
1661                           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1662                         /*
1663                          * Update RX descriptor with the physical address of the
1664                          * new data buffer of the new allocated mbuf.
1665                          */
1666                         rxe->mbuf = nmb;
1667
1668                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
1669                         rxdp->read.hdr_addr = 0;
1670                         rxdp->read.pkt_addr = dma;
1671                 } else
1672                         rxe->mbuf = NULL;
1673
1674                 /*
1675                  * Set the data length of the mbuf from the length reported
1676                  */
1677                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1678                 rxm->data_len = data_len;
1679
1680                 if (!eop) {
1681                         uint16_t nextp_id;
1682                         /*
1683                          * Get next descriptor index:
1684                          *  - For RSC it's in the NEXTP field.
1685                          *  - For a scattered packet - it's just a following
1686                          *    descriptor.
1687                          */
1688                         if (ixgbe_rsc_count(&rxd))
1689                                 nextp_id =
1690                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1691                                                        IXGBE_RXDADV_NEXTP_SHIFT;
1692                         else
1693                                 nextp_id = next_id;
1694
1695                         next_sc_entry = &sw_sc_ring[nextp_id];
1696                         next_rxe = &sw_ring[nextp_id];
1697                         rte_ixgbe_prefetch(next_rxe);
1698                 }
1699
1700                 sc_entry = &sw_sc_ring[rx_id];
1701                 first_seg = sc_entry->fbuf;
1702                 sc_entry->fbuf = NULL;
1703
1704                 /*
1705                  * If this is the first buffer of the received packet,
1706                  * set the pointer to the first mbuf of the packet and
1707                  * initialize its context.
1708                  * Otherwise, update the total length and the number of segments
1709                  * of the current scattered packet, and update the pointer to
1710                  * the last mbuf of the current packet.
1711                  */
1712                 if (first_seg == NULL) {
1713                         first_seg = rxm;
1714                         first_seg->pkt_len = data_len;
1715                         first_seg->nb_segs = 1;
1716                 } else {
1717                         first_seg->pkt_len += data_len;
1718                         first_seg->nb_segs++;
1719                 }
1720
1721                 prev_id = rx_id;
1722                 rx_id = next_id;
1723
1724                 /*
1725                  * If this is not the last buffer of the received packet, update
1726                  * the pointer to the first mbuf at the NEXTP entry in the
1727                  * sw_sc_ring and continue to parse the RX ring.
1728                  */
1729                 if (!eop) {
1730                         rxm->next = next_rxe->mbuf;
1731                         next_sc_entry->fbuf = first_seg;
1732                         goto next_desc;
1733                 }
1734
1735                 /*
1736                  * This is the last buffer of the received packet - return
1737                  * the current cluster to the user.
1738                  */
1739                 rxm->next = NULL;
1740
1741                 /* Initialize the first mbuf of the returned packet */
1742                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1743                                             staterr);
1744
1745                 /*
1746          * Deal with the case when HW CRC stripping is disabled.
1747          * That cannot happen when LRO is enabled, but can still
1748          * happen in scattered Rx mode.
1749                  */
1750                 first_seg->pkt_len -= rxq->crc_len;
1751                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1752                         struct rte_mbuf *lp;
1753
1754                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
1755                                 ;
1756
1757                         first_seg->nb_segs--;
1758                         lp->data_len -= rxq->crc_len - rxm->data_len;
1759                         lp->next = NULL;
1760                         rte_pktmbuf_free_seg(rxm);
1761                 } else
1762                         rxm->data_len -= rxq->crc_len;
1763
1764                 /* Prefetch data of first segment, if configured to do so. */
1765                 rte_packet_prefetch((char *)first_seg->buf_addr +
1766                         first_seg->data_off);
1767
1768                 /*
1769                  * Store the mbuf address into the next entry of the array
1770                  * of returned packets.
1771                  */
1772                 rx_pkts[nb_rx++] = first_seg;
1773         }
1774
1775         /*
1776          * Record index of the next RX descriptor to probe.
1777          */
1778         rxq->rx_tail = rx_id;
1779
1780         /*
1781          * If the number of free RX descriptors is greater than the RX free
1782          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1783          * register.
1784          * Update the RDT with the value of the last processed RX descriptor
1785          * minus 1, to guarantee that the RDT register is never equal to the
1786          * RDH register, which creates a "full" ring situation from the
1787          * hardware point of view...
1788          */
1789         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1790                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1791                            "nb_hold=%u nb_rx=%u",
1792                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1793
1794                 rte_wmb();
1795                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1796                 nb_hold = 0;
1797         }
1798
1799         rxq->nb_rx_hold = nb_hold;
1800         return nb_rx;
1801 }
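/*
 * Sketch, not used by the driver: the per-cluster bookkeeping performed
 * above shown in isolation - appending one more segment to an already
 * started RSC/scattered cluster.  The names "head", "tail" and "seg" are
 * hypothetical.
 */
static inline void
example_append_cluster_segment(struct rte_mbuf *head, struct rte_mbuf *tail,
                               struct rte_mbuf *seg, uint16_t data_len)
{
        seg->data_len = data_len;
        seg->next = NULL;
        tail->next = seg;               /* chain the new segment at the end */
        head->pkt_len += data_len;      /* total length lives in the head... */
        head->nb_segs++;                /* ...and so does the segment count */
}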
1802
1803 uint16_t
1804 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1805                                  uint16_t nb_pkts)
1806 {
1807         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1808 }
1809
1810 uint16_t
1811 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1812                                uint16_t nb_pkts)
1813 {
1814         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1815 }
1816
1817 /*********************************************************************
1818  *
1819  *  Queue management functions
1820  *
1821  **********************************************************************/
1822
1823 /*
1824  * Create memzone for HW rings. malloc can't be used as the physical address is
1825  * needed. If the memzone is already created, then this function returns a ptr
1826  * to the old one.
1827  */
1828 static const struct rte_memzone * __attribute__((cold))
1829 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1830                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1831 {
1832         char z_name[RTE_MEMZONE_NAMESIZE];
1833         const struct rte_memzone *mz;
1834
1835         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1836                         dev->driver->pci_drv.name, ring_name,
1837                         dev->data->port_id, queue_id);
1838
1839         mz = rte_memzone_lookup(z_name);
1840         if (mz)
1841                 return mz;
1842
1843 #ifdef RTE_LIBRTE_XEN_DOM0
1844         return rte_memzone_reserve_bounded(z_name, ring_size,
1845                 socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
1846 #else
1847         return rte_memzone_reserve_aligned(z_name, ring_size,
1848                 socket_id, 0, IXGBE_ALIGN);
1849 #endif
1850 }
1851
1852 static void __attribute__((cold))
1853 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1854 {
1855         unsigned i;
1856
1857         if (txq->sw_ring != NULL) {
1858                 for (i = 0; i < txq->nb_tx_desc; i++) {
1859                         if (txq->sw_ring[i].mbuf != NULL) {
1860                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1861                                 txq->sw_ring[i].mbuf = NULL;
1862                         }
1863                 }
1864         }
1865 }
1866
1867 static void __attribute__((cold))
1868 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1869 {
1870         if (txq != NULL &&
1871             txq->sw_ring != NULL)
1872                 rte_free(txq->sw_ring);
1873 }
1874
1875 static void __attribute__((cold))
1876 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1877 {
1878         if (txq != NULL && txq->ops != NULL) {
1879                 txq->ops->release_mbufs(txq);
1880                 txq->ops->free_swring(txq);
1881                 rte_free(txq);
1882         }
1883 }
1884
1885 void __attribute__((cold))
1886 ixgbe_dev_tx_queue_release(void *txq)
1887 {
1888         ixgbe_tx_queue_release(txq);
1889 }
1890
1891 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1892 static void __attribute__((cold))
1893 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1894 {
1895         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1896         struct ixgbe_tx_entry *txe = txq->sw_ring;
1897         uint16_t prev, i;
1898
1899         /* Zero out HW ring memory */
1900         for (i = 0; i < txq->nb_tx_desc; i++) {
1901                 txq->tx_ring[i] = zeroed_desc;
1902         }
1903
1904         /* Initialize SW ring entries */
1905         prev = (uint16_t) (txq->nb_tx_desc - 1);
1906         for (i = 0; i < txq->nb_tx_desc; i++) {
1907                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1908                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1909                 txe[i].mbuf = NULL;
1910                 txe[i].last_id = i;
1911                 txe[prev].next_id = i;
1912                 prev = i;
1913         }
1914
1915         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1916         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1917
1918         txq->tx_tail = 0;
1919         txq->nb_tx_used = 0;
1920         /*
1921          * Always allow 1 descriptor to be un-allocated to avoid
1922          * a H/W race condition
1923          */
1924         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1925         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1926         txq->ctx_curr = 0;
1927         memset((void*)&txq->ctx_cache, 0,
1928                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1929 }
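/*
 * Sketch: after the reset above every descriptor reports DD ("done") and
 * the software ring forms a circular list through next_id (entry i points
 * to entry i + 1, the last entry back to 0).  A hypothetical walk over one
 * full cycle of that list:
 */
static inline uint16_t
example_tx_swring_cycle_len(const struct ixgbe_tx_entry *txe,
                            uint16_t nb_tx_desc)
{
        uint16_t id = 0, cnt = 0;

        do {
                id = txe[id].next_id;
                cnt++;
        } while (id != 0 && cnt < nb_tx_desc);

        return cnt; /* equals nb_tx_desc for a correctly initialized ring */
}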
1930
1931 static const struct ixgbe_txq_ops def_txq_ops = {
1932         .release_mbufs = ixgbe_tx_queue_release_mbufs,
1933         .free_swring = ixgbe_tx_free_swring,
1934         .reset = ixgbe_reset_tx_queue,
1935 };
1936
1937 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1938  * the queue parameters. Used in tx_queue_setup by primary process and then
1939  * in dev_init by secondary process when attaching to an existing ethdev.
1940  */
1941 void __attribute__((cold))
1942 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1943 {
1944         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1945         if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1946                         && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1947                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1948 #ifdef RTE_IXGBE_INC_VECTOR
1949                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1950                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1951                                         ixgbe_txq_vec_setup(txq) == 0)) {
1952                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1953                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1954                 } else
1955 #endif
1956                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1957         } else {
1958                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1959                 PMD_INIT_LOG(DEBUG,
1960                                 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1961                                 (unsigned long)txq->txq_flags,
1962                                 (unsigned long)IXGBE_SIMPLE_FLAGS);
1963                 PMD_INIT_LOG(DEBUG,
1964                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
1965                                 (unsigned long)txq->tx_rs_thresh,
1966                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
1967                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
1968         }
1969 }
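/*
 * Sketch of an application-side tx_conf that would steer the selection
 * above onto the simple (and, where available, vector) path.  It assumes
 * IXGBE_SIMPLE_FLAGS is ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS
 * and RTE_PMD_IXGBE_TX_MAX_BURST is 32; see ixgbe_rxtx.h for the
 * authoritative definitions.
 */
static inline void
example_simple_path_txconf(struct rte_eth_txconf *txc)
{
        memset(txc, 0, sizeof(*txc));
        txc->txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
        txc->tx_rs_thresh = 32;         /* >= RTE_PMD_IXGBE_TX_MAX_BURST */
        txc->tx_free_thresh = 32;
}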
1970
1971 int __attribute__((cold))
1972 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1973                          uint16_t queue_idx,
1974                          uint16_t nb_desc,
1975                          unsigned int socket_id,
1976                          const struct rte_eth_txconf *tx_conf)
1977 {
1978         const struct rte_memzone *tz;
1979         struct ixgbe_tx_queue *txq;
1980         struct ixgbe_hw     *hw;
1981         uint16_t tx_rs_thresh, tx_free_thresh;
1982
1983         PMD_INIT_FUNC_TRACE();
1984         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1985
1986         /*
1987          * Validate number of transmit descriptors.
1988          * It must not exceed the hardware maximum and must be a multiple
1989          * of IXGBE_TXD_ALIGN.
1990          */
1991         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
1992                         (nb_desc > IXGBE_MAX_RING_DESC) ||
1993                         (nb_desc < IXGBE_MIN_RING_DESC)) {
1994                 return -EINVAL;
1995         }
1996
1997         /*
1998          * The following two parameters control the setting of the RS bit on
1999          * transmit descriptors.
2000          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2001          * descriptors have been used.
2002          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2003          * descriptors are used or if the number of descriptors required
2004          * to transmit a packet is greater than the number of free TX
2005          * descriptors.
2006          * The following constraints must be satisfied:
2007          *  tx_rs_thresh must be greater than 0.
2008          *  tx_rs_thresh must be less than the size of the ring minus 2.
2009          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2010          *  tx_rs_thresh must be a divisor of the ring size.
2011          *  tx_free_thresh must be greater than 0.
2012          *  tx_free_thresh must be less than the size of the ring minus 3.
2013          * One descriptor in the TX ring is used as a sentinel to avoid a
2014          * H/W race condition, hence the maximum threshold constraints.
2015          * When set to zero use default values.
2016          */
2017         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2018                         tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2019         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2020                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2021         if (tx_rs_thresh >= (nb_desc - 2)) {
2022                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2023                              "of TX descriptors minus 2. (tx_rs_thresh=%u "
2024                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2025                              (int)dev->data->port_id, (int)queue_idx);
2026                 return -(EINVAL);
2027         }
2028         if (tx_free_thresh >= (nb_desc - 3)) {
2029                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2030                              "number of TX descriptors minus 3. "
2031                              "(tx_free_thresh=%u "
2032                              "port=%d queue=%d)",
2033                              (unsigned int)tx_free_thresh,
2034                              (int)dev->data->port_id, (int)queue_idx);
2035                 return -(EINVAL);
2036         }
2037         if (tx_rs_thresh > tx_free_thresh) {
2038                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2039                              "tx_free_thresh. (tx_free_thresh=%u "
2040                              "tx_rs_thresh=%u port=%d queue=%d)",
2041                              (unsigned int)tx_free_thresh,
2042                              (unsigned int)tx_rs_thresh,
2043                              (int)dev->data->port_id,
2044                              (int)queue_idx);
2045                 return -(EINVAL);
2046         }
2047         if ((nb_desc % tx_rs_thresh) != 0) {
2048                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2049                              "number of TX descriptors. (tx_rs_thresh=%u "
2050                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2051                              (int)dev->data->port_id, (int)queue_idx);
2052                 return -(EINVAL);
2053         }
2054
2055         /*
2056          * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2057          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2058          * by the NIC and all descriptors are written back after the NIC
2059          * accumulates WTHRESH descriptors.
2060          */
2061         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2062                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2063                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2064                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2065                              (int)dev->data->port_id, (int)queue_idx);
2066                 return -(EINVAL);
2067         }
2068
2069         /* Free memory prior to re-allocation if needed... */
2070         if (dev->data->tx_queues[queue_idx] != NULL) {
2071                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2072                 dev->data->tx_queues[queue_idx] = NULL;
2073         }
2074
2075         /* First allocate the tx queue data structure */
2076         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2077                                  RTE_CACHE_LINE_SIZE, socket_id);
2078         if (txq == NULL)
2079                 return (-ENOMEM);
2080
2081         /*
2082          * Allocate TX ring hardware descriptors. A memzone large enough to
2083          * handle the maximum ring size is allocated in order to allow for
2084          * resizing in later calls to the queue setup function.
2085          */
2086         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
2087                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2088                         socket_id);
2089         if (tz == NULL) {
2090                 ixgbe_tx_queue_release(txq);
2091                 return (-ENOMEM);
2092         }
2093
2094         txq->nb_tx_desc = nb_desc;
2095         txq->tx_rs_thresh = tx_rs_thresh;
2096         txq->tx_free_thresh = tx_free_thresh;
2097         txq->pthresh = tx_conf->tx_thresh.pthresh;
2098         txq->hthresh = tx_conf->tx_thresh.hthresh;
2099         txq->wthresh = tx_conf->tx_thresh.wthresh;
2100         txq->queue_id = queue_idx;
2101         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2102                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2103         txq->port_id = dev->data->port_id;
2104         txq->txq_flags = tx_conf->txq_flags;
2105         txq->ops = &def_txq_ops;
2106         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2107
2108         /*
2109          * Use VFTDT as the TX tail (TDT) register for virtual functions, TDT otherwise.
2110          */
2111         if (hw->mac.type == ixgbe_mac_82599_vf ||
2112             hw->mac.type == ixgbe_mac_X540_vf ||
2113             hw->mac.type == ixgbe_mac_X550_vf ||
2114             hw->mac.type == ixgbe_mac_X550EM_x_vf)
2115                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2116         else
2117                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2118 #ifndef RTE_LIBRTE_XEN_DOM0
2119         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
2120 #else
2121         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2122 #endif
2123         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2124
2125         /* Allocate software ring */
2126         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2127                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2128                                 RTE_CACHE_LINE_SIZE, socket_id);
2129         if (txq->sw_ring == NULL) {
2130                 ixgbe_tx_queue_release(txq);
2131                 return (-ENOMEM);
2132         }
2133         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2134                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2135
2136         /* set up vector or scalar TX function as appropriate */
2137         ixgbe_set_tx_function(dev, txq);
2138
2139         txq->ops->reset(txq);
2140
2141         dev->data->tx_queues[queue_idx] = txq;
2142
2143
2144         return (0);
2145 }
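/*
 * Usage sketch (application side, hypothetical port/ring-size values):
 * thresholds chosen to pass the checks above - tx_rs_thresh (32) divides
 * the ring size (512), does not exceed tx_free_thresh, and WTHRESH stays 0
 * because tx_rs_thresh is greater than 1.
 */
static inline int
example_setup_tx_queue(uint8_t port_id)
{
        struct rte_eth_txconf txc;

        memset(&txc, 0, sizeof(txc));
        txc.tx_rs_thresh = 32;
        txc.tx_free_thresh = 64;
        txc.tx_thresh.wthresh = 0;

        return rte_eth_tx_queue_setup(port_id, 0, 512,
                                      rte_eth_dev_socket_id(port_id), &txc);
}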
2146
2147 /**
2148  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2149  *
2150  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2151  * in the sw_rsc_ring is not set to NULL but rather points to the next
2152  * mbuf of this RSC aggregation (that has not been completed yet and still
2153  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2154  * just free the first "nb_segs" segments of the cluster explicitly, one by
2155  * one, with rte_pktmbuf_free_seg().
2156  *
2157  * @m scattered cluster head
2158  */
2159 static void __attribute__((cold))
2160 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2161 {
2162         uint8_t i, nb_segs = m->nb_segs;
2163         struct rte_mbuf *next_seg;
2164
2165         for (i = 0; i < nb_segs; i++) {
2166                 next_seg = m->next;
2167                 rte_pktmbuf_free_seg(m);
2168                 m = next_seg;
2169         }
2170 }
2171
2172 static void __attribute__((cold))
2173 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2174 {
2175         unsigned i;
2176
2177 #ifdef RTE_IXGBE_INC_VECTOR
2178         /* SSE Vector driver has a different way of releasing mbufs. */
2179         if (rxq->rx_using_sse) {
2180                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2181                 return;
2182         }
2183 #endif
2184
2185         if (rxq->sw_ring != NULL) {
2186                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2187                         if (rxq->sw_ring[i].mbuf != NULL) {
2188                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2189                                 rxq->sw_ring[i].mbuf = NULL;
2190                         }
2191                 }
2192                 if (rxq->rx_nb_avail) {
2193                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2194                                 struct rte_mbuf *mb;
2195                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2196                                 rte_pktmbuf_free_seg(mb);
2197                         }
2198                         rxq->rx_nb_avail = 0;
2199                 }
2200         }
2201
2202         if (rxq->sw_sc_ring)
2203                 for (i = 0; i < rxq->nb_rx_desc; i++)
2204                         if (rxq->sw_sc_ring[i].fbuf) {
2205                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2206                                 rxq->sw_sc_ring[i].fbuf = NULL;
2207                         }
2208 }
2209
2210 static void __attribute__((cold))
2211 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2212 {
2213         if (rxq != NULL) {
2214                 ixgbe_rx_queue_release_mbufs(rxq);
2215                 rte_free(rxq->sw_ring);
2216                 rte_free(rxq->sw_sc_ring);
2217                 rte_free(rxq);
2218         }
2219 }
2220
2221 void __attribute__((cold))
2222 ixgbe_dev_rx_queue_release(void *rxq)
2223 {
2224         ixgbe_rx_queue_release(rxq);
2225 }
2226
2227 /*
2228  * Check if Rx Burst Bulk Alloc function can be used.
2229  * Return
2230  *        0: the preconditions are satisfied and the bulk allocation function
2231  *           can be used.
2232  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2233  *           function must be used.
2234  */
2235 static inline int __attribute__((cold))
2236 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2237 {
2238         int ret = 0;
2239
2240         /*
2241          * Make sure the following pre-conditions are satisfied:
2242          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2243          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2244          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2245          *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2246          * Scattered packets are not supported.  This should be checked
2247          * outside of this function.
2248          */
2249         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2250                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2251                              "rxq->rx_free_thresh=%d, "
2252                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2253                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2254                 ret = -EINVAL;
2255         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2256                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2257                              "rxq->rx_free_thresh=%d, "
2258                              "rxq->nb_rx_desc=%d",
2259                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2260                 ret = -EINVAL;
2261         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2262                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2263                              "rxq->nb_rx_desc=%d, "
2264                              "rxq->rx_free_thresh=%d",
2265                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2266                 ret = -EINVAL;
2267         } else if (!(rxq->nb_rx_desc <
2268                (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2269                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2270                              "rxq->nb_rx_desc=%d, "
2271                              "IXGBE_MAX_RING_DESC=%d, "
2272                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2273                              rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2274                              RTE_PMD_IXGBE_RX_MAX_BURST);
2275                 ret = -EINVAL;
2276         }
2277
2278         return ret;
2279 }
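/*
 * Sketch of ring parameters that satisfy the preconditions above
 * (illustrative values, assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32):
 * rx_free_thresh is at least 32, smaller than and a divisor of nb_rx_desc,
 * and nb_rx_desc leaves room for the look-ahead area below the ring maximum.
 */
static inline void
example_bulk_alloc_rx_params(struct rte_eth_rxconf *rxc, uint16_t *nb_rx_desc)
{
        memset(rxc, 0, sizeof(*rxc));
        rxc->rx_free_thresh = 32;
        *nb_rx_desc = 512;      /* multiple of 32, well below IXGBE_MAX_RING_DESC */
}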
2280
2281 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2282 static void __attribute__((cold))
2283 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2284 {
2285         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2286         unsigned i;
2287         uint16_t len = rxq->nb_rx_desc;
2288
2289         /*
2290          * By default, the Rx queue setup function allocates enough memory for
2291          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2292          * extra memory at the end of the descriptor ring to be zero'd out. A
2293          * pre-condition for using the Rx burst bulk alloc function is that the
2294          * number of descriptors is less than or equal to
2295          * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2296          * constraints here to see if we need to zero out memory after the end
2297          * of the H/W descriptor ring.
2298          */
2299         if (adapter->rx_bulk_alloc_allowed)
2300                 /* zero out extra memory */
2301                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2302
2303         /*
2304          * Zero out HW ring memory. Zero out extra memory at the end of
2305          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2306          * reads extra memory as zeros.
2307          */
2308         for (i = 0; i < len; i++) {
2309                 rxq->rx_ring[i] = zeroed_desc;
2310         }
2311
2312         /*
2313          * initialize extra software ring entries. Space for these extra
2314          * entries is always allocated
2315          */
2316         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2317         for (i = rxq->nb_rx_desc; i < len; ++i) {
2318                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2319         }
2320
2321         rxq->rx_nb_avail = 0;
2322         rxq->rx_next_avail = 0;
2323         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2324         rxq->rx_tail = 0;
2325         rxq->nb_rx_hold = 0;
2326         rxq->pkt_first_seg = NULL;
2327         rxq->pkt_last_seg = NULL;
2328
2329 #ifdef RTE_IXGBE_INC_VECTOR
2330         rxq->rxrearm_start = 0;
2331         rxq->rxrearm_nb = 0;
2332 #endif
2333 }
2334
2335 int __attribute__((cold))
2336 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2337                          uint16_t queue_idx,
2338                          uint16_t nb_desc,
2339                          unsigned int socket_id,
2340                          const struct rte_eth_rxconf *rx_conf,
2341                          struct rte_mempool *mp)
2342 {
2343         const struct rte_memzone *rz;
2344         struct ixgbe_rx_queue *rxq;
2345         struct ixgbe_hw     *hw;
2346         uint16_t len;
2347         struct ixgbe_adapter *adapter =
2348                 (struct ixgbe_adapter *)dev->data->dev_private;
2349
2350         PMD_INIT_FUNC_TRACE();
2351         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2352
2353         /*
2354          * Validate number of receive descriptors.
2355          * It must not exceed the hardware maximum and must be a multiple
2356          * of IXGBE_RXD_ALIGN.
2357          */
2358         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2359                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2360                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2361                 return (-EINVAL);
2362         }
2363
2364         /* Free memory prior to re-allocation if needed... */
2365         if (dev->data->rx_queues[queue_idx] != NULL) {
2366                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2367                 dev->data->rx_queues[queue_idx] = NULL;
2368         }
2369
2370         /* First allocate the rx queue data structure */
2371         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2372                                  RTE_CACHE_LINE_SIZE, socket_id);
2373         if (rxq == NULL)
2374                 return (-ENOMEM);
2375         rxq->mb_pool = mp;
2376         rxq->nb_rx_desc = nb_desc;
2377         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2378         rxq->queue_id = queue_idx;
2379         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2380                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2381         rxq->port_id = dev->data->port_id;
2382         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2383                                                         0 : ETHER_CRC_LEN);
2384         rxq->drop_en = rx_conf->rx_drop_en;
2385         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2386
2387         /*
2388          * Allocate RX ring hardware descriptors. A memzone large enough to
2389          * handle the maximum ring size is allocated in order to allow for
2390          * resizing in later calls to the queue setup function.
2391          */
2392         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2393                                    RX_RING_SZ, socket_id);
2394         if (rz == NULL) {
2395                 ixgbe_rx_queue_release(rxq);
2396                 return (-ENOMEM);
2397         }
2398
2399         /*
2400          * Zero init all the descriptors in the ring.
2401          */
2402         memset (rz->addr, 0, RX_RING_SZ);
2403
2404         /*
2405          * Use VFRDT/VFRDH as the ring registers for virtual functions, RDT/RDH otherwise.
2406          */
2407         if (hw->mac.type == ixgbe_mac_82599_vf ||
2408             hw->mac.type == ixgbe_mac_X540_vf ||
2409             hw->mac.type == ixgbe_mac_X550_vf ||
2410             hw->mac.type == ixgbe_mac_X550EM_x_vf) {
2411                 rxq->rdt_reg_addr =
2412                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2413                 rxq->rdh_reg_addr =
2414                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2415         }
2416         else {
2417                 rxq->rdt_reg_addr =
2418                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2419                 rxq->rdh_reg_addr =
2420                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2421         }
2422 #ifndef RTE_LIBRTE_XEN_DOM0
2423         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2424 #else
2425         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2426 #endif
2427         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2428
2429         /*
2430          * Certain constraints must be met in order to use the bulk buffer
2431          * allocation Rx burst function. If any of the Rx queues does not meet
2432          * them, the feature is disabled for the whole port.
2433          */
2434         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2435                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2436                                     "preconditions - canceling the feature for "
2437                                     "the whole port[%d]",
2438                              rxq->queue_id, rxq->port_id);
2439                 adapter->rx_bulk_alloc_allowed = false;
2440         }
2441
2442         /*
2443          * Allocate software ring. Allow for space at the end of the
2444          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2445          * function does not access an invalid memory region.
2446          */
2447         len = nb_desc;
2448         if (adapter->rx_bulk_alloc_allowed)
2449                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2450
2451         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2452                                           sizeof(struct ixgbe_rx_entry) * len,
2453                                           RTE_CACHE_LINE_SIZE, socket_id);
2454         if (!rxq->sw_ring) {
2455                 ixgbe_rx_queue_release(rxq);
2456                 return (-ENOMEM);
2457         }
2458
2459         /*
2460          * Always allocate even if it's not going to be needed in order to
2461          * simplify the code.
2462          *
2463          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2464          * be requested in ixgbe_dev_rx_init(), which is called later from
2465          * dev_start() flow.
2466          */
2467         rxq->sw_sc_ring =
2468                 rte_zmalloc_socket("rxq->sw_sc_ring",
2469                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
2470                                    RTE_CACHE_LINE_SIZE, socket_id);
2471         if (!rxq->sw_sc_ring) {
2472                 ixgbe_rx_queue_release(rxq);
2473                 return (-ENOMEM);
2474         }
2475
2476         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2477                             "dma_addr=0x%"PRIx64,
2478                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2479                      rxq->rx_ring_phys_addr);
2480
2481         if (!rte_is_power_of_2(nb_desc)) {
2482                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2483                                     "preconditions - canceling the feature for "
2484                                     "the whole port[%d]",
2485                              rxq->queue_id, rxq->port_id);
2486                 adapter->rx_vec_allowed = false;
2487         } else
2488                 ixgbe_rxq_vec_setup(rxq);
2489
2490         dev->data->rx_queues[queue_idx] = rxq;
2491
2492         ixgbe_reset_rx_queue(adapter, rxq);
2493
2494         return 0;
2495 }
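/*
 * Usage sketch (application side, hypothetical names and sizes): one Rx
 * queue set up through the generic API that ends up in the function above.
 * The mempool data room must be large enough for the expected frame size.
 */
static inline int
example_setup_rx_queue(uint8_t port_id)
{
        struct rte_eth_rxconf rxc;
        struct rte_mempool *mb_pool;

        mb_pool = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                          RTE_MBUF_DEFAULT_BUF_SIZE,
                                          rte_eth_dev_socket_id(port_id));
        if (mb_pool == NULL)
                return -ENOMEM;

        memset(&rxc, 0, sizeof(rxc));
        rxc.rx_free_thresh = 32;

        return rte_eth_rx_queue_setup(port_id, 0, 512,
                                      rte_eth_dev_socket_id(port_id),
                                      &rxc, mb_pool);
}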
2496
2497 uint32_t
2498 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2499 {
2500 #define IXGBE_RXQ_SCAN_INTERVAL 4
2501         volatile union ixgbe_adv_rx_desc *rxdp;
2502         struct ixgbe_rx_queue *rxq;
2503         uint32_t desc = 0;
2504
2505         if (rx_queue_id >= dev->data->nb_rx_queues) {
2506                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2507                 return 0;
2508         }
2509
2510         rxq = dev->data->rx_queues[rx_queue_id];
2511         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2512
2513         while ((desc < rxq->nb_rx_desc) &&
2514                 (rxdp->wb.upper.status_error &
2515                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2516                 desc += IXGBE_RXQ_SCAN_INTERVAL;
2517                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2518                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2519                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2520                                 desc - rxq->nb_rx_desc]);
2521         }
2522
2523         return desc;
2524 }
2525
2526 int
2527 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2528 {
2529         volatile union ixgbe_adv_rx_desc *rxdp;
2530         struct ixgbe_rx_queue *rxq = rx_queue;
2531         uint32_t desc;
2532
2533         if (unlikely(offset >= rxq->nb_rx_desc))
2534                 return 0;
2535         desc = rxq->rx_tail + offset;
2536         if (desc >= rxq->nb_rx_desc)
2537                 desc -= rxq->nb_rx_desc;
2538
2539         rxdp = &rxq->rx_ring[desc];
2540         return !!(rxdp->wb.upper.status_error &
2541                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2542 }
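/*
 * Usage sketch (application side): both helpers above are reached through
 * the generic ethdev calls.  Hypothetical "more than half full" check on a
 * 512-descriptor queue.
 */
static inline int
example_rx_queue_pressure(uint8_t port_id, uint16_t queue_id)
{
        /* number of descriptors the hardware has already filled */
        uint32_t used = rte_eth_rx_queue_count(port_id, queue_id);

        /* non-zero if the descriptor 256 slots past the SW tail is done */
        int done = rte_eth_rx_descriptor_done(port_id, queue_id, 256);

        return used > 256 || done;
}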
2543
2544 void __attribute__((cold))
2545 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2546 {
2547         unsigned i;
2548         struct ixgbe_adapter *adapter =
2549                 (struct ixgbe_adapter *)dev->data->dev_private;
2550
2551         PMD_INIT_FUNC_TRACE();
2552
2553         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2554                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2555                 if (txq != NULL) {
2556                         txq->ops->release_mbufs(txq);
2557                         txq->ops->reset(txq);
2558                 }
2559         }
2560
2561         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2562                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2563                 if (rxq != NULL) {
2564                         ixgbe_rx_queue_release_mbufs(rxq);
2565                         ixgbe_reset_rx_queue(adapter, rxq);
2566                 }
2567         }
2568 }
2569
2570 void
2571 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2572 {
2573         unsigned i;
2574
2575         PMD_INIT_FUNC_TRACE();
2576
2577         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2578                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2579                 dev->data->rx_queues[i] = NULL;
2580         }
2581         dev->data->nb_rx_queues = 0;
2582
2583         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2584                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2585                 dev->data->tx_queues[i] = NULL;
2586         }
2587         dev->data->nb_tx_queues = 0;
2588 }
2589
2590 /*********************************************************************
2591  *
2592  *  Device RX/TX init functions
2593  *
2594  **********************************************************************/
2595
2596 /**
2597  * Receive Side Scaling (RSS)
2598  * See section 7.1.2.8 in the following document:
2599  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2600  *
2601  * Principles:
2602  * The source and destination IP addresses of the IP header and the source
2603  * and destination ports of TCP/UDP headers, if any, of received packets are
2604  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2605  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2606  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 4-bit
2607  * RSS output index which is used as the RX queue index where to store the
2608  * received packets.
2609  * The following output is supplied in the RX write-back descriptor:
2610  *     - 32-bit result of the Microsoft RSS hash function,
2611  *     - 4-bit RSS type field.
2612  */
2613
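/*
 * Illustrative sketch only (not used by the driver): given the 32-bit RSS
 * hash reported in the Rx write-back descriptor and a software copy of the
 * 128-entry RETA, the queue picked by the hardware can be recomputed as
 * below.  The helper name and the reta_copy argument are hypothetical; only
 * the "hash & 0x7F" indexing follows the scheme described above.
 */
static inline uint8_t
ixgbe_rss_hash_to_queue_sketch(uint32_t rss_hash, const uint8_t reta_copy[128])
{
        /* The seven LSBs of the hash select one of the 128 RETA entries. */
        return reta_copy[rss_hash & 0x7F];
}
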
2614 /*
2615  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2616  * Used as the default key.
2617  */
2618 static uint8_t rss_intel_key[40] = {
2619         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2620         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2621         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2622         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2623         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2624 };
2625
2626 static void
2627 ixgbe_rss_disable(struct rte_eth_dev *dev)
2628 {
2629         struct ixgbe_hw *hw;
2630         uint32_t mrqc;
2631         uint32_t mrqc_reg;
2632
2633         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2634         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2635         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2636         mrqc &= ~IXGBE_MRQC_RSSEN;
2637         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2638 }
2639
2640 static void
2641 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2642 {
2643         uint8_t  *hash_key;
2644         uint32_t mrqc;
2645         uint32_t rss_key;
2646         uint64_t rss_hf;
2647         uint16_t i;
2648         uint32_t mrqc_reg;
2649         uint32_t rssrk_reg;
2650
2651         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2652         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2653
2654         hash_key = rss_conf->rss_key;
2655         if (hash_key != NULL) {
2656                 /* Fill in RSS hash key */
2657                 for (i = 0; i < 10; i++) {
2658                         rss_key  = hash_key[(i * 4)];
2659                         rss_key |= hash_key[(i * 4) + 1] << 8;
2660                         rss_key |= hash_key[(i * 4) + 2] << 16;
2661                         rss_key |= hash_key[(i * 4) + 3] << 24;
2662                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2663                 }
2664         }
2665
2666         /* Set configured hashing protocols in MRQC register */
2667         rss_hf = rss_conf->rss_hf;
2668         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2669         if (rss_hf & ETH_RSS_IPV4)
2670                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2671         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2672                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2673         if (rss_hf & ETH_RSS_IPV6)
2674                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2675         if (rss_hf & ETH_RSS_IPV6_EX)
2676                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2677         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2678                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2679         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2680                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2681         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2682                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2683         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2684                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2685         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2686                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2687         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2688 }
2689
2690 int
2691 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2692                           struct rte_eth_rss_conf *rss_conf)
2693 {
2694         struct ixgbe_hw *hw;
2695         uint32_t mrqc;
2696         uint64_t rss_hf;
2697         uint32_t mrqc_reg;
2698
2699         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2700
2701         if (!ixgbe_rss_update_sp(hw->mac.type)) {
2702                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2703                         "NIC.");
2704                 return -ENOTSUP;
2705         }
2706         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2707
2708         /*
2709          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2710          *     "RSS enabling cannot be done dynamically while it must be
2711          *      preceded by a software reset"
2712          * Before changing anything, first check that the update RSS operation
2713          * does not attempt to disable RSS, if RSS was enabled at
2714          * initialization time, or does not attempt to enable RSS, if RSS was
2715          * disabled at initialization time.
2716          */
2717         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2718         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2719         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2720                 if (rss_hf != 0) /* Enable RSS */
2721                         return -(EINVAL);
2722                 return 0; /* Nothing to do */
2723         }
2724         /* RSS enabled */
2725         if (rss_hf == 0) /* Disable RSS */
2726                 return -(EINVAL);
2727         ixgbe_hw_rss_hash_set(hw, rss_conf);
2728         return 0;
2729 }
2730
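/*
 * Application-side usage sketch (the helper name and the all-zero key are
 * made up for illustration; port 0 is assumed to have been configured with
 * an RSS mq_mode at init time, since ixgbe_dev_rss_hash_update() above
 * refuses to toggle RSS on or off later).  It narrows the hash input to TCP
 * flows through the generic rte_eth_dev_rss_hash_update() ethdev call.
 */
static inline int
ixgbe_example_restrict_rss_to_tcp(void)
{
        static uint8_t key[40]; /* all-zero key, just for illustration */
        struct rte_eth_rss_conf conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP,
        };

        return rte_eth_dev_rss_hash_update(0, &conf);
}
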
2731 int
2732 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2733                             struct rte_eth_rss_conf *rss_conf)
2734 {
2735         struct ixgbe_hw *hw;
2736         uint8_t *hash_key;
2737         uint32_t mrqc;
2738         uint32_t rss_key;
2739         uint64_t rss_hf;
2740         uint16_t i;
2741         uint32_t mrqc_reg;
2742         uint32_t rssrk_reg;
2743
2744         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2745         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2746         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2747         hash_key = rss_conf->rss_key;
2748         if (hash_key != NULL) {
2749                 /* Return RSS hash key */
2750                 for (i = 0; i < 10; i++) {
2751                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2752                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2753                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2754                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2755                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2756                 }
2757         }
2758
2759         /* Get RSS functions configured in MRQC register */
2760         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2761         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2762                 rss_conf->rss_hf = 0;
2763                 return 0;
2764         }
2765         rss_hf = 0;
2766         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2767                 rss_hf |= ETH_RSS_IPV4;
2768         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2769                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2770         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2771                 rss_hf |= ETH_RSS_IPV6;
2772         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2773                 rss_hf |= ETH_RSS_IPV6_EX;
2774         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2775                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2776         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2777                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2778         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2779                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2780         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2781                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2782         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2783                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2784         rss_conf->rss_hf = rss_hf;
2785         return 0;
2786 }
2787
2788 static void
2789 ixgbe_rss_configure(struct rte_eth_dev *dev)
2790 {
2791         struct rte_eth_rss_conf rss_conf;
2792         struct ixgbe_hw *hw;
2793         uint32_t reta;
2794         uint16_t i;
2795         uint16_t j;
2796         uint16_t sp_reta_size;
2797         uint32_t reta_reg;
2798
2799         PMD_INIT_FUNC_TRACE();
2800         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2801
2802         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2803
2804         /*
2805          * Fill in redirection table
2806          * The byte-swap is needed because NIC registers are in
2807          * little-endian order.
2808          */
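        /*
         * Worked example (illustrative): with four Rx queues the queue index
         * pattern 0,1,2,3 repeats, so each group of four entries is built up
         * as reta == 0x00010203 and written as rte_bswap32(reta) ==
         * 0x03020100, i.e. entry 0 lands in the least significant byte.
         */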
2809         reta = 0;
2810         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2811                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2812
2813                 if (j == dev->data->nb_rx_queues)
2814                         j = 0;
2815                 reta = (reta << 8) | j;
2816                 if ((i & 3) == 3)
2817                         IXGBE_WRITE_REG(hw, reta_reg,
2818                                         rte_bswap32(reta));
2819         }
2820
2821         /*
2822          * Configure the RSS key and the RSS protocols used to compute
2823          * the RSS hash of input packets.
2824          */
2825         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2826         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2827                 ixgbe_rss_disable(dev);
2828                 return;
2829         }
2830         if (rss_conf.rss_key == NULL)
2831                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2832         ixgbe_hw_rss_hash_set(hw, &rss_conf);
2833 }
2834
2835 #define NUM_VFTA_REGISTERS 128
2836 #define NIC_RX_BUFFER_SIZE 0x200
2837 #define X550_RX_BUFFER_SIZE 0x180
2838
2839 static void
2840 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2841 {
2842         struct rte_eth_vmdq_dcb_conf *cfg;
2843         struct ixgbe_hw *hw;
2844         enum rte_eth_nb_pools num_pools;
2845         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2846         uint16_t pbsize;
2847         uint8_t nb_tcs; /* number of traffic classes */
2848         int i;
2849
2850         PMD_INIT_FUNC_TRACE();
2851         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2852         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2853         num_pools = cfg->nb_queue_pools;
2854         /* Check we have a valid number of pools */
2855         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2856                 ixgbe_rss_disable(dev);
2857                 return;
2858         }
2859         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2860         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2861
2862         /*
2863          * RXPBSIZE
2864          * split rx buffer up into sections, each for 1 traffic class
2865          */
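        /*
         * Worked example (illustrative): with 16 pools there are 8 TCs, so
         * the default 0x200 buffer is split into 0x40 per TC and the X550
         * 0x180 buffer into 0x30 per TC (RXPBSIZE units).
         */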
2866         switch (hw->mac.type) {
2867         case ixgbe_mac_X550:
2868         case ixgbe_mac_X550EM_x:
2869                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2870                 break;
2871         default:
2872                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2873                 break;
2874         }
2875         for (i = 0 ; i < nb_tcs; i++) {
2876                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2877                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2878                 /* clear 10 bits. */
2879                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2880                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2881         }
2882         /* zero alloc all unused TCs */
2883         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2884                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2885                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2886                 /* clear 10 bits. */
2887                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2888         }
2889
2890         /* MRQC: enable vmdq and dcb */
2891         mrqc = ((num_pools == ETH_16_POOLS) ? \
2892                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2893         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2894
2895         /* PFVTCTL: turn on virtualisation and set the default pool */
2896         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2897         if (cfg->enable_default_pool) {
2898                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2899         } else {
2900                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2901         }
2902
2903         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2904
2905         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
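        /*
         * Illustrative example: with the identity mapping dcb_tc[i] == i for
         * all eight user priorities, the loop below yields
         * queue_mapping == 0x00FAC688 (priority 0 in bits 2:0, priority 7 in
         * bits 23:21).
         */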
2906         queue_mapping = 0;
2907         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2908                 /*
2909                  * mapping is done with 3 bits per priority,
2910                  * so shift by i*3 each time
2911                  */
2912                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2913
2914         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
2915
2916         /* RTRPCS: DCB related */
2917         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2918
2919         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2920         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2921         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
2922         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2923
2924         /* VFTA - enable all vlan filters */
2925         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2926                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2927         }
2928
2929         /* VFRE: pool enabling for receive - 16 or 32 */
2930         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2931                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2932
2933         /*
2934          * MPSAR - allow pools to read specific mac addresses
2935          * In this case, all pools should be able to read from mac addr 0
2936          */
2937         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2938         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2939
2940         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2941         for (i = 0; i < cfg->nb_pool_maps; i++) {
2942                 /* set vlan id in VF register and set the valid bit */
2943                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2944                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
2945                 /*
2946                  * Put the allowed pools in VFB reg. As we only have 16 or 32
2947                  * pools, we only need to use the first half of the register
2948                  * i.e. bits 0-31
2949                  */
2950                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2951         }
2952 }
2953
2954 /**
2955  * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2956  * @hw: pointer to hardware structure
2957  * @dcb_config: pointer to ixgbe_dcb_config structure
2958  */
2959 static void
2960 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2961                struct ixgbe_dcb_config *dcb_config)
2962 {
2963         uint32_t reg;
2964         uint32_t q;
2965
2966         PMD_INIT_FUNC_TRACE();
2967         if (hw->mac.type != ixgbe_mac_82598EB) {
2968                 /* Disable the Tx desc arbiter so that MTQC can be changed */
2969                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2970                 reg |= IXGBE_RTTDCS_ARBDIS;
2971                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2972
2973                 /* Enable DCB for Tx with 8 TCs */
2974                 if (dcb_config->num_tcs.pg_tcs == 8) {
2975                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2976                 }
2977                 else {
2978                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2979                 }
2980                 if (dcb_config->vt_mode)
2981                     reg |= IXGBE_MTQC_VT_ENA;
2982                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2983
2984                 /* Disable drop for all queues */
2985                 for (q = 0; q < 128; q++)
2986                         IXGBE_WRITE_REG(hw, IXGBE_QDE,
2987                      (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2988
2989                 /* Enable the Tx desc arbiter */
2990                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2991                 reg &= ~IXGBE_RTTDCS_ARBDIS;
2992                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2993
2994                 /* Enable Security TX Buffer IFG for DCB */
2995                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2996                 reg |= IXGBE_SECTX_DCB;
2997                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2998         }
2999         return;
3000 }
3001
3002 /**
3003  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3004  * @dev: pointer to rte_eth_dev structure
3005  * @dcb_config: pointer to ixgbe_dcb_config structure
3006  */
3007 static void
3008 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3009                         struct ixgbe_dcb_config *dcb_config)
3010 {
3011         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3012                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3013         struct ixgbe_hw *hw =
3014                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3015
3016         PMD_INIT_FUNC_TRACE();
3017         if (hw->mac.type != ixgbe_mac_82598EB)
3018                 /*PF VF Transmit Enable*/
3019                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3020                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3021
3022         /*Configure general DCB TX parameters*/
3023         ixgbe_dcb_tx_hw_config(hw,dcb_config);
3024         return;
3025 }
3026
3027 static void
3028 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3029                         struct ixgbe_dcb_config *dcb_config)
3030 {
3031         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3032                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3033         struct ixgbe_dcb_tc_config *tc;
3034         uint8_t i,j;
3035
3036         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3037         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
3038                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3039                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3040         }
3041         else {
3042                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3043                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3044         }
3045         /* User Priority to Traffic Class mapping */
3046         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3047                 j = vmdq_rx_conf->dcb_tc[i];
3048                 tc = &dcb_config->tc_config[j];
3049                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3050                                                 (uint8_t)(1 << j);
3051         }
3052 }
3053
3054 static void
3055 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3056                         struct ixgbe_dcb_config *dcb_config)
3057 {
3058         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3059                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3060         struct ixgbe_dcb_tc_config *tc;
3061         uint8_t i,j;
3062
3063         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3064         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
3065                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3066                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3067         }
3068         else {
3069                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3070                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3071         }
3072
3073         /* User Priority to Traffic Class mapping */
3074         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3075                 j = vmdq_tx_conf->dcb_tc[i];
3076                 tc = &dcb_config->tc_config[j];
3077                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3078                                                 (uint8_t)(1 << j);
3079         }
3080         return;
3081 }
3082
3083 static void
3084 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3085                 struct ixgbe_dcb_config *dcb_config)
3086 {
3087         struct rte_eth_dcb_rx_conf *rx_conf =
3088                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3089         struct ixgbe_dcb_tc_config *tc;
3090         uint8_t i,j;
3091
3092         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3093         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3094
3095         /* User Priority to Traffic Class mapping */
3096         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3097                 j = rx_conf->dcb_tc[i];
3098                 tc = &dcb_config->tc_config[j];
3099                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3100                                                 (uint8_t)(1 << j);
3101         }
3102 }
3103
3104 static void
3105 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3106                 struct ixgbe_dcb_config *dcb_config)
3107 {
3108         struct rte_eth_dcb_tx_conf *tx_conf =
3109                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3110         struct ixgbe_dcb_tc_config *tc;
3111         uint8_t i,j;
3112
3113         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3114         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3115
3116         /* User Priority to Traffic Class mapping */
3117         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3118                 j = tx_conf->dcb_tc[i];
3119                 tc = &dcb_config->tc_config[j];
3120                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3121                                                 (uint8_t)(1 << j);
3122         }
3123 }
3124
3125 /**
3126  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3127  * @hw: pointer to hardware structure
3128  * @dcb_config: pointer to ixgbe_dcb_config structure
3129  */
3130 static void
3131 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3132                struct ixgbe_dcb_config *dcb_config)
3133 {
3134         uint32_t reg;
3135         uint32_t vlanctrl;
3136         uint8_t i;
3137
3138         PMD_INIT_FUNC_TRACE();
3139         /*
3140          * Disable the arbiter before changing parameters
3141          * (always enable recycle mode; WSP)
3142          */
3143         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3144         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3145
3146         if (hw->mac.type != ixgbe_mac_82598EB) {
3147                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3148                 if (dcb_config->num_tcs.pg_tcs == 4) {
3149                         if (dcb_config->vt_mode)
3150                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3151                                         IXGBE_MRQC_VMDQRT4TCEN;
3152                         else {
3153                                 /* no matter the mode is DCB or DCB_RSS, just
3154                                  * set the MRQE to RSSXTCEN. RSS is controlled
3155                                  * by RSS_FIELD
3156                                  */
3157                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3158                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3159                                         IXGBE_MRQC_RTRSS4TCEN;
3160                         }
3161                 }
3162                 if (dcb_config->num_tcs.pg_tcs == 8) {
3163                         if (dcb_config->vt_mode)
3164                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3165                                         IXGBE_MRQC_VMDQRT8TCEN;
3166                         else {
3167                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3168                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3169                                         IXGBE_MRQC_RTRSS8TCEN;
3170                         }
3171                 }
3172
3173                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3174         }
3175
3176         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3177         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3178         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3179         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3180
3181         /* VFTA - enable all vlan filters */
3182         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3183                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3184         }
3185
3186         /*
3187          * Configure Rx packet plane (recycle mode; WSP) and
3188          * enable arbiter
3189          */
3190         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3191         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3192
3193         return;
3194 }
3195
3196 static void
3197 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3198                         uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3199 {
3200         switch (hw->mac.type) {
3201         case ixgbe_mac_82598EB:
3202                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3203                 break;
3204         case ixgbe_mac_82599EB:
3205         case ixgbe_mac_X540:
3206         case ixgbe_mac_X550:
3207         case ixgbe_mac_X550EM_x:
3208                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3209                                                   tsa, map);
3210                 break;
3211         default:
3212                 break;
3213         }
3214 }
3215
3216 static void
3217 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3218                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3219 {
3220         switch (hw->mac.type) {
3221         case ixgbe_mac_82598EB:
3222                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3223                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3224                 break;
3225         case ixgbe_mac_82599EB:
3226         case ixgbe_mac_X540:
3227         case ixgbe_mac_X550:
3228         case ixgbe_mac_X550EM_x:
3229                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3230                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3231                 break;
3232         default:
3233                 break;
3234         }
3235 }
3236
3237 #define DCB_RX_CONFIG  1
3238 #define DCB_TX_CONFIG  1
3239 #define DCB_TX_PB      1024
3240 /**
3241  * ixgbe_dcb_hw_configure - Enable DCB and configure
3242  * general DCB in VT mode and non-VT mode parameters
3243  * @dev: pointer to rte_eth_dev structure
3244  * @dcb_config: pointer to ixgbe_dcb_config structure
3245  */
3246 static int
3247 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3248                         struct ixgbe_dcb_config *dcb_config)
3249 {
3250         int     ret = 0;
3251         uint8_t i,pfc_en,nb_tcs;
3252         uint16_t pbsize, rx_buffer_size;
3253         uint8_t config_dcb_rx = 0;
3254         uint8_t config_dcb_tx = 0;
3255         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3256         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3257         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3258         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3259         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3260         struct ixgbe_dcb_tc_config *tc;
3261         uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3262         struct ixgbe_hw *hw =
3263                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3264
3265         switch(dev->data->dev_conf.rxmode.mq_mode){
3266         case ETH_MQ_RX_VMDQ_DCB:
3267                 dcb_config->vt_mode = true;
3268                 if (hw->mac.type != ixgbe_mac_82598EB) {
3269                         config_dcb_rx = DCB_RX_CONFIG;
3270                         /*
3271                          *get dcb and VT rx configuration parameters
3272                          *from rte_eth_conf
3273                          */
3274                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3275                         /*Configure general VMDQ and DCB RX parameters*/
3276                         ixgbe_vmdq_dcb_configure(dev);
3277                 }
3278                 break;
3279         case ETH_MQ_RX_DCB:
3280         case ETH_MQ_RX_DCB_RSS:
3281                 dcb_config->vt_mode = false;
3282                 config_dcb_rx = DCB_RX_CONFIG;
3283                 /* Get dcb RX configuration parameters from rte_eth_conf */
3284                 ixgbe_dcb_rx_config(dev, dcb_config);
3285                 /*Configure general DCB RX parameters*/
3286                 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3287                 break;
3288         default:
3289                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3290                 break;
3291         }
3292         switch (dev->data->dev_conf.txmode.mq_mode) {
3293         case ETH_MQ_TX_VMDQ_DCB:
3294                 dcb_config->vt_mode = true;
3295                 config_dcb_tx = DCB_TX_CONFIG;
3296                 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3297                 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3298                 /*Configure general VMDQ and DCB TX parameters*/
3299                 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3300                 break;
3301
3302         case ETH_MQ_TX_DCB:
3303                 dcb_config->vt_mode = false;
3304                 config_dcb_tx = DCB_TX_CONFIG;
3305                 /*get DCB TX configuration parameters from rte_eth_conf*/
3306                 ixgbe_dcb_tx_config(dev, dcb_config);
3307                 /*Configure general DCB TX parameters*/
3308                 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3309                 break;
3310         default:
3311                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3312                 break;
3313         }
3314
3315         nb_tcs = dcb_config->num_tcs.pfc_tcs;
3316         /* Unpack map */
3317         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3318         if(nb_tcs == ETH_4_TCS) {
3319                 /* Avoid un-configured priority mapping to TC0 */
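                /*
                 * Illustrative example: if priorities 0..3 already map to
                 * TCs 0..3, mask ends up as 0xF0 and the second loop below
                 * spreads priorities 4..7 over TCs 4..7 instead of letting
                 * them all fall back to TC0.
                 */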
3320                 uint8_t j = 4;
3321                 uint8_t mask = 0xFF;
3322                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3323                         mask = (uint8_t)(mask & (~ (1 << map[i])));
3324                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3325                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3326                                 map[j++] = i;
3327                         mask >>= 1;
3328                 }
3329                 /* Re-configure 4 TCs BW */
3330                 for (i = 0; i < nb_tcs; i++) {
3331                         tc = &dcb_config->tc_config[i];
3332                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3333                                                 (uint8_t)(100 / nb_tcs);
3334                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3335                                                 (uint8_t)(100 / nb_tcs);
3336                 }
3337                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3338                         tc = &dcb_config->tc_config[i];
3339                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3340                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3341                 }
3342         }
3343
3344         switch (hw->mac.type) {
3345         case ixgbe_mac_X550:
3346         case ixgbe_mac_X550EM_x:
3347                 rx_buffer_size = X550_RX_BUFFER_SIZE;
3348                 break;
3349         default:
3350                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3351                 break;
3352         }
3353
3354         if(config_dcb_rx) {
3355                 /* Set RX buffer size */
3356                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3357                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3358                 for (i = 0 ; i < nb_tcs; i++) {
3359                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3360                 }
3361                 /* zero alloc all unused TCs */
3362                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3363                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3364                 }
3365         }
3366         if(config_dcb_tx) {
3367                 /* Only an equally distributed Tx packet buffer strategy is supported. */
3368                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3369                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3370                 for (i = 0; i < nb_tcs; i++) {
3371                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3372                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3373                 }
3374                 /* Clear unused TCs, if any, to zero buffer size*/
3375                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3376                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3377                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3378                 }
3379         }
3380
3381         /*Calculates traffic class credits*/
3382         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3383                                 IXGBE_DCB_TX_CONFIG);
3384         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3385                                 IXGBE_DCB_RX_CONFIG);
3386
3387         if(config_dcb_rx) {
3388                 /* Unpack CEE standard containers */
3389                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3390                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3391                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3392                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3393                 /* Configure PG(ETS) RX */
3394                 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3395         }
3396
3397         if(config_dcb_tx) {
3398                 /* Unpack CEE standard containers */
3399                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3400                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3401                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3402                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3403                 /* Configure PG(ETS) TX */
3404                 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3405         }
3406
3407         /*Configure queue statistics registers*/
3408         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3409
3410         /* Check if the PFC is supported */
3411         if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3412                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3413                 for (i = 0; i < nb_tcs; i++) {
3414                         /*
3415                          * With 8 TCs and the default 0x200 Rx buffer, pbsize is
3416                          * 64, so high_water defaults to 48 and low_water to 16.
3417                          */
3418                         hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3419                         hw->fc.low_water[i] = pbsize / 4;
3420                         /* Enable pfc for this TC */
3421                         tc = &dcb_config->tc_config[i];
3422                         tc->pfc = ixgbe_dcb_pfc_enabled;
3423                 }
3424                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3425                 if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3426                         pfc_en &= 0x0F;
3427                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3428         }
3429
3430         return ret;
3431 }
3432
3433 /**
3434  * ixgbe_configure_dcb - Configure DCB  Hardware
3435  * @dev: pointer to rte_eth_dev
3436  */
3437 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3438 {
3439         struct ixgbe_dcb_config *dcb_cfg =
3440                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3441         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3442
3443         PMD_INIT_FUNC_TRACE();
3444
3445         /* check support mq_mode for DCB */
3446         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3447             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3448             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3449                 return;
3450
3451         if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3452                 return;
3453
3454         /** Configure DCB hardware **/
3455         ixgbe_dcb_hw_configure(dev, dcb_cfg);
3456
3457         return;
3458 }
3459
3460 /*
3461  * VMDq is only supported on 10 GbE NICs.
3462  */
3463 static void
3464 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3465 {
3466         struct rte_eth_vmdq_rx_conf *cfg;
3467         struct ixgbe_hw *hw;
3468         enum rte_eth_nb_pools num_pools;
3469         uint32_t mrqc, vt_ctl, vlanctrl;
3470         uint32_t vmolr = 0;
3471         int i;
3472
3473         PMD_INIT_FUNC_TRACE();
3474         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3475         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3476         num_pools = cfg->nb_queue_pools;
3477
3478         ixgbe_rss_disable(dev);
3479
3480         /* MRQC: enable vmdq */
3481         mrqc = IXGBE_MRQC_VMDQEN;
3482         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3483
3484         /* PFVTCTL: turn on virtualisation and set the default pool */
3485         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3486         if (cfg->enable_default_pool)
3487                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3488         else
3489                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3490
3491         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3492
3493         for (i = 0; i < (int)num_pools; i++) {
3494                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3495                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3496         }
3497
3498         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3499         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3500         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3501         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3502
3503         /* VFTA - enable all vlan filters */
3504         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3505                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3506
3507         /* VFRE: pool enabling for receive - 64 */
3508         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3509         if (num_pools == ETH_64_POOLS)
3510                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3511
3512         /*
3513          * MPSAR - allow pools to read specific mac addresses
3514          * In this case, all pools should be able to read from mac addr 0
3515          */
3516         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3517         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3518
3519         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3520         for (i = 0; i < cfg->nb_pool_maps; i++) {
3521                 /* set vlan id in VF register and set the valid bit */
3522                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3523                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3524                 /*
3525                  * Put the allowed pools in the VFB registers. The 64-bit
3526                  * pools bitmap spans two 32-bit registers per filter:
3527                  * pools 0-31 go in VLVFB(2*i), pools 32-63 in VLVFB(2*i+1).
3528                  */
3529                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3530                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3531                                         (cfg->pool_map[i].pools & UINT32_MAX));
3532                 else
3533                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3534                                         ((cfg->pool_map[i].pools >> 32) \
3535                                         & UINT32_MAX));
3536
3537         }
3538
3539         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3540         if (cfg->enable_loop_back) {
3541                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3542                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3543                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3544         }
3545
3546         IXGBE_WRITE_FLUSH(hw);
3547 }
3548
3549 /*
3550  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3551  * @hw: pointer to hardware structure
3552  */
3553 static void
3554 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3555 {
3556         uint32_t reg;
3557         uint32_t q;
3558
3559         PMD_INIT_FUNC_TRACE();
3560         /*PF VF Transmit Enable*/
3561         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3562         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3563
3564         /* Disable the Tx desc arbiter so that MTQC can be changed */
3565         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3566         reg |= IXGBE_RTTDCS_ARBDIS;
3567         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3568
3569         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3570         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3571
3572         /* Disable drop for all queues */
3573         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3574                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3575                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3576
3577         /* Enable the Tx desc arbiter */
3578         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3579         reg &= ~IXGBE_RTTDCS_ARBDIS;
3580         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3581
3582         IXGBE_WRITE_FLUSH(hw);
3583
3584         return;
3585 }
3586
3587 static int __attribute__((cold))
3588 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3589 {
3590         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3591         uint64_t dma_addr;
3592         unsigned i;
3593
3594         /* Initialize software ring entries */
3595         for (i = 0; i < rxq->nb_rx_desc; i++) {
3596                 volatile union ixgbe_adv_rx_desc *rxd;
3597                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3598                 if (mbuf == NULL) {
3599                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3600                                      (unsigned) rxq->queue_id);
3601                         return (-ENOMEM);
3602                 }
3603
3604                 rte_mbuf_refcnt_set(mbuf, 1);
3605                 mbuf->next = NULL;
3606                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3607                 mbuf->nb_segs = 1;
3608                 mbuf->port = rxq->port_id;
3609
3610                 dma_addr =
3611                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3612                 rxd = &rxq->rx_ring[i];
3613                 rxd->read.hdr_addr = 0;
3614                 rxd->read.pkt_addr = dma_addr;
3615                 rxe[i].mbuf = mbuf;
3616         }
3617
3618         return 0;
3619 }
3620
3621 static int
3622 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3623 {
3624         struct ixgbe_hw *hw;
3625         uint32_t mrqc;
3626
3627         ixgbe_rss_configure(dev);
3628
3629         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3630
3631         /* MRQC: enable VF RSS */
3632         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3633         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3634         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3635         case ETH_64_POOLS:
3636                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3637                 break;
3638
3639         case ETH_32_POOLS:
3640                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3641                 break;
3642
3643         default:
3644                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3645                 return -EINVAL;
3646         }
3647
3648         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3649
3650         return 0;
3651 }
3652
3653 static int
3654 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3655 {
3656         struct ixgbe_hw *hw =
3657                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3658
3659         switch (RTE_ETH_DEV_SRIOV(dev).active) {
3660         case ETH_64_POOLS:
3661                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3662                         IXGBE_MRQC_VMDQEN);
3663                 break;
3664
3665         case ETH_32_POOLS:
3666                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3667                         IXGBE_MRQC_VMDQRT4TCEN);
3668                 break;
3669
3670         case ETH_16_POOLS:
3671                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3672                         IXGBE_MRQC_VMDQRT8TCEN);
3673                 break;
3674         default:
3675                 PMD_INIT_LOG(ERR,
3676                         "invalid pool number in IOV mode");
3677                 break;
3678         }
3679         return 0;
3680 }
3681
3682 static int
3683 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3684 {
3685         struct ixgbe_hw *hw =
3686                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3687
3688         if (hw->mac.type == ixgbe_mac_82598EB)
3689                 return 0;
3690
3691         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3692                 /*
3693                  * SRIOV inactive scheme
3694                  * any DCB/RSS w/o VMDq multi-queue setting
3695                  */
3696                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3697                 case ETH_MQ_RX_RSS:
3698                 case ETH_MQ_RX_DCB_RSS:
3699                 case ETH_MQ_RX_VMDQ_RSS:
3700                         ixgbe_rss_configure(dev);
3701                         break;
3702
3703                 case ETH_MQ_RX_VMDQ_DCB:
3704                         ixgbe_vmdq_dcb_configure(dev);
3705                         break;
3706
3707                 case ETH_MQ_RX_VMDQ_ONLY:
3708                         ixgbe_vmdq_rx_hw_configure(dev);
3709                         break;
3710
3711                 case ETH_MQ_RX_NONE:
3712                 default:
3713                         /* if mq_mode is none, disable rss mode.*/
3714                         ixgbe_rss_disable(dev);
3715                         break;
3716                 }
3717         } else {
3718                 /*
3719                  * SRIOV active scheme
3720                  * Support RSS together with VMDq & SRIOV
3721                  */
3722                 switch (dev->data->dev_conf.rxmode.mq_mode) {
3723                 case ETH_MQ_RX_RSS:
3724                 case ETH_MQ_RX_VMDQ_RSS:
3725                         ixgbe_config_vf_rss(dev);
3726                         break;
3727
3728                 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3729                 case ETH_MQ_RX_VMDQ_DCB:
3730                 case ETH_MQ_RX_VMDQ_DCB_RSS:
3731                         PMD_INIT_LOG(ERR,
3732                                 "Could not support DCB with VMDq & SRIOV");
3733                         return -1;
3734                 default:
3735                         ixgbe_config_vf_default(dev);
3736                         break;
3737                 }
3738         }
3739
3740         return 0;
3741 }
3742
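/*
 * Application-side configuration sketch (the helper name and parameters are
 * made up for illustration).  Requesting plain RSS in rte_eth_dev_configure()
 * is what makes ixgbe_dev_mq_rx_configure() above take the
 * ixgbe_rss_configure() branch on a port without SR-IOV.
 */
static inline int
ixgbe_example_configure_rss_port(uint8_t port_id, uint16_t nb_rxq,
                                 uint16_t nb_txq)
{
        struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
                },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = NULL, /* fall back to rss_intel_key */
                                .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
                        },
                },
        };

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}
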
3743 static int
3744 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3745 {
3746         struct ixgbe_hw *hw =
3747                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3748         uint32_t mtqc;
3749         uint32_t rttdcs;
3750
3751         if (hw->mac.type == ixgbe_mac_82598EB)
3752                 return 0;
3753
3754         /* disable arbiter before setting MTQC */
3755         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3756         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3757         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3758
3759         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3760                 /*
3761                  * SRIOV inactive scheme
3762                  * any DCB w/o VMDq multi-queue setting
3763                  */
3764                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3765                         ixgbe_vmdq_tx_hw_configure(hw);
3766                 else {
3767                         mtqc = IXGBE_MTQC_64Q_1PB;
3768                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3769                 }
3770         } else {
3771                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3772
3773                 /*
3774                  * SRIOV active scheme
3775                  * FIXME if support DCB together with VMDq & SRIOV
3776                  */
3777                 case ETH_64_POOLS:
3778                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3779                         break;
3780                 case ETH_32_POOLS:
3781                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3782                         break;
3783                 case ETH_16_POOLS:
3784                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3785                                 IXGBE_MTQC_8TC_8TQ;
3786                         break;
3787                 default:
3788                         mtqc = IXGBE_MTQC_64Q_1PB;
3789                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3790                 }
3791                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3792         }
3793
3794         /* re-enable arbiter */
3795         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3796         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3797
3798         return 0;
3799 }
3800
3801 /**
3802  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3803  *
3804  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3805  * spec rev. 3.0 chapter 8.2.3.8.13.
3806  *
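 * Illustrative example: with the common 2 KB of usable mbuf data room the
 * division below gives 65535 / 2048 = 31, so IXGBE_RSCCTL_MAXDESC_16 is
 * programmed.
 *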
3807  * @pool Memory pool of the Rx queue
3808  */
3809 static inline uint32_t
3810 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3811 {
3812         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3813
3814         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3815         uint16_t maxdesc =
3816                 IPV4_MAX_PKT_LEN /
3817                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3818
3819         if (maxdesc >= 16)
3820                 return IXGBE_RSCCTL_MAXDESC_16;
3821         else if (maxdesc >= 8)
3822                 return IXGBE_RSCCTL_MAXDESC_8;
3823         else if (maxdesc >= 4)
3824                 return IXGBE_RSCCTL_MAXDESC_4;
3825         else
3826                 return IXGBE_RSCCTL_MAXDESC_1;
3827 }
3828
3829 /**
3830  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3831  * interrupt
3832  *
3833  * (Taken from FreeBSD tree)
3834  * (yes this is all very magic and confusing :)
3835  *
3836  * @dev port handle
3837  * @entry the register array entry
3838  * @vector the MSIX vector for this queue
3839  * @type RX/TX/MISC
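 *
 * Illustrative example of the 82599/X540 layout handled below: Rx queue 5
 * (entry = 5, type = 0) with MSIX vector 3 sets the byte at bit offset
 * 16 * (5 & 1) + 8 * 0 = 16 of IVAR(5 >> 1) == IVAR(2) to
 * (3 | IXGBE_IVAR_ALLOC_VAL).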
3840  */
3841 static void
3842 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3843 {
3844         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3845         u32 ivar, index;
3846
3847         vector |= IXGBE_IVAR_ALLOC_VAL;
3848
3849         switch (hw->mac.type) {
3850
3851         case ixgbe_mac_82598EB:
3852                 if (type == -1)
3853                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3854                 else
3855                         entry += (type * 64);
3856                 index = (entry >> 2) & 0x1F;
3857                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3858                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3859                 ivar |= (vector << (8 * (entry & 0x3)));
3860                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3861                 break;
3862
3863         case ixgbe_mac_82599EB:
3864         case ixgbe_mac_X540:
3865                 if (type == -1) { /* MISC IVAR */
3866                         index = (entry & 1) * 8;
3867                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3868                         ivar &= ~(0xFF << index);
3869                         ivar |= (vector << index);
3870                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3871                 } else {        /* RX/TX IVARS */
3872                         index = (16 * (entry & 1)) + (8 * type);
3873                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3874                         ivar &= ~(0xFF << index);
3875                         ivar |= (vector << index);
3876                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3877                 }
3878
3879                 break;
3880
3881         default:
3882                 break;
3883         }
3884 }
3885
3886 void __attribute__((cold))
3887 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3888 {
3889         uint16_t i, rx_using_sse;
3890         struct ixgbe_adapter *adapter =
3891                 (struct ixgbe_adapter *)dev->data->dev_private;
3892
3893         /*
3894          * In order to allow Vector Rx there are a few configuration
3895          * conditions to be met and Rx Bulk Allocation should be allowed.
3896          */
3897         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3898             !adapter->rx_bulk_alloc_allowed) {
3899                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3900                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
3901                                     "not enabled",
3902                              dev->data->port_id);
3903
3904                 adapter->rx_vec_allowed = false;
3905         }
3906
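        /*
         * Summary of the selection order implemented below: LRO callbacks
         * when LRO is enabled, then scattered Rx callbacks, then Vector Rx,
         * then Bulk Allocation, and finally the single-buffer
         * ixgbe_recv_pkts() fallback.
         */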
3907         /*
3908          * Initialize the appropriate LRO callback.
3909          *
3910          * If all queues satisfy the bulk allocation preconditions
3911          * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3912          * Otherwise use a single allocation version.
3913          */
3914         if (dev->data->lro) {
3915                 if (adapter->rx_bulk_alloc_allowed) {
3916                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3917                                            "allocation version");
3918                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3919                 } else {
3920                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3921                                            "allocation version");
3922                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3923                 }
3924         } else if (dev->data->scattered_rx) {
3925                 /*
3926                  * Set the non-LRO scattered callback: there are Vector and
3927                  * single allocation versions.
3928                  */
3929                 if (adapter->rx_vec_allowed) {
3930                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3931                                             "callback (port=%d).",
3932                                      dev->data->port_id);
3933
3934                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3935                 } else if (adapter->rx_bulk_alloc_allowed) {
3936                         PMD_INIT_LOG(DEBUG, "Using a Scattered Rx callback "
3937                                            "with bulk allocation (port=%d).",
3938                                      dev->data->port_id);
3939                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3940                 } else {
3941                         PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
3942                                             "single allocation) "
3943                                             "Scattered Rx callback "
3944                                             "(port=%d).",
3945                                      dev->data->port_id);
3946
3947                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3948                 }
3949         /*
3950          * Below we set "simple" callbacks according to port/queues parameters.
3951          * If parameters allow we are going to choose between the following
3952          * callbacks:
3953          *    - Vector
3954          *    - Bulk Allocation
3955          *    - Single buffer allocation (the simplest one)
3956          */
3957         } else if (adapter->rx_vec_allowed) {
3958                 PMD_INIT_LOG(DEBUG, "Vector Rx enabled, please make sure RX "
3959                                     "burst size is no less than %d (port=%d).",
3960                              RTE_IXGBE_DESCS_PER_LOOP,
3961                              dev->data->port_id);
3962
3963                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
3964         } else if (adapter->rx_bulk_alloc_allowed) {
3965                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
3966                                     "satisfied. Rx Burst Bulk Alloc function "
3967                                     "will be used on port=%d.",
3968                              dev->data->port_id);
3969
3970                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
3971         } else {
3972                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
3973                                     "satisfied, or Scattered Rx is requested "
3974                                     "(port=%d).",
3975                              dev->data->port_id);
3976
3977                 dev->rx_pkt_burst = ixgbe_recv_pkts;
3978         }
3979
3980         /* Propagate information about RX function choice through all queues. */
3981
3982         rx_using_sse =
3983                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
3984                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
3985
3986         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3987                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3988                 rxq->rx_using_sse = rx_using_sse;
3989         }
3990 }
3991
3992 /**
3993  * ixgbe_set_rsc - configure RSC related port HW registers
3994  *
3995  * Configures the port's RSC related registers according to chapter 4.6.7.2
3996  * of the 82599 Spec (x540 configuration is virtually the same).
3997  *
3998  * @dev port handle
3999  *
4000  * Returns 0 in case of success or a non-zero error code
4001  */
4002 static int
4003 ixgbe_set_rsc(struct rte_eth_dev *dev)
4004 {
4005         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4006         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4007         struct rte_eth_dev_info dev_info = { 0 };
4008         bool rsc_capable = false;
4009         uint16_t i;
4010         uint32_t rdrxctl;
4011
4012         /* Sanity check */
4013         dev->dev_ops->dev_infos_get(dev, &dev_info);
4014         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4015                 rsc_capable = true;
4016
4017         if (!rsc_capable && rx_conf->enable_lro) {
4018                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4019                                    "support it");
4020                 return -EINVAL;
4021         }
4022
4023         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4024
4025         if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4026                 /*
4027                  * According to chapter 4.6.7.2.1 of the Spec Rev.
4028                  * 3.0, RSC configuration requires HW CRC stripping to be
4029                  * enabled. If the user requested both HW CRC stripping off
4030                  * and RSC on - return an error.
4031                  */
4032                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4033                                     "is disabled");
4034                 return -EINVAL;
4035         }
4036
4037         /* RFCTL configuration  */
4038         if (rsc_capable) {
4039                 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4040                 if (rx_conf->enable_lro)
4041                         /*
4042                          * Since NFS packet coalescing is not supported, clear
4043                          * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4044                          * enabled.
4045                          */
4046                         rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4047                                    IXGBE_RFCTL_NFSR_DIS);
4048                 else
4049                         rfctl |= IXGBE_RFCTL_RSC_DIS;
4050
4051                 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4052         }
4053
4054         /* If LRO hasn't been requested - we are done here. */
4055         if (!rx_conf->enable_lro)
4056                 return 0;
4057
4058         /* Set RDRXCTL.RSCACKC bit */
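         /*
          * The RSC enablement sequence in the 82599/x540 datasheet (chapter
          * 4.6.7.2.1) calls for software to set RSCACKC whenever RSC is used.
          */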
4059         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4060         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4061         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4062
4063         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4064         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4065                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4066                 uint32_t srrctl =
4067                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4068                 uint32_t rscctl =
4069                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4070                 uint32_t psrtype =
4071                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4072                 uint32_t eitr =
4073                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4074
4075                 /*
4076                  * ixgbe PMD doesn't support header-split at the moment.
4077                  *
4078                  * Following chapter 4.6.7.2.1 of the 82599/x540
4079                  * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4080                  * should be configured even if header split is not
4081                  * enabled. We will configure it to 128 bytes following the
4082                  * recommendation in the spec.
4083                  */
4084                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4085                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4086                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4087
4088                 /*
4089                  * TODO: Consider setting the Receive Descriptor Minimum
4090                  * Threshold Size for an RSC case. This is not an obviously
4091                  * beneficial option, but one worth considering...
4092                  */
4093
4094                 rscctl |= IXGBE_RSCCTL_RSCEN;
4095                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4096                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4097
4098                 /*
4099                  * RSC: Set ITR interval corresponding to 2K ints/s.
4100                  *
4101                  * Full-sized RSC aggregations for a 10Gb/s link will
4102                  * arrive at about a 20K aggregations/s rate.
4103                  *
4104                  * A 2K ints/s rate will cause only 10% of the
4105                  * aggregations to be closed due to the interrupt timer
4106                  * expiration when streaming at wire speed.
4107                  *
4108                  * For a sparse streaming case this setting will yield
4109                  * at most 500us latency for a single RSC aggregation.
4110                  */
4111                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4112                 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4113
4114                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4115                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4116                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4117                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4118
4119                 /*
4120                  * RSC requires the mapping of the queue to the
4121                  * interrupt vector.
4122                  */
4123                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4124         }
4125
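             /*
              * Record that LRO is active so that ixgbe_set_rx_function()
              * later selects one of the LRO-capable receive callbacks.
              */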
4126         dev->data->lro = 1;
4127
4128         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4129
4130         return 0;
4131 }
4132
4133 /*
4134  * Initializes Receive Unit.
4135  */
4136 int __attribute__((cold))
4137 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4138 {
4139         struct ixgbe_hw     *hw;
4140         struct ixgbe_rx_queue *rxq;
4141         uint64_t bus_addr;
4142         uint32_t rxctrl;
4143         uint32_t fctrl;
4144         uint32_t hlreg0;
4145         uint32_t maxfrs;
4146         uint32_t srrctl;
4147         uint32_t rdrxctl;
4148         uint32_t rxcsum;
4149         uint16_t buf_size;
4150         uint16_t i;
4151         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4152         int rc;
4153
4154         PMD_INIT_FUNC_TRACE();
4155         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4156
4157         /*
4158          * Make sure receives are disabled while setting
4159          * up the RX context (registers, descriptor rings, etc.).
4160          */
4161         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4162         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4163
4164         /* Enable receipt of broadcast frames */
4165         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4166         fctrl |= IXGBE_FCTRL_BAM;
4167         fctrl |= IXGBE_FCTRL_DPF;
4168         fctrl |= IXGBE_FCTRL_PMCF;
4169         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4170
4171         /*
4172          * Configure CRC stripping, if any.
4173          */
4174         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4175         if (rx_conf->hw_strip_crc)
4176                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4177         else
4178                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4179
4180         /*
4181          * Configure jumbo frame support, if any.
4182          */
4183         if (rx_conf->jumbo_frame == 1) {
4184                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
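                     /*
                      * The maximum frame size (MFS) field occupies the upper
                      * 16 bits of MAXFRS; preserve the lower half while
                      * updating it.
                      */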
4185                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4186                 maxfrs &= 0x0000FFFF;
4187                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4188                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4189         } else
4190                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4191
4192         /*
4193          * If loopback mode is configured for 82599, set LPBK bit.
4194          */
4195         if (hw->mac.type == ixgbe_mac_82599EB &&
4196                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4197                 hlreg0 |= IXGBE_HLREG0_LPBK;
4198         else
4199                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4200
4201         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4202
4203         /* Setup RX queues */
4204         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4205                 rxq = dev->data->rx_queues[i];
4206
4207                 /*
4208                  * Reset crc_len in case it was changed after queue setup by a
4209                  * call to configure.
4210                  */
4211                 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4212
4213                 /* Setup the Base and Length of the Rx Descriptor Rings */
4214                 bus_addr = rxq->rx_ring_phys_addr;
4215                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4216                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4217                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4218                                 (uint32_t)(bus_addr >> 32));
4219                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4220                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4221                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4222                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4223
4224                 /* Configure the SRRCTL register */
4225 #ifdef RTE_HEADER_SPLIT_ENABLE
4226                 /*
4227                  * Configure Header Split
4228                  */
4229                 if (rx_conf->header_split) {
4230                         if (hw->mac.type == ixgbe_mac_82599EB) {
4231                                 /* Must setup the PSRTYPE register */
4232                                 uint32_t psrtype;
4233                                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4234                                         IXGBE_PSRTYPE_UDPHDR   |
4235                                         IXGBE_PSRTYPE_IPV4HDR  |
4236                                         IXGBE_PSRTYPE_IPV6HDR;
4237                                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4238                         }
4239                         srrctl = ((rx_conf->split_hdr_size <<
4240                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4241                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4242                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4243                 } else
4244 #endif
4245                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4246
4247                 /* Set if packets are dropped when no descriptors are available */
4248                 if (rxq->drop_en)
4249                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4250
4251                 /*
4252                  * Configure the RX buffer size in the BSIZEPACKET field of
4253                  * the SRRCTL register of the queue.
4254                  * The value is in 1 KB resolution. Valid values can be from
4255                  * 1 KB to 16 KB.
4256                  */
4257                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4258                         RTE_PKTMBUF_HEADROOM);
4259                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4260                            IXGBE_SRRCTL_BSIZEPKT_MASK);
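                     /*
                      * For example, assuming the default 128-byte headroom and
                      * a pool created with a 2176-byte data room
                      * (RTE_MBUF_DEFAULT_BUF_SIZE), buf_size is 2048, which
                      * encodes as 2 in the 1 KB-granular BSIZEPACKET field,
                      * i.e. a 2 KB hardware receive buffer.
                      */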
4261
4262                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4263
4264                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4265                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4266
4267                 /* Add the dual VLAN tag length to account for double-tagged frames */
4268                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4269                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4270                         dev->data->scattered_rx = 1;
4271         }
4272
4273         if (rx_conf->enable_scatter)
4274                 dev->data->scattered_rx = 1;
4275
4276         /*
4277          * Device configured with multiple RX queues.
4278          */
4279         ixgbe_dev_mq_rx_configure(dev);
4280
4281         /*
4282          * Setup the Checksum Register.
4283          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4284          * Enable IP/L4 checksum computation by hardware if requested to do so.
4285          */
4286         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4287         rxcsum |= IXGBE_RXCSUM_PCSD;
4288         if (rx_conf->hw_ip_checksum)
4289                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4290         else
4291                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4292
4293         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4294
4295         if (hw->mac.type == ixgbe_mac_82599EB ||
4296             hw->mac.type == ixgbe_mac_X540) {
4297                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4298                 if (rx_conf->hw_strip_crc)
4299                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4300                 else
4301                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4302                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4303                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4304         }
4305
4306         rc = ixgbe_set_rsc(dev);
4307         if (rc)
4308                 return rc;
4309
4310         ixgbe_set_rx_function(dev);
4311
4312         return 0;
4313 }
4314
4315 /*
4316  * Initializes Transmit Unit.
4317  */
4318 void __attribute__((cold))
4319 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4320 {
4321         struct ixgbe_hw     *hw;
4322         struct ixgbe_tx_queue *txq;
4323         uint64_t bus_addr;
4324         uint32_t hlreg0;
4325         uint32_t txctrl;
4326         uint16_t i;
4327
4328         PMD_INIT_FUNC_TRACE();
4329         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4330
4331         /* Enable TX CRC (checksum offload requirement) and hw padding
4332          * (TSO requirement) */
4333         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4334         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4335         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4336
4337         /* Setup the Base and Length of the Tx Descriptor Rings */
4338         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4339                 txq = dev->data->tx_queues[i];
4340
4341                 bus_addr = txq->tx_ring_phys_addr;
4342                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4343                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4344                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4345                                 (uint32_t)(bus_addr >> 32));
4346                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4347                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4348                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4349                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4350                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4351
4352                 /*
4353                  * Disable Tx Head Writeback RO bit, since this hoses
4354                  * bookkeeping if things aren't delivered in order.
4355                  */
4356                 switch (hw->mac.type) {
4357                         case ixgbe_mac_82598EB:
4358                                 txctrl = IXGBE_READ_REG(hw,
4359                                                         IXGBE_DCA_TXCTRL(txq->reg_idx));
4360                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4361                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4362                                                 txctrl);
4363                                 break;
4364
4365                         case ixgbe_mac_82599EB:
4366                         case ixgbe_mac_X540:
4367                         case ixgbe_mac_X550:
4368                         case ixgbe_mac_X550EM_x:
4369                         default:
4370                                 txctrl = IXGBE_READ_REG(hw,
4371                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4372                                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4373                                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4374                                                 txctrl);
4375                                 break;
4376                 }
4377         }
4378
4379         /* Device configured with multiple TX queues. */
4380         ixgbe_dev_mq_tx_configure(dev);
4381 }
4382
4383 /*
4384  * Set up link for 82599 loopback mode Tx->Rx.
4385  */
4386 static inline void __attribute__((cold))
4387 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4388 {
4389         PMD_INIT_FUNC_TRACE();
4390
4391         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4392                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4393                                 IXGBE_SUCCESS) {
4394                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4395                         /* ignore error */
4396                         return;
4397                 }
4398         }
4399
4400         /* Restart link */
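             /*
              * Force the link up (AUTOC.FLU) in 10G mode with no
              * auto-negotiation so that Tx->Rx loopback works without a link
              * partner, then reset the pipeline so the new AUTOC value takes
              * effect.
              */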
4401         IXGBE_WRITE_REG(hw,
4402                         IXGBE_AUTOC,
4403                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4404         ixgbe_reset_pipeline_82599(hw);
4405
4406         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4407         msec_delay(50);
4408 }
4409
4410
4411 /*
4412  * Start Transmit and Receive Units.
4413  */
4414 int __attribute__((cold))
4415 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4416 {
4417         struct ixgbe_hw     *hw;
4418         struct ixgbe_tx_queue *txq;
4419         struct ixgbe_rx_queue *rxq;
4420         uint32_t txdctl;
4421         uint32_t dmatxctl;
4422         uint32_t rxctrl;
4423         uint16_t i;
4424         int ret = 0;
4425
4426         PMD_INIT_FUNC_TRACE();
4427         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4428
4429         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4430                 txq = dev->data->tx_queues[i];
4431                 /* Setup Transmit Threshold Registers */
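                     /*
                      * TXDCTL packs the prefetch, host and write-back
                      * thresholds into bits [6:0], [14:8] and [22:16]
                      * respectively, hence the 7-bit masks and shifts below.
                      */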
4432                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4433                 txdctl |= txq->pthresh & 0x7F;
4434                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4435                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4436                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4437         }
4438
4439         if (hw->mac.type != ixgbe_mac_82598EB) {
4440                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4441                 dmatxctl |= IXGBE_DMATXCTL_TE;
4442                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4443         }
4444
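             /*
              * On 82599 and later the per-queue TXDCTL.ENABLE bit only takes
              * effect once the global transmit DMA enable (DMATXCTL.TE) set
              * above is active, so the queues are started only now.
              */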
4445         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4446                 txq = dev->data->tx_queues[i];
4447                 if (!txq->tx_deferred_start) {
4448                         ret = ixgbe_dev_tx_queue_start(dev, i);
4449                         if (ret < 0)
4450                                 return ret;
4451                 }
4452         }
4453
4454         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4455                 rxq = dev->data->rx_queues[i];
4456                 if (!rxq->rx_deferred_start) {
4457                         ret = ixgbe_dev_rx_queue_start(dev, i);
4458                         if (ret < 0)
4459                                 return ret;
4460                 }
4461         }
4462
4463         /* Enable Receive engine */
4464         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4465         if (hw->mac.type == ixgbe_mac_82598EB)
4466                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4467         rxctrl |= IXGBE_RXCTRL_RXEN;
4468         hw->mac.ops.enable_rx_dma(hw, rxctrl);
4469
4470         /* If loopback mode is enabled for 82599, set up the link accordingly */
4471         if (hw->mac.type == ixgbe_mac_82599EB &&
4472                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4473                 ixgbe_setup_loopback_link_82599(hw);
4474
4475         return 0;
4476 }
4477
4478 /*
4479  * Start Receive Units for specified queue.
4480  */
4481 int __attribute__((cold))
4482 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4483 {
4484         struct ixgbe_hw     *hw;
4485         struct ixgbe_rx_queue *rxq;
4486         uint32_t rxdctl;
4487         int poll_ms;
4488
4489         PMD_INIT_FUNC_TRACE();
4490         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4491
4492         if (rx_queue_id < dev->data->nb_rx_queues) {
4493                 rxq = dev->data->rx_queues[rx_queue_id];
4494
4495                 /* Allocate buffers for descriptor rings */
4496                 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4497                         PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4498                                      rx_queue_id);
4499                         return -1;
4500                 }
4501                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4502                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4503                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4504
4505                 /* Wait until RX Enable ready */
4506                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4507                 do {
4508                         rte_delay_ms(1);
4509                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4510                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4511                 if (!poll_ms)
4512                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4513                                      rx_queue_id);
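                     /*
                      * Make the descriptor ring writes globally visible before
                      * bumping the tail pointer; RDT = nb_rx_desc - 1 hands
                      * all but one descriptor to the hardware.
                      */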
4514                 rte_wmb();
4515                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4516                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
4517         } else
4518                 return -1;
4519
4520         return 0;
4521 }
4522
4523 /*
4524  * Stop Receive Units for specified queue.
4525  */
4526 int __attribute__((cold))
4527 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4528 {
4529         struct ixgbe_hw     *hw;
4530         struct ixgbe_adapter *adapter =
4531                 (struct ixgbe_adapter *)dev->data->dev_private;
4532         struct ixgbe_rx_queue *rxq;
4533         uint32_t rxdctl;
4534         int poll_ms;
4535
4536         PMD_INIT_FUNC_TRACE();
4537         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4538
4539         if (rx_queue_id < dev->data->nb_rx_queues) {
4540                 rxq = dev->data->rx_queues[rx_queue_id];
4541
4542                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4543                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4544                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4545
4546                 /* Wait until the RX Enable bit clears */
4547                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4548                 do {
4549                         rte_delay_ms(1);
4550                         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4551                 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4552                 if (!poll_ms)
4553                         PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4554                                      rx_queue_id);
4555
4556                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4557
4558                 ixgbe_rx_queue_release_mbufs(rxq);
4559                 ixgbe_reset_rx_queue(adapter, rxq);
4560         } else
4561                 return -1;
4562
4563         return 0;
4564 }
4565
4566
4567 /*
4568  * Start Transmit Units for specified queue.
4569  */
4570 int __attribute__((cold))
4571 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4572 {
4573         struct ixgbe_hw     *hw;
4574         struct ixgbe_tx_queue *txq;
4575         uint32_t txdctl;
4576         int poll_ms;
4577
4578         PMD_INIT_FUNC_TRACE();
4579         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4580
4581         if (tx_queue_id < dev->data->nb_tx_queues) {
4582                 txq = dev->data->tx_queues[tx_queue_id];
4583                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4584                 txdctl |= IXGBE_TXDCTL_ENABLE;
4585                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4586
4587                 /* Wait until TX Enable ready */
4588                 if (hw->mac.type == ixgbe_mac_82599EB) {
4589                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4590                         do {
4591                                 rte_delay_ms(1);
4592                                 txdctl = IXGBE_READ_REG(hw,
4593                                         IXGBE_TXDCTL(txq->reg_idx));
4594                         } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4595                         if (!poll_ms)
4596                                 PMD_INIT_LOG(ERR, "Could not enable "
4597                                              "Tx Queue %d", tx_queue_id);
4598                 }
4599                 rte_wmb();
4600                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4601                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4602         } else
4603                 return -1;
4604
4605         return 0;
4606 }
4607
4608 /*
4609  * Stop Transmit Units for specified queue.
4610  */
4611 int __attribute__((cold))
4612 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4613 {
4614         struct ixgbe_hw     *hw;
4615         struct ixgbe_tx_queue *txq;
4616         uint32_t txdctl;
4617         uint32_t txtdh, txtdt;
4618         int poll_ms;
4619
4620         PMD_INIT_FUNC_TRACE();
4621         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4622
4623         if (tx_queue_id < dev->data->nb_tx_queues) {
4624                 txq = dev->data->tx_queues[tx_queue_id];
4625
4626                 /* Wait until TX queue is empty */
4627                 if (hw->mac.type == ixgbe_mac_82599EB) {
4628                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4629                         do {
4630                                 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4631                                 txtdh = IXGBE_READ_REG(hw,
4632                                                 IXGBE_TDH(txq->reg_idx));
4633                                 txtdt = IXGBE_READ_REG(hw,
4634                                                 IXGBE_TDT(txq->reg_idx));
4635                         } while (--poll_ms && (txtdh != txtdt));
4636                         if (!poll_ms)
4637                                 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4638                                              "when stopping.", tx_queue_id);
4639                 }
4640
4641                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4642                 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4643                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4644
4645                 /* Wait until the TX Enable bit clears */
4646                 if (hw->mac.type == ixgbe_mac_82599EB) {
4647                         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4648                         do {
4649                                 rte_delay_ms(1);
4650                                 txdctl = IXGBE_READ_REG(hw,
4651                                                 IXGBE_TXDCTL(txq->reg_idx));
4652                         } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4653                         if (!poll_ms)
4654                                 PMD_INIT_LOG(ERR, "Could not disable "
4655                                              "Tx Queue %d", tx_queue_id);
4656                 }
4657
4658                 if (txq->ops != NULL) {
4659                         txq->ops->release_mbufs(txq);
4660                         txq->ops->reset(txq);
4661                 }
4662         } else
4663                 return -1;
4664
4665         return 0;
4666 }
4667
4668 void
4669 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4670         struct rte_eth_rxq_info *qinfo)
4671 {
4672         struct ixgbe_rx_queue *rxq;
4673
4674         rxq = dev->data->rx_queues[queue_id];
4675
4676         qinfo->mp = rxq->mb_pool;
4677         qinfo->scattered_rx = dev->data->scattered_rx;
4678         qinfo->nb_desc = rxq->nb_rx_desc;
4679
4680         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4681         qinfo->conf.rx_drop_en = rxq->drop_en;
4682         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4683 }
4684
4685 void
4686 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4687         struct rte_eth_txq_info *qinfo)
4688 {
4689         struct ixgbe_tx_queue *txq;
4690
4691         txq = dev->data->tx_queues[queue_id];
4692
4693         qinfo->nb_desc = txq->nb_tx_desc;
4694
4695         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4696         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4697         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4698
4699         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4700         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4701         qinfo->conf.txq_flags = txq->txq_flags;
4702         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4703 }
4704
4705 /*
4706  * [VF] Initializes Receive Unit.
4707  */
4708 int __attribute__((cold))
4709 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4710 {
4711         struct ixgbe_hw     *hw;
4712         struct ixgbe_rx_queue *rxq;
4713         uint64_t bus_addr;
4714         uint32_t srrctl, psrtype = 0;
4715         uint16_t buf_size;
4716         uint16_t i;
4717         int ret;
4718
4719         PMD_INIT_FUNC_TRACE();
4720         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4721
4722         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4723                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4724                         "it should be a power of 2");
4725                 return -1;
4726         }
4727
4728         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4729                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4730                         "it should be equal to or less than %d",
4731                         hw->mac.max_rx_queues);
4732                 return -1;
4733         }
4734
4735         /*
4736          * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4737          * disables the VF receipt of packets if the PF MTU is > 1500.
4738          * This is done to deal with an 82599 limitation that forces
4739          * the PF and all VFs to share the same MTU.
4740          * The PF driver then re-enables the VF receipt of packets when
4741          * the VF driver issues an IXGBE_VF_SET_LPE request.
4742          * In the meantime, the VF device cannot be used, even if the VF driver
4743          * and the Guest VM network stack are ready to accept packets with a
4744          * size up to the PF MTU.
4745          * As a work-around to this PF behaviour, force the call to
4746          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4747          * VF packet reception works in all cases.
4748          */
4749         ixgbevf_rlpml_set_vf(hw,
4750                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4751
4752         /* Setup RX queues */
4753         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4754                 rxq = dev->data->rx_queues[i];
4755
4756                 /* Allocate buffers for descriptor rings */
4757                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4758                 if (ret)
4759                         return ret;
4760
4761                 /* Setup the Base and Length of the Rx Descriptor Rings */
4762                 bus_addr = rxq->rx_ring_phys_addr;
4763
4764                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4765                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4766                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4767                                 (uint32_t)(bus_addr >> 32));
4768                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4769                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4770                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4771                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4772
4773
4774                 /* Configure the SRRCTL register */
4775 #ifdef RTE_HEADER_SPLIT_ENABLE
4776                 /*
4777                  * Configure Header Split
4778                  */
4779                 if (dev->data->dev_conf.rxmode.header_split) {
4780                         srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4781                                 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4782                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
4783                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4784                 } else
4785 #endif
4786                         srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4787
4788                 /* Set if packets are dropped when no descriptors are available */
4789                 if (rxq->drop_en)
4790                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4791
4792                 /*
4793                  * Configure the RX buffer size in the BSIZEPACKET field of
4794                  * the SRRCTL register of the queue.
4795                  * The value is in 1 KB resolution. Valid values can be from
4796                  * 1 KB to 16 KB.
4797                  */
4798                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4799                         RTE_PKTMBUF_HEADROOM);
4800                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4801                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4802
4803                 /*
4804                  * VF modification to write virtual function SRRCTL register
4805                  */
4806                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4807
4808                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4809                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4810
4811                 if (dev->data->dev_conf.rxmode.enable_scatter ||
4812                     /* Add the dual VLAN tag length to account for double-tagged frames */
4813                     (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4814                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4815                         if (!dev->data->scattered_rx)
4816                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4817                         dev->data->scattered_rx = 1;
4818                 }
4819         }
4820
4821 #ifdef RTE_HEADER_SPLIT_ENABLE
4822         if (dev->data->dev_conf.rxmode.header_split)
4823                 /* Must setup the PSRTYPE register */
4824                 psrtype = IXGBE_PSRTYPE_TCPHDR |
4825                         IXGBE_PSRTYPE_UDPHDR   |
4826                         IXGBE_PSRTYPE_IPV4HDR  |
4827                         IXGBE_PSRTYPE_IPV6HDR;
4828 #endif
4829
4830         /* Set RQPL for VF RSS according to the number of Rx queues */
4831         psrtype |= (dev->data->nb_rx_queues >> 1) <<
4832                 IXGBE_PSRTYPE_RQPL_SHIFT;
4833         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4834
4835         ixgbe_set_rx_function(dev);
4836
4837         return 0;
4838 }
4839
4840 /*
4841  * [VF] Initializes Transmit Unit.
4842  */
4843 void __attribute__((cold))
4844 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4845 {
4846         struct ixgbe_hw     *hw;
4847         struct ixgbe_tx_queue *txq;
4848         uint64_t bus_addr;
4849         uint32_t txctrl;
4850         uint16_t i;
4851
4852         PMD_INIT_FUNC_TRACE();
4853         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4854
4855         /* Setup the Base and Length of the Tx Descriptor Rings */
4856         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4857                 txq = dev->data->tx_queues[i];
4858                 bus_addr = txq->tx_ring_phys_addr;
4859                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4860                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4861                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4862                                 (uint32_t)(bus_addr >> 32));
4863                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4864                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4865                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4866                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4867                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4868
4869                 /*
4870                  * Disable Tx Head Writeback RO bit, since this hoses
4871                  * bookkeeping if things aren't delivered in order.
4872                  */
4873                 txctrl = IXGBE_READ_REG(hw,
4874                                 IXGBE_VFDCA_TXCTRL(i));
4875                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4876                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4877                                 txctrl);
4878         }
4879 }
4880
4881 /*
4882  * [VF] Start Transmit and Receive Units.
4883  */
4884 void __attribute__((cold))
4885 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4886 {
4887         struct ixgbe_hw     *hw;
4888         struct ixgbe_tx_queue *txq;
4889         struct ixgbe_rx_queue *rxq;
4890         uint32_t txdctl;
4891         uint32_t rxdctl;
4892         uint16_t i;
4893         int poll_ms;
4894
4895         PMD_INIT_FUNC_TRACE();
4896         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4897
4898         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4899                 txq = dev->data->tx_queues[i];
4900                 /* Setup Transmit Threshold Registers */
4901                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4902                 txdctl |= txq->pthresh & 0x7F;
4903                 txdctl |= ((txq->hthresh & 0x7F) << 8);
4904                 txdctl |= ((txq->wthresh & 0x7F) << 16);
4905                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4906         }
4907
4908         for (i = 0; i < dev->data->nb_tx_queues; i++) {
4909
4910                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4911                 txdctl |= IXGBE_TXDCTL_ENABLE;
4912                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4913
4914                 poll_ms = 10;
4915                 /* Wait until TX Enable ready */
4916                 do {
4917                         rte_delay_ms(1);
4918                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4919                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4920                 if (!poll_ms)
4921                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4922         }
4923         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4924
4925                 rxq = dev->data->rx_queues[i];
4926
4927                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4928                 rxdctl |= IXGBE_RXDCTL_ENABLE;
4929                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4930
4931                 /* Wait until RX Enable ready */
4932                 poll_ms = 10;
4933                 do {
4934                         rte_delay_ms(1);
4935                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4936                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4937                 if (!poll_ms)
4938                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4939                 rte_wmb();
4940                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4941
4942         }
4943 }
4944
4945 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4946 int __attribute__((weak))
4947 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
4948 {
4949         return -1;
4950 }
4951
4952 uint16_t __attribute__((weak))
4953 ixgbe_recv_pkts_vec(
4954         void __rte_unused *rx_queue,
4955         struct rte_mbuf __rte_unused **rx_pkts,
4956         uint16_t __rte_unused nb_pkts)
4957 {
4958         return 0;
4959 }
4960
4961 uint16_t __attribute__((weak))
4962 ixgbe_recv_scattered_pkts_vec(
4963         void __rte_unused *rx_queue,
4964         struct rte_mbuf __rte_unused **rx_pkts,
4965         uint16_t __rte_unused nb_pkts)
4966 {
4967         return 0;
4968 }
4969
4970 int __attribute__((weak))
4971 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
4972 {
4973         return -1;
4974 }