net: add rte prefix to IP defines
[dpdk.git] / drivers / net / ixgbe / ixgbe_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5
6 #include <sys/queue.h>
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdint.h>
13 #include <stdarg.h>
14 #include <unistd.h>
15 #include <inttypes.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
23 #include <rte_pci.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
38 #include <rte_udp.h>
39 #include <rte_tcp.h>
40 #include <rte_sctp.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
43 #include <rte_ip.h>
44 #include <rte_net.h>
45
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
53
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
56 #else
57 #define IXGBE_TX_IEEE1588_TMST 0
58 #endif
59 /* Bit Mask to indicate what bits required for building TX context */
60 #define IXGBE_TX_OFFLOAD_MASK (                  \
61                 PKT_TX_OUTER_IPV6 |              \
62                 PKT_TX_OUTER_IPV4 |              \
63                 PKT_TX_IPV6 |                    \
64                 PKT_TX_IPV4 |                    \
65                 PKT_TX_VLAN_PKT |                \
66                 PKT_TX_IP_CKSUM |                \
67                 PKT_TX_L4_MASK |                 \
68                 PKT_TX_TCP_SEG |                 \
69                 PKT_TX_MACSEC |                  \
70                 PKT_TX_OUTER_IP_CKSUM |          \
71                 PKT_TX_SEC_OFFLOAD |     \
72                 IXGBE_TX_IEEE1588_TMST)
73
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75                 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
76
77 #if 1
78 #define RTE_PMD_USE_PREFETCH
79 #endif
80
81 #ifdef RTE_PMD_USE_PREFETCH
82 /*
83  * Prefetch a cache line into all cache levels.
84  */
85 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
86 #else
87 #define rte_ixgbe_prefetch(p)   do {} while (0)
88 #endif
89
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
92                                     uint16_t nb_pkts);
93 #endif
94
95 /*********************************************************************
96  *
97  *  TX functions
98  *
99  **********************************************************************/
100
101 /*
102  * Check for descriptors with their DD bit set and free mbufs.
103  * Return the total number of buffers freed.
104  */
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
107 {
108         struct ixgbe_tx_entry *txep;
109         uint32_t status;
110         int i, nb_free = 0;
111         struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
112
113         /* check DD bit on threshold descriptor */
114         status = txq->tx_ring[txq->tx_next_dd].wb.status;
115         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
116                 return 0;
117
118         /*
119          * first buffer to free from S/W ring is at index
120          * tx_next_dd - (tx_rs_thresh-1)
121          */
122         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
123
124         for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125                 /* free buffers one at a time */
126                 m = rte_pktmbuf_prefree_seg(txep->mbuf);
127                 txep->mbuf = NULL;
128
129                 if (unlikely(m == NULL))
130                         continue;
131
132                 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133                     (nb_free > 0 && m->pool != free[0]->pool)) {
134                         rte_mempool_put_bulk(free[0]->pool,
135                                              (void **)free, nb_free);
136                         nb_free = 0;
137                 }
138
139                 free[nb_free++] = m;
140         }
141
142         if (nb_free > 0)
143                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
144
145         /* buffers were freed, update counters */
146         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148         if (txq->tx_next_dd >= txq->nb_tx_desc)
149                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
150
151         return txq->tx_rs_thresh;
152 }
153
154 /* Populate 4 descriptors with data from 4 mbufs */
155 static inline void
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
157 {
158         uint64_t buf_dma_addr;
159         uint32_t pkt_len;
160         int i;
161
162         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163                 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164                 pkt_len = (*pkts)->data_len;
165
166                 /* write data to descriptor */
167                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
168
169                 txdp->read.cmd_type_len =
170                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
171
172                 txdp->read.olinfo_status =
173                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
174
175                 rte_prefetch0(&(*pkts)->pool);
176         }
177 }
178
179 /* Populate 1 descriptor with data from 1 mbuf */
180 static inline void
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
182 {
183         uint64_t buf_dma_addr;
184         uint32_t pkt_len;
185
186         buf_dma_addr = rte_mbuf_data_iova(*pkts);
187         pkt_len = (*pkts)->data_len;
188
189         /* write data to descriptor */
190         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191         txdp->read.cmd_type_len =
192                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193         txdp->read.olinfo_status =
194                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195         rte_prefetch0(&(*pkts)->pool);
196 }
197
198 /*
199  * Fill H/W descriptor ring with mbuf data.
200  * Copy mbuf pointers to the S/W ring.
201  */
202 static inline void
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
204                       uint16_t nb_pkts)
205 {
206         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208         const int N_PER_LOOP = 4;
209         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210         int mainpart, leftover;
211         int i, j;
212
213         /*
214          * Process most of the packets in chunks of N pkts.  Any
215          * leftover packets will get processed one at a time.
216          */
217         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
219         for (i = 0; i < mainpart; i += N_PER_LOOP) {
220                 /* Copy N mbuf pointers to the S/W ring */
221                 for (j = 0; j < N_PER_LOOP; ++j) {
222                         (txep + i + j)->mbuf = *(pkts + i + j);
223                 }
224                 tx4(txdp + i, pkts + i);
225         }
226
227         if (unlikely(leftover > 0)) {
228                 for (i = 0; i < leftover; ++i) {
229                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230                         tx1(txdp + mainpart + i, pkts + mainpart + i);
231                 }
232         }
233 }
234
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
237              uint16_t nb_pkts)
238 {
239         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
241         uint16_t n = 0;
242
243         /*
244          * Begin scanning the H/W ring for done descriptors when the
245          * number of available descriptors drops below tx_free_thresh.  For
246          * each done descriptor, free the associated buffer.
247          */
248         if (txq->nb_tx_free < txq->tx_free_thresh)
249                 ixgbe_tx_free_bufs(txq);
250
251         /* Only use descriptors that are available */
252         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253         if (unlikely(nb_pkts == 0))
254                 return 0;
255
256         /* Use exactly nb_pkts descriptors */
257         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
258
259         /*
260          * At this point, we know there are enough descriptors in the
261          * ring to transmit all the packets.  This assumes that each
262          * mbuf contains a single segment, and that no new offloads
263          * are expected, which would require a new context descriptor.
264          */
265
266         /*
267          * See if we're going to wrap-around. If so, handle the top
268          * of the descriptor ring first, then do the bottom.  If not,
269          * the processing looks just like the "bottom" part anyway...
270          */
271         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
274
275                 /*
276                  * We know that the last descriptor in the ring will need to
277                  * have its RS bit set because tx_rs_thresh has to be
278                  * a divisor of the ring size
279                  */
280                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
283
284                 txq->tx_tail = 0;
285         }
286
287         /* Fill H/W descriptor ring with mbuf data */
288         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
290
291         /*
292          * Determine if RS bit should be set
293          * This is what we actually want:
294          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295          * but instead of subtracting 1 and doing >=, we can just do
296          * greater than without subtracting.
297          */
298         if (txq->tx_tail > txq->tx_next_rs) {
299                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
302                                                 txq->tx_rs_thresh);
303                 if (txq->tx_next_rs >= txq->nb_tx_desc)
304                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
305         }
306
307         /*
308          * Check for wrap-around. This would only happen if we used
309          * up to the last descriptor in the ring, no more, no less.
310          */
311         if (txq->tx_tail >= txq->nb_tx_desc)
312                 txq->tx_tail = 0;
313
314         /* update tail pointer */
315         rte_wmb();
316         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
317
318         return nb_pkts;
319 }
320
321 uint16_t
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
323                        uint16_t nb_pkts)
324 {
325         uint16_t nb_tx;
326
327         /* Try to transmit at least chunks of TX_MAX_BURST pkts */
328         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
330
331         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
332         nb_tx = 0;
333         while (nb_pkts) {
334                 uint16_t ret, n;
335
336                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338                 nb_tx = (uint16_t)(nb_tx + ret);
339                 nb_pkts = (uint16_t)(nb_pkts - ret);
340                 if (ret < n)
341                         break;
342         }
343
344         return nb_tx;
345 }
346
347 #ifdef RTE_IXGBE_INC_VECTOR
348 static uint16_t
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
350                     uint16_t nb_pkts)
351 {
352         uint16_t nb_tx = 0;
353         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354
355         while (nb_pkts) {
356                 uint16_t ret, num;
357
358                 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359                 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
360                                                  num);
361                 nb_tx += ret;
362                 nb_pkts -= ret;
363                 if (ret < num)
364                         break;
365         }
366
367         return nb_tx;
368 }
369 #endif
370
371 static inline void
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375                 __rte_unused uint64_t *mdata)
376 {
377         uint32_t type_tucmd_mlhl;
378         uint32_t mss_l4len_idx = 0;
379         uint32_t ctx_idx;
380         uint32_t vlan_macip_lens;
381         union ixgbe_tx_offload tx_offload_mask;
382         uint32_t seqnum_seed = 0;
383
384         ctx_idx = txq->ctx_curr;
385         tx_offload_mask.data[0] = 0;
386         tx_offload_mask.data[1] = 0;
387         type_tucmd_mlhl = 0;
388
389         /* Specify which HW CTX to upload. */
390         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
391
392         if (ol_flags & PKT_TX_VLAN_PKT) {
393                 tx_offload_mask.vlan_tci |= ~0;
394         }
395
396         /* check if TCP segmentation required for this packet */
397         if (ol_flags & PKT_TX_TCP_SEG) {
398                 /* implies IP cksum in IPv4 */
399                 if (ol_flags & PKT_TX_IP_CKSUM)
400                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
403                 else
404                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
407
408                 tx_offload_mask.l2_len |= ~0;
409                 tx_offload_mask.l3_len |= ~0;
410                 tx_offload_mask.l4_len |= ~0;
411                 tx_offload_mask.tso_segsz |= ~0;
412                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414         } else { /* no TSO, check if hardware checksum is needed */
415                 if (ol_flags & PKT_TX_IP_CKSUM) {
416                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417                         tx_offload_mask.l2_len |= ~0;
418                         tx_offload_mask.l3_len |= ~0;
419                 }
420
421                 switch (ol_flags & PKT_TX_L4_MASK) {
422                 case PKT_TX_UDP_CKSUM:
423                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
426                         tx_offload_mask.l2_len |= ~0;
427                         tx_offload_mask.l3_len |= ~0;
428                         break;
429                 case PKT_TX_TCP_CKSUM:
430                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
431                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
432                         mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
433                         tx_offload_mask.l2_len |= ~0;
434                         tx_offload_mask.l3_len |= ~0;
435                         break;
436                 case PKT_TX_SCTP_CKSUM:
437                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
438                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
439                         mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
440                         tx_offload_mask.l2_len |= ~0;
441                         tx_offload_mask.l3_len |= ~0;
442                         break;
443                 default:
444                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
445                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
446                         break;
447                 }
448         }
449
450         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
451                 tx_offload_mask.outer_l2_len |= ~0;
452                 tx_offload_mask.outer_l3_len |= ~0;
453                 tx_offload_mask.l2_len |= ~0;
454                 seqnum_seed |= tx_offload.outer_l3_len
455                                << IXGBE_ADVTXD_OUTER_IPLEN;
456                 seqnum_seed |= tx_offload.l2_len
457                                << IXGBE_ADVTXD_TUNNEL_LEN;
458         }
459 #ifdef RTE_LIBRTE_SECURITY
460         if (ol_flags & PKT_TX_SEC_OFFLOAD) {
461                 union ixgbe_crypto_tx_desc_md *md =
462                                 (union ixgbe_crypto_tx_desc_md *)mdata;
463                 seqnum_seed |=
464                         (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
465                 type_tucmd_mlhl |= md->enc ?
466                                 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
467                                 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
468                 type_tucmd_mlhl |=
469                         (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
470                 tx_offload_mask.sa_idx |= ~0;
471                 tx_offload_mask.sec_pad_len |= ~0;
472         }
473 #endif
474
475         txq->ctx_cache[ctx_idx].flags = ol_flags;
476         txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
477                 tx_offload_mask.data[0] & tx_offload.data[0];
478         txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
479                 tx_offload_mask.data[1] & tx_offload.data[1];
480         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
481
482         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
483         vlan_macip_lens = tx_offload.l3_len;
484         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
485                 vlan_macip_lens |= (tx_offload.outer_l2_len <<
486                                     IXGBE_ADVTXD_MACLEN_SHIFT);
487         else
488                 vlan_macip_lens |= (tx_offload.l2_len <<
489                                     IXGBE_ADVTXD_MACLEN_SHIFT);
490         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
491         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
492         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
493         ctx_txd->seqnum_seed     = seqnum_seed;
494 }
495
496 /*
497  * Check which hardware context can be used. Use the existing match
498  * or create a new context descriptor.
499  */
500 static inline uint32_t
501 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
502                    union ixgbe_tx_offload tx_offload)
503 {
504         /* If match with the current used context */
505         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
506                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
507                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
508                      & tx_offload.data[0])) &&
509                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
510                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
511                      & tx_offload.data[1]))))
512                 return txq->ctx_curr;
513
514         /* What if match with the next context  */
515         txq->ctx_curr ^= 1;
516         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
517                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
518                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
519                      & tx_offload.data[0])) &&
520                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
521                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
522                      & tx_offload.data[1]))))
523                 return txq->ctx_curr;
524
525         /* Mismatch, use the previous context */
526         return IXGBE_CTX_NUM;
527 }
528
529 static inline uint32_t
530 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
531 {
532         uint32_t tmp = 0;
533
534         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
535                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
536         if (ol_flags & PKT_TX_IP_CKSUM)
537                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
538         if (ol_flags & PKT_TX_TCP_SEG)
539                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
540         return tmp;
541 }
542
543 static inline uint32_t
544 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
545 {
546         uint32_t cmdtype = 0;
547
548         if (ol_flags & PKT_TX_VLAN_PKT)
549                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
550         if (ol_flags & PKT_TX_TCP_SEG)
551                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
552         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
553                 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
554         if (ol_flags & PKT_TX_MACSEC)
555                 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
556         return cmdtype;
557 }
558
559 /* Default RS bit threshold values */
560 #ifndef DEFAULT_TX_RS_THRESH
561 #define DEFAULT_TX_RS_THRESH   32
562 #endif
563 #ifndef DEFAULT_TX_FREE_THRESH
564 #define DEFAULT_TX_FREE_THRESH 32
565 #endif
566
567 /* Reset transmit descriptors after they have been used */
568 static inline int
569 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
570 {
571         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
572         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
573         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
574         uint16_t nb_tx_desc = txq->nb_tx_desc;
575         uint16_t desc_to_clean_to;
576         uint16_t nb_tx_to_clean;
577         uint32_t status;
578
579         /* Determine the last descriptor needing to be cleaned */
580         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
581         if (desc_to_clean_to >= nb_tx_desc)
582                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
583
584         /* Check to make sure the last descriptor to clean is done */
585         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
586         status = txr[desc_to_clean_to].wb.status;
587         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
588                 PMD_TX_FREE_LOG(DEBUG,
589                                 "TX descriptor %4u is not done"
590                                 "(port=%d queue=%d)",
591                                 desc_to_clean_to,
592                                 txq->port_id, txq->queue_id);
593                 /* Failed to clean any descriptors, better luck next time */
594                 return -(1);
595         }
596
597         /* Figure out how many descriptors will be cleaned */
598         if (last_desc_cleaned > desc_to_clean_to)
599                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
600                                                         desc_to_clean_to);
601         else
602                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
603                                                 last_desc_cleaned);
604
605         PMD_TX_FREE_LOG(DEBUG,
606                         "Cleaning %4u TX descriptors: %4u to %4u "
607                         "(port=%d queue=%d)",
608                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
609                         txq->port_id, txq->queue_id);
610
611         /*
612          * The last descriptor to clean is done, so that means all the
613          * descriptors from the last descriptor that was cleaned
614          * up to the last descriptor with the RS bit set
615          * are done. Only reset the threshold descriptor.
616          */
617         txr[desc_to_clean_to].wb.status = 0;
618
619         /* Update the txq to reflect the last descriptor that was cleaned */
620         txq->last_desc_cleaned = desc_to_clean_to;
621         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
622
623         /* No Error */
624         return 0;
625 }
626
627 uint16_t
628 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
629                 uint16_t nb_pkts)
630 {
631         struct ixgbe_tx_queue *txq;
632         struct ixgbe_tx_entry *sw_ring;
633         struct ixgbe_tx_entry *txe, *txn;
634         volatile union ixgbe_adv_tx_desc *txr;
635         volatile union ixgbe_adv_tx_desc *txd, *txp;
636         struct rte_mbuf     *tx_pkt;
637         struct rte_mbuf     *m_seg;
638         uint64_t buf_dma_addr;
639         uint32_t olinfo_status;
640         uint32_t cmd_type_len;
641         uint32_t pkt_len;
642         uint16_t slen;
643         uint64_t ol_flags;
644         uint16_t tx_id;
645         uint16_t tx_last;
646         uint16_t nb_tx;
647         uint16_t nb_used;
648         uint64_t tx_ol_req;
649         uint32_t ctx = 0;
650         uint32_t new_ctx;
651         union ixgbe_tx_offload tx_offload;
652 #ifdef RTE_LIBRTE_SECURITY
653         uint8_t use_ipsec;
654 #endif
655
656         tx_offload.data[0] = 0;
657         tx_offload.data[1] = 0;
658         txq = tx_queue;
659         sw_ring = txq->sw_ring;
660         txr     = txq->tx_ring;
661         tx_id   = txq->tx_tail;
662         txe = &sw_ring[tx_id];
663         txp = NULL;
664
665         /* Determine if the descriptor ring needs to be cleaned. */
666         if (txq->nb_tx_free < txq->tx_free_thresh)
667                 ixgbe_xmit_cleanup(txq);
668
669         rte_prefetch0(&txe->mbuf->pool);
670
671         /* TX loop */
672         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
673                 new_ctx = 0;
674                 tx_pkt = *tx_pkts++;
675                 pkt_len = tx_pkt->pkt_len;
676
677                 /*
678                  * Determine how many (if any) context descriptors
679                  * are needed for offload functionality.
680                  */
681                 ol_flags = tx_pkt->ol_flags;
682 #ifdef RTE_LIBRTE_SECURITY
683                 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
684 #endif
685
686                 /* If hardware offload required */
687                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
688                 if (tx_ol_req) {
689                         tx_offload.l2_len = tx_pkt->l2_len;
690                         tx_offload.l3_len = tx_pkt->l3_len;
691                         tx_offload.l4_len = tx_pkt->l4_len;
692                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
693                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
694                         tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
695                         tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
696 #ifdef RTE_LIBRTE_SECURITY
697                         if (use_ipsec) {
698                                 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
699                                         (union ixgbe_crypto_tx_desc_md *)
700                                                         &tx_pkt->udata64;
701                                 tx_offload.sa_idx = ipsec_mdata->sa_idx;
702                                 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
703                         }
704 #endif
705
706                         /* If new context need be built or reuse the exist ctx. */
707                         ctx = what_advctx_update(txq, tx_ol_req,
708                                 tx_offload);
709                         /* Only allocate context descriptor if required*/
710                         new_ctx = (ctx == IXGBE_CTX_NUM);
711                         ctx = txq->ctx_curr;
712                 }
713
714                 /*
715                  * Keep track of how many descriptors are used this loop
716                  * This will always be the number of segments + the number of
717                  * Context descriptors required to transmit the packet
718                  */
719                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
720
721                 if (txp != NULL &&
722                                 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
723                         /* set RS on the previous packet in the burst */
724                         txp->read.cmd_type_len |=
725                                 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
726
727                 /*
728                  * The number of descriptors that must be allocated for a
729                  * packet is the number of segments of that packet, plus 1
730                  * Context Descriptor for the hardware offload, if any.
731                  * Determine the last TX descriptor to allocate in the TX ring
732                  * for the packet, starting from the current position (tx_id)
733                  * in the ring.
734                  */
735                 tx_last = (uint16_t) (tx_id + nb_used - 1);
736
737                 /* Circular ring */
738                 if (tx_last >= txq->nb_tx_desc)
739                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
740
741                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
742                            " tx_first=%u tx_last=%u",
743                            (unsigned) txq->port_id,
744                            (unsigned) txq->queue_id,
745                            (unsigned) pkt_len,
746                            (unsigned) tx_id,
747                            (unsigned) tx_last);
748
749                 /*
750                  * Make sure there are enough TX descriptors available to
751                  * transmit the entire packet.
752                  * nb_used better be less than or equal to txq->tx_rs_thresh
753                  */
754                 if (nb_used > txq->nb_tx_free) {
755                         PMD_TX_FREE_LOG(DEBUG,
756                                         "Not enough free TX descriptors "
757                                         "nb_used=%4u nb_free=%4u "
758                                         "(port=%d queue=%d)",
759                                         nb_used, txq->nb_tx_free,
760                                         txq->port_id, txq->queue_id);
761
762                         if (ixgbe_xmit_cleanup(txq) != 0) {
763                                 /* Could not clean any descriptors */
764                                 if (nb_tx == 0)
765                                         return 0;
766                                 goto end_of_tx;
767                         }
768
769                         /* nb_used better be <= txq->tx_rs_thresh */
770                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
771                                 PMD_TX_FREE_LOG(DEBUG,
772                                         "The number of descriptors needed to "
773                                         "transmit the packet exceeds the "
774                                         "RS bit threshold. This will impact "
775                                         "performance."
776                                         "nb_used=%4u nb_free=%4u "
777                                         "tx_rs_thresh=%4u. "
778                                         "(port=%d queue=%d)",
779                                         nb_used, txq->nb_tx_free,
780                                         txq->tx_rs_thresh,
781                                         txq->port_id, txq->queue_id);
782                                 /*
783                                  * Loop here until there are enough TX
784                                  * descriptors or until the ring cannot be
785                                  * cleaned.
786                                  */
787                                 while (nb_used > txq->nb_tx_free) {
788                                         if (ixgbe_xmit_cleanup(txq) != 0) {
789                                                 /*
790                                                  * Could not clean any
791                                                  * descriptors
792                                                  */
793                                                 if (nb_tx == 0)
794                                                         return 0;
795                                                 goto end_of_tx;
796                                         }
797                                 }
798                         }
799                 }
800
801                 /*
802                  * By now there are enough free TX descriptors to transmit
803                  * the packet.
804                  */
805
806                 /*
807                  * Set common flags of all TX Data Descriptors.
808                  *
809                  * The following bits must be set in all Data Descriptors:
810                  *   - IXGBE_ADVTXD_DTYP_DATA
811                  *   - IXGBE_ADVTXD_DCMD_DEXT
812                  *
813                  * The following bits must be set in the first Data Descriptor
814                  * and are ignored in the other ones:
815                  *   - IXGBE_ADVTXD_DCMD_IFCS
816                  *   - IXGBE_ADVTXD_MAC_1588
817                  *   - IXGBE_ADVTXD_DCMD_VLE
818                  *
819                  * The following bits must only be set in the last Data
820                  * Descriptor:
821                  *   - IXGBE_TXD_CMD_EOP
822                  *
823                  * The following bits can be set in any Data Descriptor, but
824                  * are only set in the last Data Descriptor:
825                  *   - IXGBE_TXD_CMD_RS
826                  */
827                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
828                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
829
830 #ifdef RTE_LIBRTE_IEEE1588
831                 if (ol_flags & PKT_TX_IEEE1588_TMST)
832                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
833 #endif
834
835                 olinfo_status = 0;
836                 if (tx_ol_req) {
837
838                         if (ol_flags & PKT_TX_TCP_SEG) {
839                                 /* when TSO is on, paylen in descriptor is the
840                                  * not the packet len but the tcp payload len */
841                                 pkt_len -= (tx_offload.l2_len +
842                                         tx_offload.l3_len + tx_offload.l4_len);
843                         }
844
845                         /*
846                          * Setup the TX Advanced Context Descriptor if required
847                          */
848                         if (new_ctx) {
849                                 volatile struct ixgbe_adv_tx_context_desc *
850                                     ctx_txd;
851
852                                 ctx_txd = (volatile struct
853                                     ixgbe_adv_tx_context_desc *)
854                                     &txr[tx_id];
855
856                                 txn = &sw_ring[txe->next_id];
857                                 rte_prefetch0(&txn->mbuf->pool);
858
859                                 if (txe->mbuf != NULL) {
860                                         rte_pktmbuf_free_seg(txe->mbuf);
861                                         txe->mbuf = NULL;
862                                 }
863
864                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
865                                         tx_offload, &tx_pkt->udata64);
866
867                                 txe->last_id = tx_last;
868                                 tx_id = txe->next_id;
869                                 txe = txn;
870                         }
871
872                         /*
873                          * Setup the TX Advanced Data Descriptor,
874                          * This path will go through
875                          * whatever new/reuse the context descriptor
876                          */
877                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
878                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
879                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
880                 }
881
882                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
883 #ifdef RTE_LIBRTE_SECURITY
884                 if (use_ipsec)
885                         olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
886 #endif
887
888                 m_seg = tx_pkt;
889                 do {
890                         txd = &txr[tx_id];
891                         txn = &sw_ring[txe->next_id];
892                         rte_prefetch0(&txn->mbuf->pool);
893
894                         if (txe->mbuf != NULL)
895                                 rte_pktmbuf_free_seg(txe->mbuf);
896                         txe->mbuf = m_seg;
897
898                         /*
899                          * Set up Transmit Data Descriptor.
900                          */
901                         slen = m_seg->data_len;
902                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
903                         txd->read.buffer_addr =
904                                 rte_cpu_to_le_64(buf_dma_addr);
905                         txd->read.cmd_type_len =
906                                 rte_cpu_to_le_32(cmd_type_len | slen);
907                         txd->read.olinfo_status =
908                                 rte_cpu_to_le_32(olinfo_status);
909                         txe->last_id = tx_last;
910                         tx_id = txe->next_id;
911                         txe = txn;
912                         m_seg = m_seg->next;
913                 } while (m_seg != NULL);
914
915                 /*
916                  * The last packet data descriptor needs End Of Packet (EOP)
917                  */
918                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
919                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
920                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
921
922                 /* Set RS bit only on threshold packets' last descriptor */
923                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
924                         PMD_TX_FREE_LOG(DEBUG,
925                                         "Setting RS bit on TXD id="
926                                         "%4u (port=%d queue=%d)",
927                                         tx_last, txq->port_id, txq->queue_id);
928
929                         cmd_type_len |= IXGBE_TXD_CMD_RS;
930
931                         /* Update txq RS bit counters */
932                         txq->nb_tx_used = 0;
933                         txp = NULL;
934                 } else
935                         txp = txd;
936
937                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
938         }
939
940 end_of_tx:
941         /* set RS on last packet in the burst */
942         if (txp != NULL)
943                 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
944
945         rte_wmb();
946
947         /*
948          * Set the Transmit Descriptor Tail (TDT)
949          */
950         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
951                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
952                    (unsigned) tx_id, (unsigned) nb_tx);
953         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
954         txq->tx_tail = tx_id;
955
956         return nb_tx;
957 }
958
959 /*********************************************************************
960  *
961  *  TX prep functions
962  *
963  **********************************************************************/
964 uint16_t
965 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
966 {
967         int i, ret;
968         uint64_t ol_flags;
969         struct rte_mbuf *m;
970         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
971
972         for (i = 0; i < nb_pkts; i++) {
973                 m = tx_pkts[i];
974                 ol_flags = m->ol_flags;
975
976                 /**
977                  * Check if packet meets requirements for number of segments
978                  *
979                  * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
980                  *       non-TSO
981                  */
982
983                 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
984                         rte_errno = -EINVAL;
985                         return i;
986                 }
987
988                 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
989                         rte_errno = -ENOTSUP;
990                         return i;
991                 }
992
993 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
994                 ret = rte_validate_tx_offload(m);
995                 if (ret != 0) {
996                         rte_errno = ret;
997                         return i;
998                 }
999 #endif
1000                 ret = rte_net_intel_cksum_prepare(m);
1001                 if (ret != 0) {
1002                         rte_errno = ret;
1003                         return i;
1004                 }
1005         }
1006
1007         return i;
1008 }
1009
1010 /*********************************************************************
1011  *
1012  *  RX functions
1013  *
1014  **********************************************************************/
1015
1016 #define IXGBE_PACKET_TYPE_ETHER                         0X00
1017 #define IXGBE_PACKET_TYPE_IPV4                          0X01
1018 #define IXGBE_PACKET_TYPE_IPV4_TCP                      0X11
1019 #define IXGBE_PACKET_TYPE_IPV4_UDP                      0X21
1020 #define IXGBE_PACKET_TYPE_IPV4_SCTP                     0X41
1021 #define IXGBE_PACKET_TYPE_IPV4_EXT                      0X03
1022 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP                  0X13
1023 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP                  0X23
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP                 0X43
1025 #define IXGBE_PACKET_TYPE_IPV6                          0X04
1026 #define IXGBE_PACKET_TYPE_IPV6_TCP                      0X14
1027 #define IXGBE_PACKET_TYPE_IPV6_UDP                      0X24
1028 #define IXGBE_PACKET_TYPE_IPV6_SCTP                     0X44
1029 #define IXGBE_PACKET_TYPE_IPV6_EXT                      0X0C
1030 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP                  0X1C
1031 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP                  0X2C
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP                 0X4C
1033 #define IXGBE_PACKET_TYPE_IPV4_IPV6                     0X05
1034 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP                 0X15
1035 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP                 0X25
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP                0X45
1037 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6                 0X07
1038 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP             0X17
1039 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP             0X27
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP            0X47
1041 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT                 0X0D
1042 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP             0X1D
1043 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP             0X2D
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP            0X4D
1045 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT             0X0F
1046 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP         0X1F
1047 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP         0X2F
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP        0X4F
1049
1050 #define IXGBE_PACKET_TYPE_NVGRE                   0X00
1051 #define IXGBE_PACKET_TYPE_NVGRE_IPV4              0X01
1052 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP          0X11
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP          0X21
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP         0X41
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT          0X03
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0X13
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0X23
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0X43
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV6              0X04
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP          0X14
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP          0X24
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP         0X44
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT          0X0C
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0X1C
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0X2C
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0X4C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6         0X05
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0X15
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0X25
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0X0D
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1073
1074 #define IXGBE_PACKET_TYPE_VXLAN                   0X80
1075 #define IXGBE_PACKET_TYPE_VXLAN_IPV4              0X81
1076 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0X93
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0XA3
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0XC3
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV6              0X84
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP          0X94
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP          0XA4
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP         0XC4
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT          0X8C
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0X9C
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0XAC
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0XCC
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6         0X85
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0X95
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0XA5
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0X8D
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1097
1098 /**
1099  * Use 2 different table for normal packet and tunnel packet
1100  * to save the space.
1101  */
1102 const uint32_t
1103         ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1104         [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1105         [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1106                 RTE_PTYPE_L3_IPV4,
1107         [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1108                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1109         [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1110                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1111         [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1112                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1113         [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1114                 RTE_PTYPE_L3_IPV4_EXT,
1115         [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1116                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1117         [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1118                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1119         [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1120                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1121         [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1122                 RTE_PTYPE_L3_IPV6,
1123         [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1124                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1125         [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1126                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1127         [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1128                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1129         [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1130                 RTE_PTYPE_L3_IPV6_EXT,
1131         [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1132                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1133         [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1134                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1135         [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1136                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1137         [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1138                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1139                 RTE_PTYPE_INNER_L3_IPV6,
1140         [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1141                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1143         [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1144                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1146         [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1147                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1149         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1150                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1151                 RTE_PTYPE_INNER_L3_IPV6,
1152         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1153                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1155         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1156                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1158         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1159                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1161         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1162                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1163                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1164         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1165                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1167         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1168                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1170         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1171                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1173         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1174                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1175                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1176         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1177                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1179         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1180                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1182         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1183                 RTE_PTYPE_L2_ETHER |
1184                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1185                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1186 };
1187
1188 const uint32_t
1189         ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1190         [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1191                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1192                 RTE_PTYPE_INNER_L2_ETHER,
1193         [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1194                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1196         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1197                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1199         [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1200                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1202         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1203                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1205         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1206                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1208         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1211         [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1212                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1214                 RTE_PTYPE_INNER_L4_TCP,
1215         [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1216                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1217                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1218                 RTE_PTYPE_INNER_L4_TCP,
1219         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1220                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1221                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1222         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1223                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1225                 RTE_PTYPE_INNER_L4_TCP,
1226         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1227                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1228                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1229                 RTE_PTYPE_INNER_L3_IPV4,
1230         [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1231                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1232                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1233                 RTE_PTYPE_INNER_L4_UDP,
1234         [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1235                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1236                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1237                 RTE_PTYPE_INNER_L4_UDP,
1238         [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1239                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1240                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1241                 RTE_PTYPE_INNER_L4_SCTP,
1242         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1243                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1244                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1245         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1246                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1248                 RTE_PTYPE_INNER_L4_UDP,
1249         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1250                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1251                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1252                 RTE_PTYPE_INNER_L4_SCTP,
1253         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1254                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1255                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1256                 RTE_PTYPE_INNER_L3_IPV4,
1257         [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1258                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1259                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1260                 RTE_PTYPE_INNER_L4_SCTP,
1261         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1262                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1263                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1264                 RTE_PTYPE_INNER_L4_SCTP,
1265         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1266                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1267                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1268                 RTE_PTYPE_INNER_L4_TCP,
1269         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1270                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1271                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1272                 RTE_PTYPE_INNER_L4_UDP,
1273
1274         [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1275                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1276                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1277         [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1278                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1280                 RTE_PTYPE_INNER_L3_IPV4,
1281         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1282                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1283                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1284                 RTE_PTYPE_INNER_L3_IPV4_EXT,
1285         [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1286                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1287                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1288                 RTE_PTYPE_INNER_L3_IPV6,
1289         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1290                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1291                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1292                 RTE_PTYPE_INNER_L3_IPV4,
1293         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1294                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1295                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1296                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1297         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1298                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1299                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1300                 RTE_PTYPE_INNER_L3_IPV4,
1301         [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1302                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1303                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1304                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1305         [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1306                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1307                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1308                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1309         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1310                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1311                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1312                 RTE_PTYPE_INNER_L3_IPV4,
1313         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1314                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1315                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1316                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1317         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1318                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1319                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1320                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1321         [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1322                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1323                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1324                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1325         [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1326                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1327                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1328                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1329         [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1330                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1331                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1332                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1333         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1334                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1335                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1336                 RTE_PTYPE_INNER_L3_IPV4,
1337         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1338                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1339                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1340                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1341         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1342                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1343                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1344                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1345         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1346                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1347                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1348                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1349         [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1350                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1351                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1352                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1353         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1354                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1355                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1356                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1357         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1358                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1359                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1360                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1361         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1362                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1363                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1364                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1365 };
1366
1367 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1368 static inline uint32_t
1369 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1370 {
1371
1372         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1373                 return RTE_PTYPE_UNKNOWN;
1374
1375         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1376
1377         /* For tunnel packet */
1378         if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1379                 /* Remove the tunnel bit to save the space. */
1380                 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1381                 return ptype_table_tn[pkt_info];
1382         }
1383
1384         /**
1385          * For x550, if it's not tunnel,
1386          * tunnel type bit should be set to 0.
1387          * Reuse 82599's mask.
1388          */
1389         pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1390
1391         return ptype_table[pkt_info];
1392 }
1393
1394 static inline uint64_t
1395 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1396 {
1397         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1398                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1399                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1400                 PKT_RX_RSS_HASH, 0, 0, 0,
1401                 0, 0, 0,  PKT_RX_FDIR,
1402         };
1403 #ifdef RTE_LIBRTE_IEEE1588
1404         static uint64_t ip_pkt_etqf_map[8] = {
1405                 0, 0, 0, PKT_RX_IEEE1588_PTP,
1406                 0, 0, 0, 0,
1407         };
1408
1409         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1410                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1411                                 ip_rss_types_map[pkt_info & 0XF];
1412         else
1413                 return ip_rss_types_map[pkt_info & 0XF];
1414 #else
1415         return ip_rss_types_map[pkt_info & 0XF];
1416 #endif
1417 }
1418
1419 static inline uint64_t
1420 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1421 {
1422         uint64_t pkt_flags;
1423
1424         /*
1425          * Check if VLAN present only.
1426          * Do not check whether L3/L4 rx checksum done by NIC or not,
1427          * That can be found from rte_eth_rxmode.offloads flag
1428          */
1429         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  vlan_flags : 0;
1430
1431 #ifdef RTE_LIBRTE_IEEE1588
1432         if (rx_status & IXGBE_RXD_STAT_TMST)
1433                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1434 #endif
1435         return pkt_flags;
1436 }
1437
1438 static inline uint64_t
1439 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1440 {
1441         uint64_t pkt_flags;
1442
1443         /*
1444          * Bit 31: IPE, IPv4 checksum error
1445          * Bit 30: L4I, L4I integrity error
1446          */
1447         static uint64_t error_to_pkt_flags_map[4] = {
1448                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1449                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1450                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1451                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1452         };
1453         pkt_flags = error_to_pkt_flags_map[(rx_status >>
1454                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1455
1456         if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1457             (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1458                 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1459         }
1460
1461 #ifdef RTE_LIBRTE_SECURITY
1462         if (rx_status & IXGBE_RXD_STAT_SECP) {
1463                 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1464                 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1465                         pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1466         }
1467 #endif
1468
1469         return pkt_flags;
1470 }
1471
1472 /*
1473  * LOOK_AHEAD defines how many desc statuses to check beyond the
1474  * current descriptor.
1475  * It must be a pound define for optimal performance.
1476  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1477  * function only works with LOOK_AHEAD=8.
1478  */
1479 #define LOOK_AHEAD 8
1480 #if (LOOK_AHEAD != 8)
1481 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1482 #endif
1483 static inline int
1484 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1485 {
1486         volatile union ixgbe_adv_rx_desc *rxdp;
1487         struct ixgbe_rx_entry *rxep;
1488         struct rte_mbuf *mb;
1489         uint16_t pkt_len;
1490         uint64_t pkt_flags;
1491         int nb_dd;
1492         uint32_t s[LOOK_AHEAD];
1493         uint32_t pkt_info[LOOK_AHEAD];
1494         int i, j, nb_rx = 0;
1495         uint32_t status;
1496         uint64_t vlan_flags = rxq->vlan_flags;
1497
1498         /* get references to current descriptor and S/W ring entry */
1499         rxdp = &rxq->rx_ring[rxq->rx_tail];
1500         rxep = &rxq->sw_ring[rxq->rx_tail];
1501
1502         status = rxdp->wb.upper.status_error;
1503         /* check to make sure there is at least 1 packet to receive */
1504         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1505                 return 0;
1506
1507         /*
1508          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1509          * reference packets that are ready to be received.
1510          */
1511         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1512              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1513                 /* Read desc statuses backwards to avoid race condition */
1514                 for (j = 0; j < LOOK_AHEAD; j++)
1515                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1516
1517                 rte_smp_rmb();
1518
1519                 /* Compute how many status bits were set */
1520                 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1521                                 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1522                         ;
1523
1524                 for (j = 0; j < nb_dd; j++)
1525                         pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1526                                                        lo_dword.data);
1527
1528                 nb_rx += nb_dd;
1529
1530                 /* Translate descriptor info to mbuf format */
1531                 for (j = 0; j < nb_dd; ++j) {
1532                         mb = rxep[j].mbuf;
1533                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1534                                   rxq->crc_len;
1535                         mb->data_len = pkt_len;
1536                         mb->pkt_len = pkt_len;
1537                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1538
1539                         /* convert descriptor fields to rte mbuf flags */
1540                         pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1541                                 vlan_flags);
1542                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1543                         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1544                                         ((uint16_t)pkt_info[j]);
1545                         mb->ol_flags = pkt_flags;
1546                         mb->packet_type =
1547                                 ixgbe_rxd_pkt_info_to_pkt_type
1548                                         (pkt_info[j], rxq->pkt_type_mask);
1549
1550                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1551                                 mb->hash.rss = rte_le_to_cpu_32(
1552                                     rxdp[j].wb.lower.hi_dword.rss);
1553                         else if (pkt_flags & PKT_RX_FDIR) {
1554                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1555                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1556                                     IXGBE_ATR_HASH_MASK;
1557                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1558                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1559                         }
1560                 }
1561
1562                 /* Move mbuf pointers from the S/W ring to the stage */
1563                 for (j = 0; j < LOOK_AHEAD; ++j) {
1564                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1565                 }
1566
1567                 /* stop if all requested packets could not be received */
1568                 if (nb_dd != LOOK_AHEAD)
1569                         break;
1570         }
1571
1572         /* clear software ring entries so we can cleanup correctly */
1573         for (i = 0; i < nb_rx; ++i) {
1574                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1575         }
1576
1577
1578         return nb_rx;
1579 }
1580
1581 static inline int
1582 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1583 {
1584         volatile union ixgbe_adv_rx_desc *rxdp;
1585         struct ixgbe_rx_entry *rxep;
1586         struct rte_mbuf *mb;
1587         uint16_t alloc_idx;
1588         __le64 dma_addr;
1589         int diag, i;
1590
1591         /* allocate buffers in bulk directly into the S/W ring */
1592         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1593         rxep = &rxq->sw_ring[alloc_idx];
1594         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1595                                     rxq->rx_free_thresh);
1596         if (unlikely(diag != 0))
1597                 return -ENOMEM;
1598
1599         rxdp = &rxq->rx_ring[alloc_idx];
1600         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1601                 /* populate the static rte mbuf fields */
1602                 mb = rxep[i].mbuf;
1603                 if (reset_mbuf) {
1604                         mb->port = rxq->port_id;
1605                 }
1606
1607                 rte_mbuf_refcnt_set(mb, 1);
1608                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1609
1610                 /* populate the descriptors */
1611                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1612                 rxdp[i].read.hdr_addr = 0;
1613                 rxdp[i].read.pkt_addr = dma_addr;
1614         }
1615
1616         /* update state of internal queue structure */
1617         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1618         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1619                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1620
1621         /* no errors */
1622         return 0;
1623 }
1624
1625 static inline uint16_t
1626 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1627                          uint16_t nb_pkts)
1628 {
1629         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1630         int i;
1631
1632         /* how many packets are ready to return? */
1633         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1634
1635         /* copy mbuf pointers to the application's packet list */
1636         for (i = 0; i < nb_pkts; ++i)
1637                 rx_pkts[i] = stage[i];
1638
1639         /* update internal queue state */
1640         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1641         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1642
1643         return nb_pkts;
1644 }
1645
1646 static inline uint16_t
1647 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1648              uint16_t nb_pkts)
1649 {
1650         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1651         uint16_t nb_rx = 0;
1652
1653         /* Any previously recv'd pkts will be returned from the Rx stage */
1654         if (rxq->rx_nb_avail)
1655                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1656
1657         /* Scan the H/W ring for packets to receive */
1658         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1659
1660         /* update internal queue state */
1661         rxq->rx_next_avail = 0;
1662         rxq->rx_nb_avail = nb_rx;
1663         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1664
1665         /* if required, allocate new buffers to replenish descriptors */
1666         if (rxq->rx_tail > rxq->rx_free_trigger) {
1667                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1668
1669                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1670                         int i, j;
1671
1672                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1673                                    "queue_id=%u", (unsigned) rxq->port_id,
1674                                    (unsigned) rxq->queue_id);
1675
1676                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1677                                 rxq->rx_free_thresh;
1678
1679                         /*
1680                          * Need to rewind any previous receives if we cannot
1681                          * allocate new buffers to replenish the old ones.
1682                          */
1683                         rxq->rx_nb_avail = 0;
1684                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1685                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1686                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1687
1688                         return 0;
1689                 }
1690
1691                 /* update tail pointer */
1692                 rte_wmb();
1693                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1694                                             cur_free_trigger);
1695         }
1696
1697         if (rxq->rx_tail >= rxq->nb_rx_desc)
1698                 rxq->rx_tail = 0;
1699
1700         /* received any packets this loop? */
1701         if (rxq->rx_nb_avail)
1702                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1703
1704         return 0;
1705 }
1706
1707 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1708 uint16_t
1709 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1710                            uint16_t nb_pkts)
1711 {
1712         uint16_t nb_rx;
1713
1714         if (unlikely(nb_pkts == 0))
1715                 return 0;
1716
1717         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1718                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1719
1720         /* request is relatively large, chunk it up */
1721         nb_rx = 0;
1722         while (nb_pkts) {
1723                 uint16_t ret, n;
1724
1725                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1726                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1727                 nb_rx = (uint16_t)(nb_rx + ret);
1728                 nb_pkts = (uint16_t)(nb_pkts - ret);
1729                 if (ret < n)
1730                         break;
1731         }
1732
1733         return nb_rx;
1734 }
1735
1736 uint16_t
1737 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1738                 uint16_t nb_pkts)
1739 {
1740         struct ixgbe_rx_queue *rxq;
1741         volatile union ixgbe_adv_rx_desc *rx_ring;
1742         volatile union ixgbe_adv_rx_desc *rxdp;
1743         struct ixgbe_rx_entry *sw_ring;
1744         struct ixgbe_rx_entry *rxe;
1745         struct rte_mbuf *rxm;
1746         struct rte_mbuf *nmb;
1747         union ixgbe_adv_rx_desc rxd;
1748         uint64_t dma_addr;
1749         uint32_t staterr;
1750         uint32_t pkt_info;
1751         uint16_t pkt_len;
1752         uint16_t rx_id;
1753         uint16_t nb_rx;
1754         uint16_t nb_hold;
1755         uint64_t pkt_flags;
1756         uint64_t vlan_flags;
1757
1758         nb_rx = 0;
1759         nb_hold = 0;
1760         rxq = rx_queue;
1761         rx_id = rxq->rx_tail;
1762         rx_ring = rxq->rx_ring;
1763         sw_ring = rxq->sw_ring;
1764         vlan_flags = rxq->vlan_flags;
1765         while (nb_rx < nb_pkts) {
1766                 /*
1767                  * The order of operations here is important as the DD status
1768                  * bit must not be read after any other descriptor fields.
1769                  * rx_ring and rxdp are pointing to volatile data so the order
1770                  * of accesses cannot be reordered by the compiler. If they were
1771                  * not volatile, they could be reordered which could lead to
1772                  * using invalid descriptor fields when read from rxd.
1773                  */
1774                 rxdp = &rx_ring[rx_id];
1775                 staterr = rxdp->wb.upper.status_error;
1776                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1777                         break;
1778                 rxd = *rxdp;
1779
1780                 /*
1781                  * End of packet.
1782                  *
1783                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1784                  * is likely to be invalid and to be dropped by the various
1785                  * validation checks performed by the network stack.
1786                  *
1787                  * Allocate a new mbuf to replenish the RX ring descriptor.
1788                  * If the allocation fails:
1789                  *    - arrange for that RX descriptor to be the first one
1790                  *      being parsed the next time the receive function is
1791                  *      invoked [on the same queue].
1792                  *
1793                  *    - Stop parsing the RX ring and return immediately.
1794                  *
1795                  * This policy do not drop the packet received in the RX
1796                  * descriptor for which the allocation of a new mbuf failed.
1797                  * Thus, it allows that packet to be later retrieved if
1798                  * mbuf have been freed in the mean time.
1799                  * As a side effect, holding RX descriptors instead of
1800                  * systematically giving them back to the NIC may lead to
1801                  * RX ring exhaustion situations.
1802                  * However, the NIC can gracefully prevent such situations
1803                  * to happen by sending specific "back-pressure" flow control
1804                  * frames to its peer(s).
1805                  */
1806                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1807                            "ext_err_stat=0x%08x pkt_len=%u",
1808                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1809                            (unsigned) rx_id, (unsigned) staterr,
1810                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1811
1812                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1813                 if (nmb == NULL) {
1814                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1815                                    "queue_id=%u", (unsigned) rxq->port_id,
1816                                    (unsigned) rxq->queue_id);
1817                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1818                         break;
1819                 }
1820
1821                 nb_hold++;
1822                 rxe = &sw_ring[rx_id];
1823                 rx_id++;
1824                 if (rx_id == rxq->nb_rx_desc)
1825                         rx_id = 0;
1826
1827                 /* Prefetch next mbuf while processing current one. */
1828                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1829
1830                 /*
1831                  * When next RX descriptor is on a cache-line boundary,
1832                  * prefetch the next 4 RX descriptors and the next 8 pointers
1833                  * to mbufs.
1834                  */
1835                 if ((rx_id & 0x3) == 0) {
1836                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1837                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1838                 }
1839
1840                 rxm = rxe->mbuf;
1841                 rxe->mbuf = nmb;
1842                 dma_addr =
1843                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1844                 rxdp->read.hdr_addr = 0;
1845                 rxdp->read.pkt_addr = dma_addr;
1846
1847                 /*
1848                  * Initialize the returned mbuf.
1849                  * 1) setup generic mbuf fields:
1850                  *    - number of segments,
1851                  *    - next segment,
1852                  *    - packet length,
1853                  *    - RX port identifier.
1854                  * 2) integrate hardware offload data, if any:
1855                  *    - RSS flag & hash,
1856                  *    - IP checksum flag,
1857                  *    - VLAN TCI, if any,
1858                  *    - error flags.
1859                  */
1860                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1861                                       rxq->crc_len);
1862                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1863                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1864                 rxm->nb_segs = 1;
1865                 rxm->next = NULL;
1866                 rxm->pkt_len = pkt_len;
1867                 rxm->data_len = pkt_len;
1868                 rxm->port = rxq->port_id;
1869
1870                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1871                 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1872                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1873
1874                 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1875                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1876                 pkt_flags = pkt_flags |
1877                         ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1878                 rxm->ol_flags = pkt_flags;
1879                 rxm->packet_type =
1880                         ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1881                                                        rxq->pkt_type_mask);
1882
1883                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1884                         rxm->hash.rss = rte_le_to_cpu_32(
1885                                                 rxd.wb.lower.hi_dword.rss);
1886                 else if (pkt_flags & PKT_RX_FDIR) {
1887                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1888                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1889                                         IXGBE_ATR_HASH_MASK;
1890                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1891                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1892                 }
1893                 /*
1894                  * Store the mbuf address into the next entry of the array
1895                  * of returned packets.
1896                  */
1897                 rx_pkts[nb_rx++] = rxm;
1898         }
1899         rxq->rx_tail = rx_id;
1900
1901         /*
1902          * If the number of free RX descriptors is greater than the RX free
1903          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1904          * register.
1905          * Update the RDT with the value of the last processed RX descriptor
1906          * minus 1, to guarantee that the RDT register is never equal to the
1907          * RDH register, which creates a "full" ring situtation from the
1908          * hardware point of view...
1909          */
1910         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1911         if (nb_hold > rxq->rx_free_thresh) {
1912                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1913                            "nb_hold=%u nb_rx=%u",
1914                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1915                            (unsigned) rx_id, (unsigned) nb_hold,
1916                            (unsigned) nb_rx);
1917                 rx_id = (uint16_t) ((rx_id == 0) ?
1918                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1919                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1920                 nb_hold = 0;
1921         }
1922         rxq->nb_rx_hold = nb_hold;
1923         return nb_rx;
1924 }
1925
1926 /**
1927  * Detect an RSC descriptor.
1928  */
1929 static inline uint32_t
1930 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1931 {
1932         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1933                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1934 }
1935
1936 /**
1937  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1938  *
1939  * Fill the following info in the HEAD buffer of the Rx cluster:
1940  *    - RX port identifier
1941  *    - hardware offload data, if any:
1942  *      - RSS flag & hash
1943  *      - IP checksum flag
1944  *      - VLAN TCI, if any
1945  *      - error flags
1946  * @head HEAD of the packet cluster
1947  * @desc HW descriptor to get data from
1948  * @rxq Pointer to the Rx queue
1949  */
1950 static inline void
1951 ixgbe_fill_cluster_head_buf(
1952         struct rte_mbuf *head,
1953         union ixgbe_adv_rx_desc *desc,
1954         struct ixgbe_rx_queue *rxq,
1955         uint32_t staterr)
1956 {
1957         uint32_t pkt_info;
1958         uint64_t pkt_flags;
1959
1960         head->port = rxq->port_id;
1961
1962         /* The vlan_tci field is only valid when PKT_RX_VLAN is
1963          * set in the pkt_flags field.
1964          */
1965         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1966         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1967         pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1968         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1969         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1970         head->ol_flags = pkt_flags;
1971         head->packet_type =
1972                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1973
1974         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1975                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1976         else if (pkt_flags & PKT_RX_FDIR) {
1977                 head->hash.fdir.hash =
1978                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1979                                                           & IXGBE_ATR_HASH_MASK;
1980                 head->hash.fdir.id =
1981                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1982         }
1983 }
1984
1985 /**
1986  * ixgbe_recv_pkts_lro - receive handler for and LRO case.
1987  *
1988  * @rx_queue Rx queue handle
1989  * @rx_pkts table of received packets
1990  * @nb_pkts size of rx_pkts table
1991  * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1992  *
1993  * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1994  * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1995  *
1996  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1997  * 1) When non-EOP RSC completion arrives:
1998  *    a) Update the HEAD of the current RSC aggregation cluster with the new
1999  *       segment's data length.
2000  *    b) Set the "next" pointer of the current segment to point to the segment
2001  *       at the NEXTP index.
2002  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2003  *       in the sw_rsc_ring.
2004  * 2) When EOP arrives we just update the cluster's total length and offload
2005  *    flags and deliver the cluster up to the upper layers. In our case - put it
2006  *    in the rx_pkts table.
2007  *
2008  * Returns the number of received packets/clusters (according to the "bulk
2009  * receive" interface).
2010  */
2011 static inline uint16_t
2012 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2013                     bool bulk_alloc)
2014 {
2015         struct ixgbe_rx_queue *rxq = rx_queue;
2016         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2017         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2018         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2019         uint16_t rx_id = rxq->rx_tail;
2020         uint16_t nb_rx = 0;
2021         uint16_t nb_hold = rxq->nb_rx_hold;
2022         uint16_t prev_id = rxq->rx_tail;
2023
2024         while (nb_rx < nb_pkts) {
2025                 bool eop;
2026                 struct ixgbe_rx_entry *rxe;
2027                 struct ixgbe_scattered_rx_entry *sc_entry;
2028                 struct ixgbe_scattered_rx_entry *next_sc_entry;
2029                 struct ixgbe_rx_entry *next_rxe = NULL;
2030                 struct rte_mbuf *first_seg;
2031                 struct rte_mbuf *rxm;
2032                 struct rte_mbuf *nmb = NULL;
2033                 union ixgbe_adv_rx_desc rxd;
2034                 uint16_t data_len;
2035                 uint16_t next_id;
2036                 volatile union ixgbe_adv_rx_desc *rxdp;
2037                 uint32_t staterr;
2038
2039 next_desc:
2040                 /*
2041                  * The code in this whole file uses the volatile pointer to
2042                  * ensure the read ordering of the status and the rest of the
2043                  * descriptor fields (on the compiler level only!!!). This is so
2044                  * UGLY - why not to just use the compiler barrier instead? DPDK
2045                  * even has the rte_compiler_barrier() for that.
2046                  *
2047                  * But most importantly this is just wrong because this doesn't
2048                  * ensure memory ordering in a general case at all. For
2049                  * instance, DPDK is supposed to work on Power CPUs where
2050                  * compiler barrier may just not be enough!
2051                  *
2052                  * I tried to write only this function properly to have a
2053                  * starting point (as a part of an LRO/RSC series) but the
2054                  * compiler cursed at me when I tried to cast away the
2055                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2056                  * keeping it the way it is for now.
2057                  *
2058                  * The code in this file is broken in so many other places and
2059                  * will just not work on a big endian CPU anyway therefore the
2060                  * lines below will have to be revisited together with the rest
2061                  * of the ixgbe PMD.
2062                  *
2063                  * TODO:
2064                  *    - Get rid of "volatile" and let the compiler do its job.
2065                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
2066                  *      memory ordering below.
2067                  */
2068                 rxdp = &rx_ring[rx_id];
2069                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2070
2071                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2072                         break;
2073
2074                 rxd = *rxdp;
2075
2076                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2077                                   "staterr=0x%x data_len=%u",
2078                            rxq->port_id, rxq->queue_id, rx_id, staterr,
2079                            rte_le_to_cpu_16(rxd.wb.upper.length));
2080
2081                 if (!bulk_alloc) {
2082                         nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2083                         if (nmb == NULL) {
2084                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2085                                                   "port_id=%u queue_id=%u",
2086                                            rxq->port_id, rxq->queue_id);
2087
2088                                 rte_eth_devices[rxq->port_id].data->
2089                                                         rx_mbuf_alloc_failed++;
2090                                 break;
2091                         }
2092                 } else if (nb_hold > rxq->rx_free_thresh) {
2093                         uint16_t next_rdt = rxq->rx_free_trigger;
2094
2095                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2096                                 rte_wmb();
2097                                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2098                                                             next_rdt);
2099                                 nb_hold -= rxq->rx_free_thresh;
2100                         } else {
2101                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2102                                                   "port_id=%u queue_id=%u",
2103                                            rxq->port_id, rxq->queue_id);
2104
2105                                 rte_eth_devices[rxq->port_id].data->
2106                                                         rx_mbuf_alloc_failed++;
2107                                 break;
2108                         }
2109                 }
2110
2111                 nb_hold++;
2112                 rxe = &sw_ring[rx_id];
2113                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2114
2115                 next_id = rx_id + 1;
2116                 if (next_id == rxq->nb_rx_desc)
2117                         next_id = 0;
2118
2119                 /* Prefetch next mbuf while processing current one. */
2120                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2121
2122                 /*
2123                  * When next RX descriptor is on a cache-line boundary,
2124                  * prefetch the next 4 RX descriptors and the next 4 pointers
2125                  * to mbufs.
2126                  */
2127                 if ((next_id & 0x3) == 0) {
2128                         rte_ixgbe_prefetch(&rx_ring[next_id]);
2129                         rte_ixgbe_prefetch(&sw_ring[next_id]);
2130                 }
2131
2132                 rxm = rxe->mbuf;
2133
2134                 if (!bulk_alloc) {
2135                         __le64 dma =
2136                           rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2137                         /*
2138                          * Update RX descriptor with the physical address of the
2139                          * new data buffer of the new allocated mbuf.
2140                          */
2141                         rxe->mbuf = nmb;
2142
2143                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
2144                         rxdp->read.hdr_addr = 0;
2145                         rxdp->read.pkt_addr = dma;
2146                 } else
2147                         rxe->mbuf = NULL;
2148
2149                 /*
2150                  * Set data length & data buffer address of mbuf.
2151                  */
2152                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2153                 rxm->data_len = data_len;
2154
2155                 if (!eop) {
2156                         uint16_t nextp_id;
2157                         /*
2158                          * Get next descriptor index:
2159                          *  - For RSC it's in the NEXTP field.
2160                          *  - For a scattered packet - it's just a following
2161                          *    descriptor.
2162                          */
2163                         if (ixgbe_rsc_count(&rxd))
2164                                 nextp_id =
2165                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2166                                                        IXGBE_RXDADV_NEXTP_SHIFT;
2167                         else
2168                                 nextp_id = next_id;
2169
2170                         next_sc_entry = &sw_sc_ring[nextp_id];
2171                         next_rxe = &sw_ring[nextp_id];
2172                         rte_ixgbe_prefetch(next_rxe);
2173                 }
2174
2175                 sc_entry = &sw_sc_ring[rx_id];
2176                 first_seg = sc_entry->fbuf;
2177                 sc_entry->fbuf = NULL;
2178
2179                 /*
2180                  * If this is the first buffer of the received packet,
2181                  * set the pointer to the first mbuf of the packet and
2182                  * initialize its context.
2183                  * Otherwise, update the total length and the number of segments
2184                  * of the current scattered packet, and update the pointer to
2185                  * the last mbuf of the current packet.
2186                  */
2187                 if (first_seg == NULL) {
2188                         first_seg = rxm;
2189                         first_seg->pkt_len = data_len;
2190                         first_seg->nb_segs = 1;
2191                 } else {
2192                         first_seg->pkt_len += data_len;
2193                         first_seg->nb_segs++;
2194                 }
2195
2196                 prev_id = rx_id;
2197                 rx_id = next_id;
2198
2199                 /*
2200                  * If this is not the last buffer of the received packet, update
2201                  * the pointer to the first mbuf at the NEXTP entry in the
2202                  * sw_sc_ring and continue to parse the RX ring.
2203                  */
2204                 if (!eop && next_rxe) {
2205                         rxm->next = next_rxe->mbuf;
2206                         next_sc_entry->fbuf = first_seg;
2207                         goto next_desc;
2208                 }
2209
2210                 /* Initialize the first mbuf of the returned packet */
2211                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2212
2213                 /*
2214                  * Deal with the case, when HW CRC srip is disabled.
2215                  * That can't happen when LRO is enabled, but still could
2216                  * happen for scattered RX mode.
2217                  */
2218                 first_seg->pkt_len -= rxq->crc_len;
2219                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2220                         struct rte_mbuf *lp;
2221
2222                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
2223                                 ;
2224
2225                         first_seg->nb_segs--;
2226                         lp->data_len -= rxq->crc_len - rxm->data_len;
2227                         lp->next = NULL;
2228                         rte_pktmbuf_free_seg(rxm);
2229                 } else
2230                         rxm->data_len -= rxq->crc_len;
2231
2232                 /* Prefetch data of first segment, if configured to do so. */
2233                 rte_packet_prefetch((char *)first_seg->buf_addr +
2234                         first_seg->data_off);
2235
2236                 /*
2237                  * Store the mbuf address into the next entry of the array
2238                  * of returned packets.
2239                  */
2240                 rx_pkts[nb_rx++] = first_seg;
2241         }
2242
2243         /*
2244          * Record index of the next RX descriptor to probe.
2245          */
2246         rxq->rx_tail = rx_id;
2247
2248         /*
2249          * If the number of free RX descriptors is greater than the RX free
2250          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2251          * register.
2252          * Update the RDT with the value of the last processed RX descriptor
2253          * minus 1, to guarantee that the RDT register is never equal to the
2254          * RDH register, which creates a "full" ring situtation from the
2255          * hardware point of view...
2256          */
2257         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2258                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2259                            "nb_hold=%u nb_rx=%u",
2260                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2261
2262                 rte_wmb();
2263                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2264                 nb_hold = 0;
2265         }
2266
2267         rxq->nb_rx_hold = nb_hold;
2268         return nb_rx;
2269 }
2270
2271 uint16_t
2272 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2273                                  uint16_t nb_pkts)
2274 {
2275         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2276 }
2277
2278 uint16_t
2279 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2280                                uint16_t nb_pkts)
2281 {
2282         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2283 }
2284
2285 /*********************************************************************
2286  *
2287  *  Queue management functions
2288  *
2289  **********************************************************************/
2290
2291 static void __attribute__((cold))
2292 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2293 {
2294         unsigned i;
2295
2296         if (txq->sw_ring != NULL) {
2297                 for (i = 0; i < txq->nb_tx_desc; i++) {
2298                         if (txq->sw_ring[i].mbuf != NULL) {
2299                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2300                                 txq->sw_ring[i].mbuf = NULL;
2301                         }
2302                 }
2303         }
2304 }
2305
2306 static void __attribute__((cold))
2307 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2308 {
2309         if (txq != NULL &&
2310             txq->sw_ring != NULL)
2311                 rte_free(txq->sw_ring);
2312 }
2313
2314 static void __attribute__((cold))
2315 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2316 {
2317         if (txq != NULL && txq->ops != NULL) {
2318                 txq->ops->release_mbufs(txq);
2319                 txq->ops->free_swring(txq);
2320                 rte_free(txq);
2321         }
2322 }
2323
2324 void __attribute__((cold))
2325 ixgbe_dev_tx_queue_release(void *txq)
2326 {
2327         ixgbe_tx_queue_release(txq);
2328 }
2329
2330 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2331 static void __attribute__((cold))
2332 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2333 {
2334         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2335         struct ixgbe_tx_entry *txe = txq->sw_ring;
2336         uint16_t prev, i;
2337
2338         /* Zero out HW ring memory */
2339         for (i = 0; i < txq->nb_tx_desc; i++) {
2340                 txq->tx_ring[i] = zeroed_desc;
2341         }
2342
2343         /* Initialize SW ring entries */
2344         prev = (uint16_t) (txq->nb_tx_desc - 1);
2345         for (i = 0; i < txq->nb_tx_desc; i++) {
2346                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2347
2348                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2349                 txe[i].mbuf = NULL;
2350                 txe[i].last_id = i;
2351                 txe[prev].next_id = i;
2352                 prev = i;
2353         }
2354
2355         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2356         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2357
2358         txq->tx_tail = 0;
2359         txq->nb_tx_used = 0;
2360         /*
2361          * Always allow 1 descriptor to be un-allocated to avoid
2362          * a H/W race condition
2363          */
2364         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2365         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2366         txq->ctx_curr = 0;
2367         memset((void *)&txq->ctx_cache, 0,
2368                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2369 }
2370
2371 static const struct ixgbe_txq_ops def_txq_ops = {
2372         .release_mbufs = ixgbe_tx_queue_release_mbufs,
2373         .free_swring = ixgbe_tx_free_swring,
2374         .reset = ixgbe_reset_tx_queue,
2375 };
2376
2377 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2378  * the queue parameters. Used in tx_queue_setup by primary process and then
2379  * in dev_init by secondary process when attaching to an existing ethdev.
2380  */
2381 void __attribute__((cold))
2382 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2383 {
2384         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2385         if ((txq->offloads == 0) &&
2386 #ifdef RTE_LIBRTE_SECURITY
2387                         !(txq->using_ipsec) &&
2388 #endif
2389                         (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2390                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2391                 dev->tx_pkt_prepare = NULL;
2392 #ifdef RTE_IXGBE_INC_VECTOR
2393                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2394                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2395                                         ixgbe_txq_vec_setup(txq) == 0)) {
2396                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2397                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2398                 } else
2399 #endif
2400                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2401         } else {
2402                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2403                 PMD_INIT_LOG(DEBUG,
2404                                 " - offloads = 0x%" PRIx64,
2405                                 txq->offloads);
2406                 PMD_INIT_LOG(DEBUG,
2407                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2408                                 (unsigned long)txq->tx_rs_thresh,
2409                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2410                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2411                 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2412         }
2413 }
2414
2415 uint64_t
2416 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2417 {
2418         RTE_SET_USED(dev);
2419
2420         return 0;
2421 }
2422
2423 uint64_t
2424 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2425 {
2426         uint64_t tx_offload_capa;
2427         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2428
2429         tx_offload_capa =
2430                 DEV_TX_OFFLOAD_VLAN_INSERT |
2431                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2432                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2433                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2434                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2435                 DEV_TX_OFFLOAD_TCP_TSO     |
2436                 DEV_TX_OFFLOAD_MULTI_SEGS;
2437
2438         if (hw->mac.type == ixgbe_mac_82599EB ||
2439             hw->mac.type == ixgbe_mac_X540)
2440                 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2441
2442         if (hw->mac.type == ixgbe_mac_X550 ||
2443             hw->mac.type == ixgbe_mac_X550EM_x ||
2444             hw->mac.type == ixgbe_mac_X550EM_a)
2445                 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2446
2447 #ifdef RTE_LIBRTE_SECURITY
2448         if (dev->security_ctx)
2449                 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2450 #endif
2451         return tx_offload_capa;
2452 }
2453
2454 int __attribute__((cold))
2455 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2456                          uint16_t queue_idx,
2457                          uint16_t nb_desc,
2458                          unsigned int socket_id,
2459                          const struct rte_eth_txconf *tx_conf)
2460 {
2461         const struct rte_memzone *tz;
2462         struct ixgbe_tx_queue *txq;
2463         struct ixgbe_hw     *hw;
2464         uint16_t tx_rs_thresh, tx_free_thresh;
2465         uint64_t offloads;
2466
2467         PMD_INIT_FUNC_TRACE();
2468         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2469
2470         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2471
2472         /*
2473          * Validate number of transmit descriptors.
2474          * It must not exceed hardware maximum, and must be multiple
2475          * of IXGBE_ALIGN.
2476          */
2477         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2478                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2479                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2480                 return -EINVAL;
2481         }
2482
2483         /*
2484          * The following two parameters control the setting of the RS bit on
2485          * transmit descriptors.
2486          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2487          * descriptors have been used.
2488          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2489          * descriptors are used or if the number of descriptors required
2490          * to transmit a packet is greater than the number of free TX
2491          * descriptors.
2492          * The following constraints must be satisfied:
2493          *  tx_rs_thresh must be greater than 0.
2494          *  tx_rs_thresh must be less than the size of the ring minus 2.
2495          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2496          *  tx_rs_thresh must be a divisor of the ring size.
2497          *  tx_free_thresh must be greater than 0.
2498          *  tx_free_thresh must be less than the size of the ring minus 3.
2499          *  tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2500          * One descriptor in the TX ring is used as a sentinel to avoid a
2501          * H/W race condition, hence the maximum threshold constraints.
2502          * When set to zero use default values.
2503          */
2504         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2505                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2506         /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
2507         tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2508                         nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2509         if (tx_conf->tx_rs_thresh > 0)
2510                 tx_rs_thresh = tx_conf->tx_rs_thresh;
2511         if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2512                 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2513                              "exceed nb_desc. (tx_rs_thresh=%u "
2514                              "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2515                              (unsigned int)tx_rs_thresh,
2516                              (unsigned int)tx_free_thresh,
2517                              (unsigned int)nb_desc,
2518                              (int)dev->data->port_id,
2519                              (int)queue_idx);
2520                 return -(EINVAL);
2521         }
2522         if (tx_rs_thresh >= (nb_desc - 2)) {
2523                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2524                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
2525                         "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2526                         (int)dev->data->port_id, (int)queue_idx);
2527                 return -(EINVAL);
2528         }
2529         if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2530                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2531                         "(tx_rs_thresh=%u port=%d queue=%d)",
2532                         DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2533                         (int)dev->data->port_id, (int)queue_idx);
2534                 return -(EINVAL);
2535         }
2536         if (tx_free_thresh >= (nb_desc - 3)) {
2537                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2538                              "tx_free_thresh must be less than the number of "
2539                              "TX descriptors minus 3. (tx_free_thresh=%u "
2540                              "port=%d queue=%d)",
2541                              (unsigned int)tx_free_thresh,
2542                              (int)dev->data->port_id, (int)queue_idx);
2543                 return -(EINVAL);
2544         }
2545         if (tx_rs_thresh > tx_free_thresh) {
2546                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2547                              "tx_free_thresh. (tx_free_thresh=%u "
2548                              "tx_rs_thresh=%u port=%d queue=%d)",
2549                              (unsigned int)tx_free_thresh,
2550                              (unsigned int)tx_rs_thresh,
2551                              (int)dev->data->port_id,
2552                              (int)queue_idx);
2553                 return -(EINVAL);
2554         }
2555         if ((nb_desc % tx_rs_thresh) != 0) {
2556                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2557                              "number of TX descriptors. (tx_rs_thresh=%u "
2558                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2559                              (int)dev->data->port_id, (int)queue_idx);
2560                 return -(EINVAL);
2561         }
2562
2563         /*
2564          * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
2565          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2566          * by the NIC and all descriptors are written back after the NIC
2567          * accumulates WTHRESH descriptors.
2568          */
2569         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2570                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2571                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2572                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2573                              (int)dev->data->port_id, (int)queue_idx);
2574                 return -(EINVAL);
2575         }
2576
2577         /* Free memory prior to re-allocation if needed... */
2578         if (dev->data->tx_queues[queue_idx] != NULL) {
2579                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2580                 dev->data->tx_queues[queue_idx] = NULL;
2581         }
2582
2583         /* First allocate the tx queue data structure */
2584         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2585                                  RTE_CACHE_LINE_SIZE, socket_id);
2586         if (txq == NULL)
2587                 return -ENOMEM;
2588
2589         /*
2590          * Allocate TX ring hardware descriptors. A memzone large enough to
2591          * handle the maximum ring size is allocated in order to allow for
2592          * resizing in later calls to the queue setup function.
2593          */
2594         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2595                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2596                         IXGBE_ALIGN, socket_id);
2597         if (tz == NULL) {
2598                 ixgbe_tx_queue_release(txq);
2599                 return -ENOMEM;
2600         }
2601
2602         txq->nb_tx_desc = nb_desc;
2603         txq->tx_rs_thresh = tx_rs_thresh;
2604         txq->tx_free_thresh = tx_free_thresh;
2605         txq->pthresh = tx_conf->tx_thresh.pthresh;
2606         txq->hthresh = tx_conf->tx_thresh.hthresh;
2607         txq->wthresh = tx_conf->tx_thresh.wthresh;
2608         txq->queue_id = queue_idx;
2609         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2610                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2611         txq->port_id = dev->data->port_id;
2612         txq->offloads = offloads;
2613         txq->ops = &def_txq_ops;
2614         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2615 #ifdef RTE_LIBRTE_SECURITY
2616         txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2617                         DEV_TX_OFFLOAD_SECURITY);
2618 #endif
2619
2620         /*
2621          * Modification to set VFTDT for virtual function if vf is detected
2622          */
2623         if (hw->mac.type == ixgbe_mac_82599_vf ||
2624             hw->mac.type == ixgbe_mac_X540_vf ||
2625             hw->mac.type == ixgbe_mac_X550_vf ||
2626             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2627             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2628                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2629         else
2630                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2631
2632         txq->tx_ring_phys_addr = tz->iova;
2633         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2634
2635         /* Allocate software ring */
2636         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2637                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2638                                 RTE_CACHE_LINE_SIZE, socket_id);
2639         if (txq->sw_ring == NULL) {
2640                 ixgbe_tx_queue_release(txq);
2641                 return -ENOMEM;
2642         }
2643         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2644                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2645
2646         /* set up vector or scalar TX function as appropriate */
2647         ixgbe_set_tx_function(dev, txq);
2648
2649         txq->ops->reset(txq);
2650
2651         dev->data->tx_queues[queue_idx] = txq;
2652
2653
2654         return 0;
2655 }
2656
2657 /**
2658  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2659  *
2660  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2661  * in the sw_rsc_ring is not set to NULL but rather points to the next
2662  * mbuf of this RSC aggregation (that has not been completed yet and still
2663  * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
2664  * will just free first "nb_segs" segments of the cluster explicitly by calling
2665  * an rte_pktmbuf_free_seg().
2666  *
2667  * @m scattered cluster head
2668  */
2669 static void __attribute__((cold))
2670 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2671 {
2672         uint16_t i, nb_segs = m->nb_segs;
2673         struct rte_mbuf *next_seg;
2674
2675         for (i = 0; i < nb_segs; i++) {
2676                 next_seg = m->next;
2677                 rte_pktmbuf_free_seg(m);
2678                 m = next_seg;
2679         }
2680 }
2681
2682 static void __attribute__((cold))
2683 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2684 {
2685         unsigned i;
2686
2687 #ifdef RTE_IXGBE_INC_VECTOR
2688         /* SSE Vector driver has a different way of releasing mbufs. */
2689         if (rxq->rx_using_sse) {
2690                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2691                 return;
2692         }
2693 #endif
2694
2695         if (rxq->sw_ring != NULL) {
2696                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2697                         if (rxq->sw_ring[i].mbuf != NULL) {
2698                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2699                                 rxq->sw_ring[i].mbuf = NULL;
2700                         }
2701                 }
2702                 if (rxq->rx_nb_avail) {
2703                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2704                                 struct rte_mbuf *mb;
2705
2706                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2707                                 rte_pktmbuf_free_seg(mb);
2708                         }
2709                         rxq->rx_nb_avail = 0;
2710                 }
2711         }
2712
2713         if (rxq->sw_sc_ring)
2714                 for (i = 0; i < rxq->nb_rx_desc; i++)
2715                         if (rxq->sw_sc_ring[i].fbuf) {
2716                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2717                                 rxq->sw_sc_ring[i].fbuf = NULL;
2718                         }
2719 }
2720
2721 static void __attribute__((cold))
2722 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2723 {
2724         if (rxq != NULL) {
2725                 ixgbe_rx_queue_release_mbufs(rxq);
2726                 rte_free(rxq->sw_ring);
2727                 rte_free(rxq->sw_sc_ring);
2728                 rte_free(rxq);
2729         }
2730 }
2731
2732 void __attribute__((cold))
2733 ixgbe_dev_rx_queue_release(void *rxq)
2734 {
2735         ixgbe_rx_queue_release(rxq);
2736 }
2737
2738 /*
2739  * Check if Rx Burst Bulk Alloc function can be used.
2740  * Return
2741  *        0: the preconditions are satisfied and the bulk allocation function
2742  *           can be used.
2743  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2744  *           function must be used.
2745  */
2746 static inline int __attribute__((cold))
2747 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2748 {
2749         int ret = 0;
2750
2751         /*
2752          * Make sure the following pre-conditions are satisfied:
2753          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2754          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2755          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2756          * Scattered packets are not supported.  This should be checked
2757          * outside of this function.
2758          */
2759         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2760                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2761                              "rxq->rx_free_thresh=%d, "
2762                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2763                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2764                 ret = -EINVAL;
2765         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2766                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2767                              "rxq->rx_free_thresh=%d, "
2768                              "rxq->nb_rx_desc=%d",
2769                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2770                 ret = -EINVAL;
2771         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2772                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2773                              "rxq->nb_rx_desc=%d, "
2774                              "rxq->rx_free_thresh=%d",
2775                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2776                 ret = -EINVAL;
2777         }
2778
2779         return ret;
2780 }
2781
2782 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2783 static void __attribute__((cold))
2784 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2785 {
2786         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2787         unsigned i;
2788         uint16_t len = rxq->nb_rx_desc;
2789
2790         /*
2791          * By default, the Rx queue setup function allocates enough memory for
2792          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2793          * extra memory at the end of the descriptor ring to be zero'd out.
2794          */
2795         if (adapter->rx_bulk_alloc_allowed)
2796                 /* zero out extra memory */
2797                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2798
2799         /*
2800          * Zero out HW ring memory. Zero out extra memory at the end of
2801          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2802          * reads extra memory as zeros.
2803          */
2804         for (i = 0; i < len; i++) {
2805                 rxq->rx_ring[i] = zeroed_desc;
2806         }
2807
2808         /*
2809          * initialize extra software ring entries. Space for these extra
2810          * entries is always allocated
2811          */
2812         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2813         for (i = rxq->nb_rx_desc; i < len; ++i) {
2814                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2815         }
2816
2817         rxq->rx_nb_avail = 0;
2818         rxq->rx_next_avail = 0;
2819         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2820         rxq->rx_tail = 0;
2821         rxq->nb_rx_hold = 0;
2822         rxq->pkt_first_seg = NULL;
2823         rxq->pkt_last_seg = NULL;
2824
2825 #ifdef RTE_IXGBE_INC_VECTOR
2826         rxq->rxrearm_start = 0;
2827         rxq->rxrearm_nb = 0;
2828 #endif
2829 }
2830
2831 static int
2832 ixgbe_is_vf(struct rte_eth_dev *dev)
2833 {
2834         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2835
2836         switch (hw->mac.type) {
2837         case ixgbe_mac_82599_vf:
2838         case ixgbe_mac_X540_vf:
2839         case ixgbe_mac_X550_vf:
2840         case ixgbe_mac_X550EM_x_vf:
2841         case ixgbe_mac_X550EM_a_vf:
2842                 return 1;
2843         default:
2844                 return 0;
2845         }
2846 }
2847
2848 uint64_t
2849 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2850 {
2851         uint64_t offloads = 0;
2852         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2853
2854         if (hw->mac.type != ixgbe_mac_82598EB)
2855                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2856
2857         return offloads;
2858 }
2859
2860 uint64_t
2861 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2862 {
2863         uint64_t offloads;
2864         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2865
2866         offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
2867                    DEV_RX_OFFLOAD_UDP_CKSUM   |
2868                    DEV_RX_OFFLOAD_TCP_CKSUM   |
2869                    DEV_RX_OFFLOAD_KEEP_CRC    |
2870                    DEV_RX_OFFLOAD_JUMBO_FRAME |
2871                    DEV_RX_OFFLOAD_VLAN_FILTER |
2872                    DEV_RX_OFFLOAD_SCATTER;
2873
2874         if (hw->mac.type == ixgbe_mac_82598EB)
2875                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2876
2877         if (ixgbe_is_vf(dev) == 0)
2878                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2879
2880         /*
2881          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2882          * mode.
2883          */
2884         if ((hw->mac.type == ixgbe_mac_82599EB ||
2885              hw->mac.type == ixgbe_mac_X540 ||
2886              hw->mac.type == ixgbe_mac_X550) &&
2887             !RTE_ETH_DEV_SRIOV(dev).active)
2888                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2889
2890         if (hw->mac.type == ixgbe_mac_82599EB ||
2891             hw->mac.type == ixgbe_mac_X540)
2892                 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2893
2894         if (hw->mac.type == ixgbe_mac_X550 ||
2895             hw->mac.type == ixgbe_mac_X550EM_x ||
2896             hw->mac.type == ixgbe_mac_X550EM_a)
2897                 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2898
2899 #ifdef RTE_LIBRTE_SECURITY
2900         if (dev->security_ctx)
2901                 offloads |= DEV_RX_OFFLOAD_SECURITY;
2902 #endif
2903
2904         return offloads;
2905 }
2906
2907 int __attribute__((cold))
2908 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2909                          uint16_t queue_idx,
2910                          uint16_t nb_desc,
2911                          unsigned int socket_id,
2912                          const struct rte_eth_rxconf *rx_conf,
2913                          struct rte_mempool *mp)
2914 {
2915         const struct rte_memzone *rz;
2916         struct ixgbe_rx_queue *rxq;
2917         struct ixgbe_hw     *hw;
2918         uint16_t len;
2919         struct ixgbe_adapter *adapter =
2920                 (struct ixgbe_adapter *)dev->data->dev_private;
2921         uint64_t offloads;
2922
2923         PMD_INIT_FUNC_TRACE();
2924         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2925
2926         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2927
2928         /*
2929          * Validate number of receive descriptors.
2930          * It must not exceed hardware maximum, and must be multiple
2931          * of IXGBE_ALIGN.
2932          */
2933         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2934                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2935                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2936                 return -EINVAL;
2937         }
2938
2939         /* Free memory prior to re-allocation if needed... */
2940         if (dev->data->rx_queues[queue_idx] != NULL) {
2941                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2942                 dev->data->rx_queues[queue_idx] = NULL;
2943         }
2944
2945         /* First allocate the rx queue data structure */
2946         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2947                                  RTE_CACHE_LINE_SIZE, socket_id);
2948         if (rxq == NULL)
2949                 return -ENOMEM;
2950         rxq->mb_pool = mp;
2951         rxq->nb_rx_desc = nb_desc;
2952         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2953         rxq->queue_id = queue_idx;
2954         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2955                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2956         rxq->port_id = dev->data->port_id;
2957         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2958                 rxq->crc_len = RTE_ETHER_CRC_LEN;
2959         else
2960                 rxq->crc_len = 0;
2961         rxq->drop_en = rx_conf->rx_drop_en;
2962         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2963         rxq->offloads = offloads;
2964
2965         /*
2966          * The packet type in RX descriptor is different for different NICs.
2967          * Some bits are used for x550 but reserved for other NICS.
2968          * So set different masks for different NICs.
2969          */
2970         if (hw->mac.type == ixgbe_mac_X550 ||
2971             hw->mac.type == ixgbe_mac_X550EM_x ||
2972             hw->mac.type == ixgbe_mac_X550EM_a ||
2973             hw->mac.type == ixgbe_mac_X550_vf ||
2974             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2975             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2976                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2977         else
2978                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2979
2980         /*
2981          * Allocate RX ring hardware descriptors. A memzone large enough to
2982          * handle the maximum ring size is allocated in order to allow for
2983          * resizing in later calls to the queue setup function.
2984          */
2985         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2986                                       RX_RING_SZ, IXGBE_ALIGN, socket_id);
2987         if (rz == NULL) {
2988                 ixgbe_rx_queue_release(rxq);
2989                 return -ENOMEM;
2990         }
2991
2992         /*
2993          * Zero init all the descriptors in the ring.
2994          */
2995         memset(rz->addr, 0, RX_RING_SZ);
2996
2997         /*
2998          * Modified to setup VFRDT for Virtual Function
2999          */
3000         if (hw->mac.type == ixgbe_mac_82599_vf ||
3001             hw->mac.type == ixgbe_mac_X540_vf ||
3002             hw->mac.type == ixgbe_mac_X550_vf ||
3003             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3004             hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3005                 rxq->rdt_reg_addr =
3006                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3007                 rxq->rdh_reg_addr =
3008                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3009         } else {
3010                 rxq->rdt_reg_addr =
3011                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3012                 rxq->rdh_reg_addr =
3013                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3014         }
3015
3016         rxq->rx_ring_phys_addr = rz->iova;
3017         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3018
3019         /*
3020          * Certain constraints must be met in order to use the bulk buffer
3021          * allocation Rx burst function. If any of Rx queues doesn't meet them
3022          * the feature should be disabled for the whole port.
3023          */
3024         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3025                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3026                                     "preconditions - canceling the feature for "
3027                                     "the whole port[%d]",
3028                              rxq->queue_id, rxq->port_id);
3029                 adapter->rx_bulk_alloc_allowed = false;
3030         }
3031
3032         /*
3033          * Allocate software ring. Allow for space at the end of the
3034          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3035          * function does not access an invalid memory region.
3036          */
3037         len = nb_desc;
3038         if (adapter->rx_bulk_alloc_allowed)
3039                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3040
3041         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3042                                           sizeof(struct ixgbe_rx_entry) * len,
3043                                           RTE_CACHE_LINE_SIZE, socket_id);
3044         if (!rxq->sw_ring) {
3045                 ixgbe_rx_queue_release(rxq);
3046                 return -ENOMEM;
3047         }
3048
3049         /*
3050          * Always allocate even if it's not going to be needed in order to
3051          * simplify the code.
3052          *
3053          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3054          * be requested in ixgbe_dev_rx_init(), which is called later from
3055          * dev_start() flow.
3056          */
3057         rxq->sw_sc_ring =
3058                 rte_zmalloc_socket("rxq->sw_sc_ring",
3059                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
3060                                    RTE_CACHE_LINE_SIZE, socket_id);
3061         if (!rxq->sw_sc_ring) {
3062                 ixgbe_rx_queue_release(rxq);
3063                 return -ENOMEM;
3064         }
3065
3066         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3067                             "dma_addr=0x%"PRIx64,
3068                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3069                      rxq->rx_ring_phys_addr);
3070
3071         if (!rte_is_power_of_2(nb_desc)) {
3072                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3073                                     "preconditions - canceling the feature for "
3074                                     "the whole port[%d]",
3075                              rxq->queue_id, rxq->port_id);
3076                 adapter->rx_vec_allowed = false;
3077         } else
3078                 ixgbe_rxq_vec_setup(rxq);
3079
3080         dev->data->rx_queues[queue_idx] = rxq;
3081
3082         ixgbe_reset_rx_queue(adapter, rxq);
3083
3084         return 0;
3085 }
3086
3087 uint32_t
3088 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3089 {
3090 #define IXGBE_RXQ_SCAN_INTERVAL 4
3091         volatile union ixgbe_adv_rx_desc *rxdp;
3092         struct ixgbe_rx_queue *rxq;
3093         uint32_t desc = 0;
3094
3095         rxq = dev->data->rx_queues[rx_queue_id];
3096         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3097
3098         while ((desc < rxq->nb_rx_desc) &&
3099                 (rxdp->wb.upper.status_error &
3100                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3101                 desc += IXGBE_RXQ_SCAN_INTERVAL;
3102                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3103                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3104                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
3105                                 desc - rxq->nb_rx_desc]);
3106         }
3107
3108         return desc;
3109 }
3110
3111 int
3112 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3113 {
3114         volatile union ixgbe_adv_rx_desc *rxdp;
3115         struct ixgbe_rx_queue *rxq = rx_queue;
3116         uint32_t desc;
3117
3118         if (unlikely(offset >= rxq->nb_rx_desc))
3119                 return 0;
3120         desc = rxq->rx_tail + offset;
3121         if (desc >= rxq->nb_rx_desc)
3122                 desc -= rxq->nb_rx_desc;
3123
3124         rxdp = &rxq->rx_ring[desc];
3125         return !!(rxdp->wb.upper.status_error &
3126                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3127 }
3128
3129 int
3130 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3131 {
3132         struct ixgbe_rx_queue *rxq = rx_queue;
3133         volatile uint32_t *status;
3134         uint32_t nb_hold, desc;
3135
3136         if (unlikely(offset >= rxq->nb_rx_desc))
3137                 return -EINVAL;
3138
3139 #ifdef RTE_IXGBE_INC_VECTOR
3140         if (rxq->rx_using_sse)
3141                 nb_hold = rxq->rxrearm_nb;
3142         else
3143 #endif
3144                 nb_hold = rxq->nb_rx_hold;
3145         if (offset >= rxq->nb_rx_desc - nb_hold)
3146                 return RTE_ETH_RX_DESC_UNAVAIL;
3147
3148         desc = rxq->rx_tail + offset;
3149         if (desc >= rxq->nb_rx_desc)
3150                 desc -= rxq->nb_rx_desc;
3151
3152         status = &rxq->rx_ring[desc].wb.upper.status_error;
3153         if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3154                 return RTE_ETH_RX_DESC_DONE;
3155
3156         return RTE_ETH_RX_DESC_AVAIL;
3157 }
3158
3159 int
3160 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3161 {
3162         struct ixgbe_tx_queue *txq = tx_queue;
3163         volatile uint32_t *status;
3164         uint32_t desc;
3165
3166         if (unlikely(offset >= txq->nb_tx_desc))
3167                 return -EINVAL;
3168
3169         desc = txq->tx_tail + offset;
3170         /* go to next desc that has the RS bit */
3171         desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3172                 txq->tx_rs_thresh;
3173         if (desc >= txq->nb_tx_desc) {
3174                 desc -= txq->nb_tx_desc;
3175                 if (desc >= txq->nb_tx_desc)
3176                         desc -= txq->nb_tx_desc;
3177         }
3178
3179         status = &txq->tx_ring[desc].wb.status;
3180         if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3181                 return RTE_ETH_TX_DESC_DONE;
3182
3183         return RTE_ETH_TX_DESC_FULL;
3184 }
3185
3186 /*
3187  * Set up link loopback for X540/X550 mode Tx->Rx.
3188  */
3189 static inline void __attribute__((cold))
3190 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3191 {
3192         uint32_t macc;
3193         PMD_INIT_FUNC_TRACE();
3194
3195         u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3196
3197         hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3198                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3199         macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3200
3201         if (enable) {
3202                 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3203                 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3204                 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3205                 macc |= IXGBE_MACC_FLU;
3206         } else {
3207                 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3208                 macc &= ~IXGBE_MACC_FLU;
3209         }
3210
3211         hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3212                               IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3213
3214         IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3215 }
3216
3217 void __attribute__((cold))
3218 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3219 {
3220         unsigned i;
3221         struct ixgbe_adapter *adapter =
3222                 (struct ixgbe_adapter *)dev->data->dev_private;
3223         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3224
3225         PMD_INIT_FUNC_TRACE();
3226
3227         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3228                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3229
3230                 if (txq != NULL) {
3231                         txq->ops->release_mbufs(txq);
3232                         txq->ops->reset(txq);
3233                 }
3234         }
3235
3236         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3237                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3238
3239                 if (rxq != NULL) {
3240                         ixgbe_rx_queue_release_mbufs(rxq);
3241                         ixgbe_reset_rx_queue(adapter, rxq);
3242                 }
3243         }
3244         /* If loopback mode was enabled, reconfigure the link accordingly */
3245         if (dev->data->dev_conf.lpbk_mode != 0) {
3246                 if (hw->mac.type == ixgbe_mac_X540 ||
3247                      hw->mac.type == ixgbe_mac_X550 ||
3248                      hw->mac.type == ixgbe_mac_X550EM_x ||
3249                      hw->mac.type == ixgbe_mac_X550EM_a)
3250                         ixgbe_setup_loopback_link_x540_x550(hw, false);
3251         }
3252 }
3253
3254 void
3255 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3256 {
3257         unsigned i;
3258
3259         PMD_INIT_FUNC_TRACE();
3260
3261         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3262                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3263                 dev->data->rx_queues[i] = NULL;
3264         }
3265         dev->data->nb_rx_queues = 0;
3266
3267         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3268                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3269                 dev->data->tx_queues[i] = NULL;
3270         }
3271         dev->data->nb_tx_queues = 0;
3272 }
3273
3274 /*********************************************************************
3275  *
3276  *  Device RX/TX init functions
3277  *
3278  **********************************************************************/
3279
3280 /**
3281  * Receive Side Scaling (RSS)
3282  * See section 7.1.2.8 in the following document:
3283  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3284  *
3285  * Principles:
3286  * The source and destination IP addresses of the IP header and the source
3287  * and destination ports of TCP/UDP headers, if any, of received packets are
3288  * hashed against a configurable random key to compute a 32-bit RSS hash result.
3289  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3290  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
3291  * RSS output index which is used as the RX queue index where to store the
3292  * received packets.
3293  * The following output is supplied in the RX write-back descriptor:
3294  *     - 32-bit result of the Microsoft RSS hash function,
3295  *     - 4-bit RSS type field.
3296  */
3297
3298 /*
3299  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3300  * Used as the default key.
3301  */
3302 static uint8_t rss_intel_key[40] = {
3303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3308 };
3309
3310 static void
3311 ixgbe_rss_disable(struct rte_eth_dev *dev)
3312 {
3313         struct ixgbe_hw *hw;
3314         uint32_t mrqc;
3315         uint32_t mrqc_reg;
3316
3317         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3318         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3319         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3320         mrqc &= ~IXGBE_MRQC_RSSEN;
3321         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3322 }
3323
3324 static void
3325 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3326 {
3327         uint8_t  *hash_key;
3328         uint32_t mrqc;
3329         uint32_t rss_key;
3330         uint64_t rss_hf;
3331         uint16_t i;
3332         uint32_t mrqc_reg;
3333         uint32_t rssrk_reg;
3334
3335         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3336         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3337
3338         hash_key = rss_conf->rss_key;
3339         if (hash_key != NULL) {
3340                 /* Fill in RSS hash key */
3341                 for (i = 0; i < 10; i++) {
3342                         rss_key  = hash_key[(i * 4)];
3343                         rss_key |= hash_key[(i * 4) + 1] << 8;
3344                         rss_key |= hash_key[(i * 4) + 2] << 16;
3345                         rss_key |= hash_key[(i * 4) + 3] << 24;
3346                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3347                 }
3348         }
3349
3350         /* Set configured hashing protocols in MRQC register */
3351         rss_hf = rss_conf->rss_hf;
3352         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3353         if (rss_hf & ETH_RSS_IPV4)
3354                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3355         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3356                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3357         if (rss_hf & ETH_RSS_IPV6)
3358                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3359         if (rss_hf & ETH_RSS_IPV6_EX)
3360                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3361         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3362                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3363         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3364                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3365         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3366                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3367         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3368                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3369         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3370                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3371         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3372 }
3373
3374 int
3375 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3376                           struct rte_eth_rss_conf *rss_conf)
3377 {
3378         struct ixgbe_hw *hw;
3379         uint32_t mrqc;
3380         uint64_t rss_hf;
3381         uint32_t mrqc_reg;
3382
3383         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3384
3385         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3386                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3387                         "NIC.");
3388                 return -ENOTSUP;
3389         }
3390         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3391
3392         /*
3393          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3394          *     "RSS enabling cannot be done dynamically while it must be
3395          *      preceded by a software reset"
3396          * Before changing anything, first check that the update RSS operation
3397          * does not attempt to disable RSS, if RSS was enabled at
3398          * initialization time, or does not attempt to enable RSS, if RSS was
3399          * disabled at initialization time.
3400          */
3401         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3402         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3403         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3404                 if (rss_hf != 0) /* Enable RSS */
3405                         return -(EINVAL);
3406                 return 0; /* Nothing to do */
3407         }
3408         /* RSS enabled */
3409         if (rss_hf == 0) /* Disable RSS */
3410                 return -(EINVAL);
3411         ixgbe_hw_rss_hash_set(hw, rss_conf);
3412         return 0;
3413 }
3414
3415 int
3416 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3417                             struct rte_eth_rss_conf *rss_conf)
3418 {
3419         struct ixgbe_hw *hw;
3420         uint8_t *hash_key;
3421         uint32_t mrqc;
3422         uint32_t rss_key;
3423         uint64_t rss_hf;
3424         uint16_t i;
3425         uint32_t mrqc_reg;
3426         uint32_t rssrk_reg;
3427
3428         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3429         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3430         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3431         hash_key = rss_conf->rss_key;
3432         if (hash_key != NULL) {
3433                 /* Return RSS hash key */
3434                 for (i = 0; i < 10; i++) {
3435                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3436                         hash_key[(i * 4)] = rss_key & 0x000000FF;
3437                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3438                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3439                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3440                 }
3441         }
3442
3443         /* Get RSS functions configured in MRQC register */
3444         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3445         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3446                 rss_conf->rss_hf = 0;
3447                 return 0;
3448         }
3449         rss_hf = 0;
3450         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3451                 rss_hf |= ETH_RSS_IPV4;
3452         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3453                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3454         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3455                 rss_hf |= ETH_RSS_IPV6;
3456         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3457                 rss_hf |= ETH_RSS_IPV6_EX;
3458         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3459                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3460         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3461                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3462         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3463                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3464         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3465                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3466         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3467                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3468         rss_conf->rss_hf = rss_hf;
3469         return 0;
3470 }
3471
3472 static void
3473 ixgbe_rss_configure(struct rte_eth_dev *dev)
3474 {
3475         struct rte_eth_rss_conf rss_conf;
3476         struct ixgbe_adapter *adapter;
3477         struct ixgbe_hw *hw;
3478         uint32_t reta;
3479         uint16_t i;
3480         uint16_t j;
3481         uint16_t sp_reta_size;
3482         uint32_t reta_reg;
3483
3484         PMD_INIT_FUNC_TRACE();
3485         adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3486         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3487
3488         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3489
3490         /*
3491          * Fill in redirection table
3492          * The byte-swap is needed because NIC registers are in
3493          * little-endian order.
3494          */
3495         if (adapter->rss_reta_updated == 0) {
3496                 reta = 0;
3497                 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3498                         reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3499
3500                         if (j == dev->data->nb_rx_queues)
3501                                 j = 0;
3502                         reta = (reta << 8) | j;
3503                         if ((i & 3) == 3)
3504                                 IXGBE_WRITE_REG(hw, reta_reg,
3505                                                 rte_bswap32(reta));
3506                 }
3507         }
3508
3509         /*
3510          * Configure the RSS key and the RSS protocols used to compute
3511          * the RSS hash of input packets.
3512          */
3513         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3514         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3515                 ixgbe_rss_disable(dev);
3516                 return;
3517         }
3518         if (rss_conf.rss_key == NULL)
3519                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3520         ixgbe_hw_rss_hash_set(hw, &rss_conf);
3521 }
3522
3523 #define NUM_VFTA_REGISTERS 128
3524 #define NIC_RX_BUFFER_SIZE 0x200
3525 #define X550_RX_BUFFER_SIZE 0x180
3526
3527 static void
3528 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3529 {
3530         struct rte_eth_vmdq_dcb_conf *cfg;
3531         struct ixgbe_hw *hw;
3532         enum rte_eth_nb_pools num_pools;
3533         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3534         uint16_t pbsize;
3535         uint8_t nb_tcs; /* number of traffic classes */
3536         int i;
3537
3538         PMD_INIT_FUNC_TRACE();
3539         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3540         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3541         num_pools = cfg->nb_queue_pools;
3542         /* Check we have a valid number of pools */
3543         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3544                 ixgbe_rss_disable(dev);
3545                 return;
3546         }
3547         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3548         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3549
3550         /*
3551          * RXPBSIZE
3552          * split rx buffer up into sections, each for 1 traffic class
3553          */
3554         switch (hw->mac.type) {
3555         case ixgbe_mac_X550:
3556         case ixgbe_mac_X550EM_x:
3557         case ixgbe_mac_X550EM_a:
3558                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3559                 break;
3560         default:
3561                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3562                 break;
3563         }
3564         for (i = 0; i < nb_tcs; i++) {
3565                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3566
3567                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3568                 /* clear 10 bits. */
3569                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3570                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3571         }
3572         /* zero alloc all unused TCs */
3573         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3574                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3575
3576                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3577                 /* clear 10 bits. */
3578                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3579         }
3580
3581         /* MRQC: enable vmdq and dcb */
3582         mrqc = (num_pools == ETH_16_POOLS) ?
3583                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3584         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3585
3586         /* PFVTCTL: turn on virtualisation and set the default pool */
3587         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3588         if (cfg->enable_default_pool) {
3589                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3590         } else {
3591                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3592         }
3593
3594         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3595
3596         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3597         queue_mapping = 0;
3598         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3599                 /*
3600                  * mapping is done with 3 bits per priority,
3601                  * so shift by i*3 each time
3602                  */
3603                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3604
3605         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3606
3607         /* RTRPCS: DCB related */
3608         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3609
3610         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3611         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3612         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3613         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3614
3615         /* VFTA - enable all vlan filters */
3616         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3617                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3618         }
3619
3620         /* VFRE: pool enabling for receive - 16 or 32 */
3621         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3622                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3623
3624         /*
3625          * MPSAR - allow pools to read specific mac addresses
3626          * In this case, all pools should be able to read from mac addr 0
3627          */
3628         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3629         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3630
3631         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3632         for (i = 0; i < cfg->nb_pool_maps; i++) {
3633                 /* set vlan id in VF register and set the valid bit */
3634                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3635                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
3636                 /*
3637                  * Put the allowed pools in VFB reg. As we only have 16 or 32
3638                  * pools, we only need to use the first half of the register
3639                  * i.e. bits 0-31
3640                  */
3641                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3642         }
3643 }
3644
3645 /**
3646  * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
3647  * @dev: pointer to eth_dev structure
3648  * @dcb_config: pointer to ixgbe_dcb_config structure
3649  */
3650 static void
3651 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3652                        struct ixgbe_dcb_config *dcb_config)
3653 {
3654         uint32_t reg;
3655         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3656
3657         PMD_INIT_FUNC_TRACE();
3658         if (hw->mac.type != ixgbe_mac_82598EB) {
3659                 /* Disable the Tx desc arbiter so that MTQC can be changed */
3660                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3661                 reg |= IXGBE_RTTDCS_ARBDIS;
3662                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3663
3664                 /* Enable DCB for Tx with 8 TCs */
3665                 if (dcb_config->num_tcs.pg_tcs == 8) {
3666                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3667                 } else {
3668                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3669                 }
3670                 if (dcb_config->vt_mode)
3671                         reg |= IXGBE_MTQC_VT_ENA;
3672                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3673
3674                 /* Enable the Tx desc arbiter */
3675                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3676                 reg &= ~IXGBE_RTTDCS_ARBDIS;
3677                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3678
3679                 /* Enable Security TX Buffer IFG for DCB */
3680                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3681                 reg |= IXGBE_SECTX_DCB;
3682                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3683         }
3684 }
3685
3686 /**
3687  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3688  * @dev: pointer to rte_eth_dev structure
3689  * @dcb_config: pointer to ixgbe_dcb_config structure
3690  */
3691 static void
3692 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3693                         struct ixgbe_dcb_config *dcb_config)
3694 {
3695         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3696                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3697         struct ixgbe_hw *hw =
3698                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3699
3700         PMD_INIT_FUNC_TRACE();
3701         if (hw->mac.type != ixgbe_mac_82598EB)
3702                 /*PF VF Transmit Enable*/
3703                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3704                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3705
3706         /*Configure general DCB TX parameters*/
3707         ixgbe_dcb_tx_hw_config(dev, dcb_config);
3708 }
3709
3710 static void
3711 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3712                         struct ixgbe_dcb_config *dcb_config)
3713 {
3714         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3715                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3716         struct ixgbe_dcb_tc_config *tc;
3717         uint8_t i, j;
3718
3719         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3720         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3721                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3722                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3723         } else {
3724                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3725                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3726         }
3727
3728         /* Initialize User Priority to Traffic Class mapping */
3729         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3730                 tc = &dcb_config->tc_config[j];
3731                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3732         }
3733
3734         /* User Priority to Traffic Class mapping */
3735         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3736                 j = vmdq_rx_conf->dcb_tc[i];
3737                 tc = &dcb_config->tc_config[j];
3738                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3739                                                 (uint8_t)(1 << i);
3740         }
3741 }
3742
3743 static void
3744 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3745                         struct ixgbe_dcb_config *dcb_config)
3746 {
3747         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3748                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3749         struct ixgbe_dcb_tc_config *tc;
3750         uint8_t i, j;
3751
3752         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3753         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3754                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3755                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3756         } else {
3757                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3758                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3759         }
3760
3761         /* Initialize User Priority to Traffic Class mapping */
3762         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3763                 tc = &dcb_config->tc_config[j];
3764                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3765         }
3766
3767         /* User Priority to Traffic Class mapping */
3768         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3769                 j = vmdq_tx_conf->dcb_tc[i];
3770                 tc = &dcb_config->tc_config[j];
3771                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3772                                                 (uint8_t)(1 << i);
3773         }
3774 }
3775
3776 static void
3777 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3778                 struct ixgbe_dcb_config *dcb_config)
3779 {
3780         struct rte_eth_dcb_rx_conf *rx_conf =
3781                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3782         struct ixgbe_dcb_tc_config *tc;
3783         uint8_t i, j;
3784
3785         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3786         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3787
3788         /* Initialize User Priority to Traffic Class mapping */
3789         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3790                 tc = &dcb_config->tc_config[j];
3791                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3792         }
3793
3794         /* User Priority to Traffic Class mapping */
3795         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3796                 j = rx_conf->dcb_tc[i];
3797                 tc = &dcb_config->tc_config[j];
3798                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3799                                                 (uint8_t)(1 << i);
3800         }
3801 }
3802
3803 static void
3804 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3805                 struct ixgbe_dcb_config *dcb_config)
3806 {
3807         struct rte_eth_dcb_tx_conf *tx_conf =
3808                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3809         struct ixgbe_dcb_tc_config *tc;
3810         uint8_t i, j;
3811
3812         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3813         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3814
3815         /* Initialize User Priority to Traffic Class mapping */
3816         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3817                 tc = &dcb_config->tc_config[j];
3818                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3819         }
3820
3821         /* User Priority to Traffic Class mapping */
3822         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3823                 j = tx_conf->dcb_tc[i];
3824                 tc = &dcb_config->tc_config[j];
3825                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3826                                                 (uint8_t)(1 << i);
3827         }
3828 }
3829
3830 /**
3831  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3832  * @dev: pointer to eth_dev structure
3833  * @dcb_config: pointer to ixgbe_dcb_config structure
3834  */
3835 static void
3836 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3837                        struct ixgbe_dcb_config *dcb_config)
3838 {
3839         uint32_t reg;
3840         uint32_t vlanctrl;
3841         uint8_t i;
3842         uint32_t q;
3843         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3844
3845         PMD_INIT_FUNC_TRACE();
3846         /*
3847          * Disable the arbiter before changing parameters
3848          * (always enable recycle mode; WSP)
3849          */
3850         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3851         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3852
3853         if (hw->mac.type != ixgbe_mac_82598EB) {
3854                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3855                 if (dcb_config->num_tcs.pg_tcs == 4) {
3856                         if (dcb_config->vt_mode)
3857                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3858                                         IXGBE_MRQC_VMDQRT4TCEN;
3859                         else {
3860                                 /* no matter the mode is DCB or DCB_RSS, just
3861                                  * set the MRQE to RSSXTCEN. RSS is controlled
3862                                  * by RSS_FIELD
3863                                  */
3864                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3865                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3866                                         IXGBE_MRQC_RTRSS4TCEN;
3867                         }
3868                 }
3869                 if (dcb_config->num_tcs.pg_tcs == 8) {
3870                         if (dcb_config->vt_mode)
3871                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3872                                         IXGBE_MRQC_VMDQRT8TCEN;
3873                         else {
3874                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3875                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3876                                         IXGBE_MRQC_RTRSS8TCEN;
3877                         }
3878                 }
3879
3880                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3881
3882                 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3883                         /* Disable drop for all queues in VMDQ mode*/
3884                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3885                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3886                                                 (IXGBE_QDE_WRITE |
3887                                                  (q << IXGBE_QDE_IDX_SHIFT)));
3888                 } else {
3889                         /* Enable drop for all queues in SRIOV mode */
3890                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3891                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3892                                                 (IXGBE_QDE_WRITE |
3893                                                  (q << IXGBE_QDE_IDX_SHIFT) |
3894                                                  IXGBE_QDE_ENABLE));
3895                 }
3896         }
3897
3898         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3899         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3900         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3901         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3902
3903         /* VFTA - enable all vlan filters */
3904         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3905                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3906         }
3907
3908         /*
3909          * Configure Rx packet plane (recycle mode; WSP) and
3910          * enable arbiter
3911          */
3912         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3913         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3914 }
3915
3916 static void
3917 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3918                         uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3919 {
3920         switch (hw->mac.type) {
3921         case ixgbe_mac_82598EB:
3922                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3923                 break;
3924         case ixgbe_mac_82599EB:
3925         case ixgbe_mac_X540:
3926         case ixgbe_mac_X550:
3927         case ixgbe_mac_X550EM_x:
3928         case ixgbe_mac_X550EM_a:
3929                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3930                                                   tsa, map);
3931                 break;
3932         default:
3933                 break;
3934         }
3935 }
3936
3937 static void
3938 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3939                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3940 {
3941         switch (hw->mac.type) {
3942         case ixgbe_mac_82598EB:
3943                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3944                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3945                 break;
3946         case ixgbe_mac_82599EB:
3947         case ixgbe_mac_X540:
3948         case ixgbe_mac_X550:
3949         case ixgbe_mac_X550EM_x:
3950         case ixgbe_mac_X550EM_a:
3951                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3952                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3953                 break;
3954         default:
3955                 break;
3956         }
3957 }
3958
3959 #define DCB_RX_CONFIG  1
3960 #define DCB_TX_CONFIG  1
3961 #define DCB_TX_PB      1024
3962 /**
3963  * ixgbe_dcb_hw_configure - Enable DCB and configure
3964  * general DCB in VT mode and non-VT mode parameters
3965  * @dev: pointer to rte_eth_dev structure
3966  * @dcb_config: pointer to ixgbe_dcb_config structure
3967  */
3968 static int
3969 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3970                         struct ixgbe_dcb_config *dcb_config)
3971 {
3972         int     ret = 0;
3973         uint8_t i, pfc_en, nb_tcs;
3974         uint16_t pbsize, rx_buffer_size;
3975         uint8_t config_dcb_rx = 0;
3976         uint8_t config_dcb_tx = 0;
3977         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3978         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3979         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3980         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982         struct ixgbe_dcb_tc_config *tc;
3983         uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3984                 RTE_ETHER_CRC_LEN;
3985         struct ixgbe_hw *hw =
3986                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3987         struct ixgbe_bw_conf *bw_conf =
3988                 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3989
3990         switch (dev->data->dev_conf.rxmode.mq_mode) {
3991         case ETH_MQ_RX_VMDQ_DCB:
3992                 dcb_config->vt_mode = true;
3993                 if (hw->mac.type != ixgbe_mac_82598EB) {
3994                         config_dcb_rx = DCB_RX_CONFIG;
3995                         /*
3996                          *get dcb and VT rx configuration parameters
3997                          *from rte_eth_conf
3998                          */
3999                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4000                         /*Configure general VMDQ and DCB RX parameters*/
4001                         ixgbe_vmdq_dcb_configure(dev);
4002                 }
4003                 break;
4004         case ETH_MQ_RX_DCB:
4005         case ETH_MQ_RX_DCB_RSS:
4006                 dcb_config->vt_mode = false;
4007                 config_dcb_rx = DCB_RX_CONFIG;
4008                 /* Get dcb TX configuration parameters from rte_eth_conf */
4009                 ixgbe_dcb_rx_config(dev, dcb_config);
4010                 /*Configure general DCB RX parameters*/
4011                 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4012                 break;
4013         default:
4014                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4015                 break;
4016         }
4017         switch (dev->data->dev_conf.txmode.mq_mode) {
4018         case ETH_MQ_TX_VMDQ_DCB:
4019                 dcb_config->vt_mode = true;
4020                 config_dcb_tx = DCB_TX_CONFIG;
4021                 /* get DCB and VT TX configuration parameters
4022                  * from rte_eth_conf
4023                  */
4024                 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4025                 /*Configure general VMDQ and DCB TX parameters*/
4026                 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4027                 break;
4028
4029         case ETH_MQ_TX_DCB:
4030                 dcb_config->vt_mode = false;
4031                 config_dcb_tx = DCB_TX_CONFIG;
4032                 /*get DCB TX configuration parameters from rte_eth_conf*/
4033                 ixgbe_dcb_tx_config(dev, dcb_config);
4034                 /*Configure general DCB TX parameters*/
4035                 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4036                 break;
4037         default:
4038                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4039                 break;
4040         }
4041
4042         nb_tcs = dcb_config->num_tcs.pfc_tcs;
4043         /* Unpack map */
4044         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4045         if (nb_tcs == ETH_4_TCS) {
4046                 /* Avoid un-configured priority mapping to TC0 */
4047                 uint8_t j = 4;
4048                 uint8_t mask = 0xFF;
4049
4050                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4051                         mask = (uint8_t)(mask & (~(1 << map[i])));
4052                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4053                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4054                                 map[j++] = i;
4055                         mask >>= 1;
4056                 }
4057                 /* Re-configure 4 TCs BW */
4058                 for (i = 0; i < nb_tcs; i++) {
4059                         tc = &dcb_config->tc_config[i];
4060                         if (bw_conf->tc_num != nb_tcs)
4061                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4062                                         (uint8_t)(100 / nb_tcs);
4063                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4064                                                 (uint8_t)(100 / nb_tcs);
4065                 }
4066                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4067                         tc = &dcb_config->tc_config[i];
4068                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4069                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4070                 }
4071         } else {
4072                 /* Re-configure 8 TCs BW */
4073                 for (i = 0; i < nb_tcs; i++) {
4074                         tc = &dcb_config->tc_config[i];
4075                         if (bw_conf->tc_num != nb_tcs)
4076                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4077                                         (uint8_t)(100 / nb_tcs + (i & 1));
4078                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4079                                 (uint8_t)(100 / nb_tcs + (i & 1));
4080                 }
4081         }
4082
4083         switch (hw->mac.type) {
4084         case ixgbe_mac_X550:
4085         case ixgbe_mac_X550EM_x:
4086         case ixgbe_mac_X550EM_a:
4087                 rx_buffer_size = X550_RX_BUFFER_SIZE;
4088                 break;
4089         default:
4090                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4091                 break;
4092         }
4093
4094         if (config_dcb_rx) {
4095                 /* Set RX buffer size */
4096                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4097                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4098
4099                 for (i = 0; i < nb_tcs; i++) {
4100                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4101                 }
4102                 /* zero alloc all unused TCs */
4103                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4104                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4105                 }
4106         }
4107         if (config_dcb_tx) {
4108                 /* Only support an equally distributed
4109                  *  Tx packet buffer strategy.
4110                  */
4111                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4112                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4113
4114                 for (i = 0; i < nb_tcs; i++) {
4115                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4116                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4117                 }
4118                 /* Clear unused TCs, if any, to zero buffer size*/
4119                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4120                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4121                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4122                 }
4123         }
4124
4125         /*Calculates traffic class credits*/
4126         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4127                                 IXGBE_DCB_TX_CONFIG);
4128         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4129                                 IXGBE_DCB_RX_CONFIG);
4130
4131         if (config_dcb_rx) {
4132                 /* Unpack CEE standard containers */
4133                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4134                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4135                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4136                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4137                 /* Configure PG(ETS) RX */
4138                 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4139         }
4140
4141         if (config_dcb_tx) {
4142                 /* Unpack CEE standard containers */
4143                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4144                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4145                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4146                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4147                 /* Configure PG(ETS) TX */
4148                 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4149         }
4150
4151         /*Configure queue statistics registers*/
4152         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4153
4154         /* Check if the PFC is supported */
4155         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4156                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4157                 for (i = 0; i < nb_tcs; i++) {
4158                         /*
4159                         * If the TC count is 8,and the default high_water is 48,
4160                         * the low_water is 16 as default.
4161                         */
4162                         hw->fc.high_water[i] = (pbsize * 3) / 4;
4163                         hw->fc.low_water[i] = pbsize / 4;
4164                         /* Enable pfc for this TC */
4165                         tc = &dcb_config->tc_config[i];
4166                         tc->pfc = ixgbe_dcb_pfc_enabled;
4167                 }
4168                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4169                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4170                         pfc_en &= 0x0F;
4171                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4172         }
4173
4174         return ret;
4175 }
4176
4177 /**
4178  * ixgbe_configure_dcb - Configure DCB  Hardware
4179  * @dev: pointer to rte_eth_dev
4180  */
4181 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4182 {
4183         struct ixgbe_dcb_config *dcb_cfg =
4184                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4185         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4186
4187         PMD_INIT_FUNC_TRACE();
4188
4189         /* check support mq_mode for DCB */
4190         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4191             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4192             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4193                 return;
4194
4195         if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4196                 return;
4197
4198         /** Configure DCB hardware **/
4199         ixgbe_dcb_hw_configure(dev, dcb_cfg);
4200 }
4201
4202 /*
4203  * VMDq only support for 10 GbE NIC.
4204  */
4205 static void
4206 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4207 {
4208         struct rte_eth_vmdq_rx_conf *cfg;
4209         struct ixgbe_hw *hw;
4210         enum rte_eth_nb_pools num_pools;
4211         uint32_t mrqc, vt_ctl, vlanctrl;
4212         uint32_t vmolr = 0;
4213         int i;
4214
4215         PMD_INIT_FUNC_TRACE();
4216         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4217         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4218         num_pools = cfg->nb_queue_pools;
4219
4220         ixgbe_rss_disable(dev);
4221
4222         /* MRQC: enable vmdq */
4223         mrqc = IXGBE_MRQC_VMDQEN;
4224         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4225
4226         /* PFVTCTL: turn on virtualisation and set the default pool */
4227         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4228         if (cfg->enable_default_pool)
4229                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4230         else
4231                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4232
4233         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4234
4235         for (i = 0; i < (int)num_pools; i++) {
4236                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4237                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4238         }
4239
4240         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4241         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4242         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4243         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4244
4245         /* VFTA - enable all vlan filters */
4246         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4247                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4248
4249         /* VFRE: pool enabling for receive - 64 */
4250         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4251         if (num_pools == ETH_64_POOLS)
4252                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4253
4254         /*
4255          * MPSAR - allow pools to read specific mac addresses
4256          * In this case, all pools should be able to read from mac addr 0
4257          */
4258         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4259         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4260
4261         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4262         for (i = 0; i < cfg->nb_pool_maps; i++) {
4263                 /* set vlan id in VF register and set the valid bit */
4264                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4265                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4266                 /*
4267                  * Put the allowed pools in VFB reg. As we only have 16 or 64
4268                  * pools, we only need to use the first half of the register
4269                  * i.e. bits 0-31
4270                  */
4271                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4272                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4273                                         (cfg->pool_map[i].pools & UINT32_MAX));
4274                 else
4275                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4276                                         ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4277
4278         }
4279
4280         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4281         if (cfg->enable_loop_back) {
4282                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4283                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4284                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4285         }
4286
4287         IXGBE_WRITE_FLUSH(hw);
4288 }
4289
4290 /*
4291  * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
4292  * @hw: pointer to hardware structure
4293  */
4294 static void
4295 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4296 {
4297         uint32_t reg;
4298         uint32_t q;
4299
4300         PMD_INIT_FUNC_TRACE();
4301         /*PF VF Transmit Enable*/
4302         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4303         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4304
4305         /* Disable the Tx desc arbiter so that MTQC can be changed */
4306         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4307         reg |= IXGBE_RTTDCS_ARBDIS;
4308         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4309
4310         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4311         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4312
4313         /* Disable drop for all queues */
4314         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4315                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4316                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4317
4318         /* Enable the Tx desc arbiter */
4319         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4320         reg &= ~IXGBE_RTTDCS_ARBDIS;
4321         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4322
4323         IXGBE_WRITE_FLUSH(hw);
4324 }
4325
4326 static int __attribute__((cold))
4327 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4328 {
4329         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4330         uint64_t dma_addr;
4331         unsigned int i;
4332
4333         /* Initialize software ring entries */
4334         for (i = 0; i < rxq->nb_rx_desc; i++) {
4335                 volatile union ixgbe_adv_rx_desc *rxd;
4336                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4337
4338                 if (mbuf == NULL) {
4339                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4340                                      (unsigned) rxq->queue_id);
4341                         return -ENOMEM;
4342                 }
4343
4344                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4345                 mbuf->port = rxq->port_id;
4346
4347                 dma_addr =
4348                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4349                 rxd = &rxq->rx_ring[i];
4350                 rxd->read.hdr_addr = 0;
4351                 rxd->read.pkt_addr = dma_addr;
4352                 rxe[i].mbuf = mbuf;
4353         }
4354
4355         return 0;
4356 }
4357
4358 static int
4359 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4360 {
4361         struct ixgbe_hw *hw;
4362         uint32_t mrqc;
4363
4364         ixgbe_rss_configure(dev);
4365
4366         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4367
4368         /* MRQC: enable VF RSS */
4369         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4370         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4371         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4372         case ETH_64_POOLS:
4373                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4374                 break;
4375
4376         case ETH_32_POOLS:
4377                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4378                 break;
4379
4380         default:
4381                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4382                 return -EINVAL;
4383         }
4384
4385         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4386
4387         return 0;
4388 }
4389
4390 static int
4391 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4392 {
4393         struct ixgbe_hw *hw =
4394                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4395
4396         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4397         case ETH_64_POOLS:
4398                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4399                         IXGBE_MRQC_VMDQEN);
4400                 break;
4401
4402         case ETH_32_POOLS:
4403                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4404                         IXGBE_MRQC_VMDQRT4TCEN);
4405                 break;
4406
4407         case ETH_16_POOLS:
4408                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4409                         IXGBE_MRQC_VMDQRT8TCEN);
4410                 break;
4411         default:
4412                 PMD_INIT_LOG(ERR,
4413                         "invalid pool number in IOV mode");
4414                 break;
4415         }
4416         return 0;
4417 }
4418
4419 static int
4420 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4421 {
4422         struct ixgbe_hw *hw =
4423                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4424
4425         if (hw->mac.type == ixgbe_mac_82598EB)
4426                 return 0;
4427
4428         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4429                 /*
4430                  * SRIOV inactive scheme
4431                  * any DCB/RSS w/o VMDq multi-queue setting
4432                  */
4433                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4434                 case ETH_MQ_RX_RSS:
4435                 case ETH_MQ_RX_DCB_RSS:
4436                 case ETH_MQ_RX_VMDQ_RSS:
4437                         ixgbe_rss_configure(dev);
4438                         break;
4439
4440                 case ETH_MQ_RX_VMDQ_DCB:
4441                         ixgbe_vmdq_dcb_configure(dev);
4442                         break;
4443
4444                 case ETH_MQ_RX_VMDQ_ONLY:
4445                         ixgbe_vmdq_rx_hw_configure(dev);
4446                         break;
4447
4448                 case ETH_MQ_RX_NONE:
4449                 default:
4450                         /* if mq_mode is none, disable rss mode.*/
4451                         ixgbe_rss_disable(dev);
4452                         break;
4453                 }
4454         } else {
4455                 /* SRIOV active scheme
4456                  * Support RSS together with SRIOV.
4457                  */
4458                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4459                 case ETH_MQ_RX_RSS:
4460                 case ETH_MQ_RX_VMDQ_RSS:
4461                         ixgbe_config_vf_rss(dev);
4462                         break;
4463                 case ETH_MQ_RX_VMDQ_DCB:
4464                 case ETH_MQ_RX_DCB:
4465                 /* In SRIOV, the configuration is the same as VMDq case */
4466                         ixgbe_vmdq_dcb_configure(dev);
4467                         break;
4468                 /* DCB/RSS together with SRIOV is not supported */
4469                 case ETH_MQ_RX_VMDQ_DCB_RSS:
4470                 case ETH_MQ_RX_DCB_RSS:
4471                         PMD_INIT_LOG(ERR,
4472                                 "Could not support DCB/RSS with VMDq & SRIOV");
4473                         return -1;
4474                 default:
4475                         ixgbe_config_vf_default(dev);
4476                         break;
4477                 }
4478         }
4479
4480         return 0;
4481 }
4482
4483 static int
4484 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4485 {
4486         struct ixgbe_hw *hw =
4487                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4488         uint32_t mtqc;
4489         uint32_t rttdcs;
4490
4491         if (hw->mac.type == ixgbe_mac_82598EB)
4492                 return 0;
4493
4494         /* disable arbiter before setting MTQC */
4495         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4496         rttdcs |= IXGBE_RTTDCS_ARBDIS;
4497         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4498
4499         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4500                 /*
4501                  * SRIOV inactive scheme
4502                  * any DCB w/o VMDq multi-queue setting
4503                  */
4504                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4505                         ixgbe_vmdq_tx_hw_configure(hw);
4506                 else {
4507                         mtqc = IXGBE_MTQC_64Q_1PB;
4508                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4509                 }
4510         } else {
4511                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4512
4513                 /*
4514                  * SRIOV active scheme
4515                  * FIXME if support DCB together with VMDq & SRIOV
4516                  */
4517                 case ETH_64_POOLS:
4518                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4519                         break;
4520                 case ETH_32_POOLS:
4521                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4522                         break;
4523                 case ETH_16_POOLS:
4524                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4525                                 IXGBE_MTQC_8TC_8TQ;
4526                         break;
4527                 default:
4528                         mtqc = IXGBE_MTQC_64Q_1PB;
4529                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4530                 }
4531                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4532         }
4533
4534         /* re-enable arbiter */
4535         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4536         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4537
4538         return 0;
4539 }
4540
4541 /**
4542  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4543  *
4544  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4545  * spec rev. 3.0 chapter 8.2.3.8.13.
4546  *
4547  * @pool Memory pool of the Rx queue
4548  */
4549 static inline uint32_t
4550 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4551 {
4552         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4553
4554         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4555         uint16_t maxdesc =
4556                 RTE_IPV4_MAX_PKT_LEN /
4557                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4558
4559         if (maxdesc >= 16)
4560                 return IXGBE_RSCCTL_MAXDESC_16;
4561         else if (maxdesc >= 8)
4562                 return IXGBE_RSCCTL_MAXDESC_8;
4563         else if (maxdesc >= 4)
4564                 return IXGBE_RSCCTL_MAXDESC_4;
4565         else
4566                 return IXGBE_RSCCTL_MAXDESC_1;
4567 }
4568
4569 /**
4570  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4571  * interrupt
4572  *
4573  * (Taken from FreeBSD tree)
4574  * (yes this is all very magic and confusing :)
4575  *
4576  * @dev port handle
4577  * @entry the register array entry
4578  * @vector the MSIX vector for this queue
4579  * @type RX/TX/MISC
4580  */
4581 static void
4582 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4583 {
4584         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4585         u32 ivar, index;
4586
4587         vector |= IXGBE_IVAR_ALLOC_VAL;
4588
4589         switch (hw->mac.type) {
4590
4591         case ixgbe_mac_82598EB:
4592                 if (type == -1)
4593                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4594                 else
4595                         entry += (type * 64);
4596                 index = (entry >> 2) & 0x1F;
4597                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4598                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4599                 ivar |= (vector << (8 * (entry & 0x3)));
4600                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4601                 break;
4602
4603         case ixgbe_mac_82599EB:
4604         case ixgbe_mac_X540:
4605                 if (type == -1) { /* MISC IVAR */
4606                         index = (entry & 1) * 8;
4607                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4608                         ivar &= ~(0xFF << index);
4609                         ivar |= (vector << index);
4610                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4611                 } else {        /* RX/TX IVARS */
4612                         index = (16 * (entry & 1)) + (8 * type);
4613                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4614                         ivar &= ~(0xFF << index);
4615                         ivar |= (vector << index);
4616                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4617                 }
4618
4619                 break;
4620
4621         default:
4622                 break;
4623         }
4624 }
4625
4626 void __attribute__((cold))
4627 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4628 {
4629         uint16_t i, rx_using_sse;
4630         struct ixgbe_adapter *adapter =
4631                 (struct ixgbe_adapter *)dev->data->dev_private;
4632
4633         /*
4634          * In order to allow Vector Rx there are a few configuration
4635          * conditions to be met and Rx Bulk Allocation should be allowed.
4636          */
4637         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4638             !adapter->rx_bulk_alloc_allowed) {
4639                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4640                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
4641                                     "not enabled",
4642                              dev->data->port_id);
4643
4644                 adapter->rx_vec_allowed = false;
4645         }
4646
4647         /*
4648          * Initialize the appropriate LRO callback.
4649          *
4650          * If all queues satisfy the bulk allocation preconditions
4651          * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4652          * Otherwise use a single allocation version.
4653          */
4654         if (dev->data->lro) {
4655                 if (adapter->rx_bulk_alloc_allowed) {
4656                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4657                                            "allocation version");
4658                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4659                 } else {
4660                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4661                                            "allocation version");
4662                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4663                 }
4664         } else if (dev->data->scattered_rx) {
4665                 /*
4666                  * Set the non-LRO scattered callback: there are Vector and
4667                  * single allocation versions.
4668                  */
4669                 if (adapter->rx_vec_allowed) {
4670                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4671                                             "callback (port=%d).",
4672                                      dev->data->port_id);
4673
4674                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4675                 } else if (adapter->rx_bulk_alloc_allowed) {
4676                         PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4677                                            "allocation callback (port=%d).",
4678                                      dev->data->port_id);
4679                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4680                 } else {
4681                         PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4682                                             "single allocation) "
4683                                             "Scattered Rx callback "
4684                                             "(port=%d).",
4685                                      dev->data->port_id);
4686
4687                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4688                 }
4689         /*
4690          * Below we set "simple" callbacks according to port/queues parameters.
4691          * If parameters allow we are going to choose between the following
4692          * callbacks:
4693          *    - Vector
4694          *    - Bulk Allocation
4695          *    - Single buffer allocation (the simplest one)
4696          */
4697         } else if (adapter->rx_vec_allowed) {
4698                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4699                                     "burst size no less than %d (port=%d).",
4700                              RTE_IXGBE_DESCS_PER_LOOP,
4701                              dev->data->port_id);
4702
4703                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4704         } else if (adapter->rx_bulk_alloc_allowed) {
4705                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4706                                     "satisfied. Rx Burst Bulk Alloc function "
4707                                     "will be used on port=%d.",
4708                              dev->data->port_id);
4709
4710                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4711         } else {
4712                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4713                                     "satisfied, or Scattered Rx is requested "
4714                                     "(port=%d).",
4715                              dev->data->port_id);
4716
4717                 dev->rx_pkt_burst = ixgbe_recv_pkts;
4718         }
4719
4720         /* Propagate information about RX function choice through all queues. */
4721
4722         rx_using_sse =
4723                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4724                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4725
4726         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4727                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4728
4729                 rxq->rx_using_sse = rx_using_sse;
4730 #ifdef RTE_LIBRTE_SECURITY
4731                 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4732                                 DEV_RX_OFFLOAD_SECURITY);
4733 #endif
4734         }
4735 }
4736
4737 /**
4738  * ixgbe_set_rsc - configure RSC related port HW registers
4739  *
4740  * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4741  * of 82599 Spec (x540 configuration is virtually the same).
4742  *
4743  * @dev port handle
4744  *
4745  * Returns 0 in case of success or a non-zero error code
4746  */
4747 static int
4748 ixgbe_set_rsc(struct rte_eth_dev *dev)
4749 {
4750         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4751         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4752         struct rte_eth_dev_info dev_info = { 0 };
4753         bool rsc_capable = false;
4754         uint16_t i;
4755         uint32_t rdrxctl;
4756         uint32_t rfctl;
4757
4758         /* Sanity check */
4759         dev->dev_ops->dev_infos_get(dev, &dev_info);
4760         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4761                 rsc_capable = true;
4762
4763         if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4764                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4765                                    "support it");
4766                 return -EINVAL;
4767         }
4768
4769         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4770
4771         if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4772              (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4773                 /*
4774                  * According to chapter of 4.6.7.2.1 of the Spec Rev.
4775                  * 3.0 RSC configuration requires HW CRC stripping being
4776                  * enabled. If user requested both HW CRC stripping off
4777                  * and RSC on - return an error.
4778                  */
4779                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4780                                     "is disabled");
4781                 return -EINVAL;
4782         }
4783
4784         /* RFCTL configuration  */
4785         rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4786         if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4787                 /*
4788                  * Since NFS packets coalescing is not supported - clear
4789                  * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4790                  * enabled.
4791                  */
4792                 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4793                            IXGBE_RFCTL_NFSR_DIS);
4794         else
4795                 rfctl |= IXGBE_RFCTL_RSC_DIS;
4796         IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4797
4798         /* If LRO hasn't been requested - we are done here. */
4799         if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4800                 return 0;
4801
4802         /* Set RDRXCTL.RSCACKC bit */
4803         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4804         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4805         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4806
4807         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4808         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4809                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4810                 uint32_t srrctl =
4811                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4812                 uint32_t rscctl =
4813                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4814                 uint32_t psrtype =
4815                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4816                 uint32_t eitr =
4817                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4818
4819                 /*
4820                  * ixgbe PMD doesn't support header-split at the moment.
4821                  *
4822                  * Following the 4.6.7.2.1 chapter of the 82599/x540
4823                  * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4824                  * should be configured even if header split is not
4825                  * enabled. We will configure it 128 bytes following the
4826                  * recommendation in the spec.
4827                  */
4828                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4829                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4830                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4831
4832                 /*
4833                  * TODO: Consider setting the Receive Descriptor Minimum
4834                  * Threshold Size for an RSC case. This is not an obviously
4835                  * beneficiary option but the one worth considering...
4836                  */
4837
4838                 rscctl |= IXGBE_RSCCTL_RSCEN;
4839                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4840                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4841
4842                 /*
4843                  * RSC: Set ITR interval corresponding to 2K ints/s.
4844                  *
4845                  * Full-sized RSC aggregations for a 10Gb/s link will
4846                  * arrive at about 20K aggregation/s rate.
4847                  *
4848                  * 2K inst/s rate will make only 10% of the
4849                  * aggregations to be closed due to the interrupt timer
4850                  * expiration for a streaming at wire-speed case.
4851                  *
4852                  * For a sparse streaming case this setting will yield
4853                  * at most 500us latency for a single RSC aggregation.
4854                  */
4855                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4856                 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4857                 eitr |= IXGBE_EITR_CNT_WDIS;
4858
4859                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4860                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4861                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4862                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4863
4864                 /*
4865                  * RSC requires the mapping of the queue to the
4866                  * interrupt vector.
4867                  */
4868                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4869         }
4870
4871         dev->data->lro = 1;
4872
4873         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4874
4875         return 0;
4876 }
4877
4878 /*
4879  * Initializes Receive Unit.
4880  */
4881 int __attribute__((cold))
4882 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4883 {
4884         struct ixgbe_hw     *hw;
4885         struct ixgbe_rx_queue *rxq;
4886         uint64_t bus_addr;
4887         uint32_t rxctrl;
4888         uint32_t fctrl;
4889         uint32_t hlreg0;
4890         uint32_t maxfrs;
4891         uint32_t srrctl;
4892         uint32_t rdrxctl;
4893         uint32_t rxcsum;
4894         uint16_t buf_size;
4895         uint16_t i;
4896         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4897         int rc;
4898
4899         PMD_INIT_FUNC_TRACE();
4900         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4901
4902         /*
4903          * Make sure receives are disabled while setting
4904          * up the RX context (registers, descriptor rings, etc.).
4905          */
4906         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4907         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4908
4909         /* Enable receipt of broadcasted frames */
4910         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4911         fctrl |= IXGBE_FCTRL_BAM;
4912         fctrl |= IXGBE_FCTRL_DPF;
4913         fctrl |= IXGBE_FCTRL_PMCF;
4914         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4915
4916         /*
4917          * Configure CRC stripping, if any.
4918          */
4919         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4920         if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4921                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4922         else
4923                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4924
4925         /*
4926          * Configure jumbo frame support, if any.
4927          */
4928         if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4929                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4930                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4931                 maxfrs &= 0x0000FFFF;
4932                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4933                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4934         } else
4935                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4936
4937         /*
4938          * If loopback mode is configured, set LPBK bit.
4939          */
4940         if (dev->data->dev_conf.lpbk_mode != 0) {
4941                 rc = ixgbe_check_supported_loopback_mode(dev);
4942                 if (rc < 0) {
4943                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4944                         return rc;
4945                 }
4946                 hlreg0 |= IXGBE_HLREG0_LPBK;
4947         } else {
4948                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4949         }
4950
4951         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4952
4953         /*
4954          * Assume no header split and no VLAN strip support
4955          * on any Rx queue first .
4956          */
4957         rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4958         /* Setup RX queues */
4959         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4960                 rxq = dev->data->rx_queues[i];
4961
4962                 /*
4963                  * Reset crc_len in case it was changed after queue setup by a
4964                  * call to configure.
4965                  */
4966                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4967                         rxq->crc_len = RTE_ETHER_CRC_LEN;
4968                 else
4969                         rxq->crc_len = 0;
4970
4971                 /* Setup the Base and Length of the Rx Descriptor Rings */
4972                 bus_addr = rxq->rx_ring_phys_addr;
4973                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4974                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4975                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4976                                 (uint32_t)(bus_addr >> 32));
4977                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4978                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4979                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4980                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4981
4982                 /* Configure the SRRCTL register */
4983                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4984
4985                 /* Set if packets are dropped when no descriptors available */
4986                 if (rxq->drop_en)
4987                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4988
4989                 /*
4990                  * Configure the RX buffer size in the BSIZEPACKET field of
4991                  * the SRRCTL register of the queue.
4992                  * The value is in 1 KB resolution. Valid values can be from
4993                  * 1 KB to 16 KB.
4994                  */
4995                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4996                         RTE_PKTMBUF_HEADROOM);
4997                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4998                            IXGBE_SRRCTL_BSIZEPKT_MASK);
4999
5000                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5001
5002                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5003                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5004
5005                 /* It adds dual VLAN length for supporting dual VLAN */
5006                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5007                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5008                         dev->data->scattered_rx = 1;
5009                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5010                         rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5011         }
5012
5013         if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5014                 dev->data->scattered_rx = 1;
5015
5016         /*
5017          * Device configured with multiple RX queues.
5018          */
5019         ixgbe_dev_mq_rx_configure(dev);
5020
5021         /*
5022          * Setup the Checksum Register.
5023          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5024          * Enable IP/L4 checkum computation by hardware if requested to do so.
5025          */
5026         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5027         rxcsum |= IXGBE_RXCSUM_PCSD;
5028         if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5029                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5030         else
5031                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5032
5033         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5034
5035         if (hw->mac.type == ixgbe_mac_82599EB ||
5036             hw->mac.type == ixgbe_mac_X540) {
5037                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5038                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5039                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5040                 else
5041                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5042                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5043                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5044         }
5045
5046         rc = ixgbe_set_rsc(dev);
5047         if (rc)
5048                 return rc;
5049
5050         ixgbe_set_rx_function(dev);
5051
5052         return 0;
5053 }
5054
5055 /*
5056  * Initializes Transmit Unit.
5057  */
5058 void __attribute__((cold))
5059 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5060 {
5061         struct ixgbe_hw     *hw;
5062         struct ixgbe_tx_queue *txq;
5063         uint64_t bus_addr;
5064         uint32_t hlreg0;
5065         uint32_t txctrl;
5066         uint16_t i;
5067
5068         PMD_INIT_FUNC_TRACE();
5069         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5070
5071         /* Enable TX CRC (checksum offload requirement) and hw padding
5072          * (TSO requirement)
5073          */
5074         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5075         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5076         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5077
5078         /* Setup the Base and Length of the Tx Descriptor Rings */
5079         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5080                 txq = dev->data->tx_queues[i];
5081
5082                 bus_addr = txq->tx_ring_phys_addr;
5083                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5084                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5085                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5086                                 (uint32_t)(bus_addr >> 32));
5087                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5088                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5089                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5090                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5091                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5092
5093                 /*
5094                  * Disable Tx Head Writeback RO bit, since this hoses
5095                  * bookkeeping if things aren't delivered in order.
5096                  */
5097                 switch (hw->mac.type) {
5098                 case ixgbe_mac_82598EB:
5099                         txctrl = IXGBE_READ_REG(hw,
5100                                                 IXGBE_DCA_TXCTRL(txq->reg_idx));
5101                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5102                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5103                                         txctrl);
5104                         break;
5105
5106                 case ixgbe_mac_82599EB:
5107                 case ixgbe_mac_X540:
5108                 case ixgbe_mac_X550:
5109                 case ixgbe_mac_X550EM_x:
5110                 case ixgbe_mac_X550EM_a:
5111                 default:
5112                         txctrl = IXGBE_READ_REG(hw,
5113                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5114                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5115                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5116                                         txctrl);
5117                         break;
5118                 }
5119         }
5120
5121         /* Device configured with multiple TX queues. */
5122         ixgbe_dev_mq_tx_configure(dev);
5123 }
5124
5125 /*
5126  * Check if requested loopback mode is supported
5127  */
5128 int
5129 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5130 {
5131         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5132
5133         if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5134                 if (hw->mac.type == ixgbe_mac_82599EB ||
5135                      hw->mac.type == ixgbe_mac_X540 ||
5136                      hw->mac.type == ixgbe_mac_X550 ||
5137                      hw->mac.type == ixgbe_mac_X550EM_x ||
5138                      hw->mac.type == ixgbe_mac_X550EM_a)
5139                         return 0;
5140
5141         return -ENOTSUP;
5142 }
5143
5144 /*
5145  * Set up link for 82599 loopback mode Tx->Rx.
5146  */
5147 static inline void __attribute__((cold))
5148 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5149 {
5150         PMD_INIT_FUNC_TRACE();
5151
5152         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5153                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5154                                 IXGBE_SUCCESS) {
5155                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5156                         /* ignore error */
5157                         return;
5158                 }
5159         }
5160
5161         /* Restart link */
5162         IXGBE_WRITE_REG(hw,
5163                         IXGBE_AUTOC,
5164                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5165         ixgbe_reset_pipeline_82599(hw);
5166
5167         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5168         msec_delay(50);
5169 }
5170
5171
5172 /*
5173  * Start Transmit and Receive Units.
5174  */
5175 int __attribute__((cold))
5176 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5177 {
5178         struct ixgbe_hw     *hw;
5179         struct ixgbe_tx_queue *txq;
5180         struct ixgbe_rx_queue *rxq;
5181         uint32_t txdctl;
5182         uint32_t dmatxctl;
5183         uint32_t rxctrl;
5184         uint16_t i;
5185         int ret = 0;
5186
5187         PMD_INIT_FUNC_TRACE();
5188         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5189
5190         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5191                 txq = dev->data->tx_queues[i];
5192                 /* Setup Transmit Threshold Registers */
5193                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5194                 txdctl |= txq->pthresh & 0x7F;
5195                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5196                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5197                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5198         }
5199
5200         if (hw->mac.type != ixgbe_mac_82598EB) {
5201                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5202                 dmatxctl |= IXGBE_DMATXCTL_TE;
5203                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5204         }
5205
5206         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5207                 txq = dev->data->tx_queues[i];
5208                 if (!txq->tx_deferred_start) {
5209                         ret = ixgbe_dev_tx_queue_start(dev, i);
5210                         if (ret < 0)
5211                                 return ret;
5212                 }
5213         }
5214
5215         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5216                 rxq = dev->data->rx_queues[i];
5217                 if (!rxq->rx_deferred_start) {
5218                         ret = ixgbe_dev_rx_queue_start(dev, i);
5219                         if (ret < 0)
5220                                 return ret;
5221                 }
5222         }
5223
5224         /* Enable Receive engine */
5225         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5226         if (hw->mac.type == ixgbe_mac_82598EB)
5227                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5228         rxctrl |= IXGBE_RXCTRL_RXEN;
5229         hw->mac.ops.enable_rx_dma(hw, rxctrl);
5230
5231         /* If loopback mode is enabled, set up the link accordingly */
5232         if (dev->data->dev_conf.lpbk_mode != 0) {
5233                 if (hw->mac.type == ixgbe_mac_82599EB)
5234                         ixgbe_setup_loopback_link_82599(hw);
5235                 else if (hw->mac.type == ixgbe_mac_X540 ||
5236                      hw->mac.type == ixgbe_mac_X550 ||
5237                      hw->mac.type == ixgbe_mac_X550EM_x ||
5238                      hw->mac.type == ixgbe_mac_X550EM_a)
5239                         ixgbe_setup_loopback_link_x540_x550(hw, true);
5240         }
5241
5242 #ifdef RTE_LIBRTE_SECURITY
5243         if ((dev->data->dev_conf.rxmode.offloads &
5244                         DEV_RX_OFFLOAD_SECURITY) ||
5245                 (dev->data->dev_conf.txmode.offloads &
5246                         DEV_TX_OFFLOAD_SECURITY)) {
5247                 ret = ixgbe_crypto_enable_ipsec(dev);
5248                 if (ret != 0) {
5249                         PMD_DRV_LOG(ERR,
5250                                     "ixgbe_crypto_enable_ipsec fails with %d.",
5251                                     ret);
5252                         return ret;
5253                 }
5254         }
5255 #endif
5256
5257         return 0;
5258 }
5259
5260 /*
5261  * Start Receive Units for specified queue.
5262  */
5263 int __attribute__((cold))
5264 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5265 {
5266         struct ixgbe_hw     *hw;
5267         struct ixgbe_rx_queue *rxq;
5268         uint32_t rxdctl;
5269         int poll_ms;
5270
5271         PMD_INIT_FUNC_TRACE();
5272         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5273
5274         rxq = dev->data->rx_queues[rx_queue_id];
5275
5276         /* Allocate buffers for descriptor rings */
5277         if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5278                 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5279                              rx_queue_id);
5280                 return -1;
5281         }
5282         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5283         rxdctl |= IXGBE_RXDCTL_ENABLE;
5284         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5285
5286         /* Wait until RX Enable ready */
5287         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5288         do {
5289                 rte_delay_ms(1);
5290                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5291         } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5292         if (!poll_ms)
5293                 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5294         rte_wmb();
5295         IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5296         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5297         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5298
5299         return 0;
5300 }
5301
5302 /*
5303  * Stop Receive Units for specified queue.
5304  */
5305 int __attribute__((cold))
5306 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5307 {
5308         struct ixgbe_hw     *hw;
5309         struct ixgbe_adapter *adapter =
5310                 (struct ixgbe_adapter *)dev->data->dev_private;
5311         struct ixgbe_rx_queue *rxq;
5312         uint32_t rxdctl;
5313         int poll_ms;
5314
5315         PMD_INIT_FUNC_TRACE();
5316         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5317
5318         rxq = dev->data->rx_queues[rx_queue_id];
5319
5320         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5321         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5322         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5323
5324         /* Wait until RX Enable bit clear */
5325         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5326         do {
5327                 rte_delay_ms(1);
5328                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5329         } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5330         if (!poll_ms)
5331                 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5332
5333         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5334
5335         ixgbe_rx_queue_release_mbufs(rxq);
5336         ixgbe_reset_rx_queue(adapter, rxq);
5337         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5338
5339         return 0;
5340 }
5341
5342
5343 /*
5344  * Start Transmit Units for specified queue.
5345  */
5346 int __attribute__((cold))
5347 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5348 {
5349         struct ixgbe_hw     *hw;
5350         struct ixgbe_tx_queue *txq;
5351         uint32_t txdctl;
5352         int poll_ms;
5353
5354         PMD_INIT_FUNC_TRACE();
5355         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5356
5357         txq = dev->data->tx_queues[tx_queue_id];
5358         IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5359         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5360         txdctl |= IXGBE_TXDCTL_ENABLE;
5361         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5362
5363         /* Wait until TX Enable ready */
5364         if (hw->mac.type == ixgbe_mac_82599EB) {
5365                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5366                 do {
5367                         rte_delay_ms(1);
5368                         txdctl = IXGBE_READ_REG(hw,
5369                                 IXGBE_TXDCTL(txq->reg_idx));
5370                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5371                 if (!poll_ms)
5372                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5373                                 tx_queue_id);
5374         }
5375         rte_wmb();
5376         IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5377         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5378
5379         return 0;
5380 }
5381
5382 /*
5383  * Stop Transmit Units for specified queue.
5384  */
5385 int __attribute__((cold))
5386 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5387 {
5388         struct ixgbe_hw     *hw;
5389         struct ixgbe_tx_queue *txq;
5390         uint32_t txdctl;
5391         uint32_t txtdh, txtdt;
5392         int poll_ms;
5393
5394         PMD_INIT_FUNC_TRACE();
5395         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5396
5397         txq = dev->data->tx_queues[tx_queue_id];
5398
5399         /* Wait until TX queue is empty */
5400         if (hw->mac.type == ixgbe_mac_82599EB) {
5401                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5402                 do {
5403                         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5404                         txtdh = IXGBE_READ_REG(hw,
5405                                                IXGBE_TDH(txq->reg_idx));
5406                         txtdt = IXGBE_READ_REG(hw,
5407                                                IXGBE_TDT(txq->reg_idx));
5408                 } while (--poll_ms && (txtdh != txtdt));
5409                 if (!poll_ms)
5410                         PMD_INIT_LOG(ERR,
5411                                 "Tx Queue %d is not empty when stopping.",
5412                                 tx_queue_id);
5413         }
5414
5415         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5416         txdctl &= ~IXGBE_TXDCTL_ENABLE;
5417         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5418
5419         /* Wait until TX Enable bit clear */
5420         if (hw->mac.type == ixgbe_mac_82599EB) {
5421                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5422                 do {
5423                         rte_delay_ms(1);
5424                         txdctl = IXGBE_READ_REG(hw,
5425                                                 IXGBE_TXDCTL(txq->reg_idx));
5426                 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5427                 if (!poll_ms)
5428                         PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5429                                 tx_queue_id);
5430         }
5431
5432         if (txq->ops != NULL) {
5433                 txq->ops->release_mbufs(txq);
5434                 txq->ops->reset(txq);
5435         }
5436         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5437
5438         return 0;
5439 }
5440
5441 void
5442 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5443         struct rte_eth_rxq_info *qinfo)
5444 {
5445         struct ixgbe_rx_queue *rxq;
5446
5447         rxq = dev->data->rx_queues[queue_id];
5448
5449         qinfo->mp = rxq->mb_pool;
5450         qinfo->scattered_rx = dev->data->scattered_rx;
5451         qinfo->nb_desc = rxq->nb_rx_desc;
5452
5453         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5454         qinfo->conf.rx_drop_en = rxq->drop_en;
5455         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5456         qinfo->conf.offloads = rxq->offloads;
5457 }
5458
5459 void
5460 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5461         struct rte_eth_txq_info *qinfo)
5462 {
5463         struct ixgbe_tx_queue *txq;
5464
5465         txq = dev->data->tx_queues[queue_id];
5466
5467         qinfo->nb_desc = txq->nb_tx_desc;
5468
5469         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5470         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5471         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5472
5473         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5474         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5475         qinfo->conf.offloads = txq->offloads;
5476         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5477 }
5478
5479 /*
5480  * [VF] Initializes Receive Unit.
5481  */
5482 int __attribute__((cold))
5483 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5484 {
5485         struct ixgbe_hw     *hw;
5486         struct ixgbe_rx_queue *rxq;
5487         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5488         uint64_t bus_addr;
5489         uint32_t srrctl, psrtype = 0;
5490         uint16_t buf_size;
5491         uint16_t i;
5492         int ret;
5493
5494         PMD_INIT_FUNC_TRACE();
5495         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5496
5497         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5498                 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5499                         "it should be power of 2");
5500                 return -1;
5501         }
5502
5503         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5504                 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5505                         "it should be equal to or less than %d",
5506                         hw->mac.max_rx_queues);
5507                 return -1;
5508         }
5509
5510         /*
5511          * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
5512          * disables the VF receipt of packets if the PF MTU is > 1500.
5513          * This is done to deal with 82599 limitations that imposes
5514          * the PF and all VFs to share the same MTU.
5515          * Then, the PF driver enables again the VF receipt of packet when
5516          * the VF driver issues a IXGBE_VF_SET_LPE request.
5517          * In the meantime, the VF device cannot be used, even if the VF driver
5518          * and the Guest VM network stack are ready to accept packets with a
5519          * size up to the PF MTU.
5520          * As a work-around to this PF behaviour, force the call to
5521          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5522          * VF packets received can work in all cases.
5523          */
5524         ixgbevf_rlpml_set_vf(hw,
5525                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5526
5527         /*
5528          * Assume no header split and no VLAN strip support
5529          * on any Rx queue first .
5530          */
5531         rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5532         /* Setup RX queues */
5533         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5534                 rxq = dev->data->rx_queues[i];
5535
5536                 /* Allocate buffers for descriptor rings */
5537                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5538                 if (ret)
5539                         return ret;
5540
5541                 /* Setup the Base and Length of the Rx Descriptor Rings */
5542                 bus_addr = rxq->rx_ring_phys_addr;
5543
5544                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5545                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5546                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5547                                 (uint32_t)(bus_addr >> 32));
5548                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5549                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5550                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5551                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5552
5553
5554                 /* Configure the SRRCTL register */
5555                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5556
5557                 /* Set if packets are dropped when no descriptors available */
5558                 if (rxq->drop_en)
5559                         srrctl |= IXGBE_SRRCTL_DROP_EN;
5560
5561                 /*
5562                  * Configure the RX buffer size in the BSIZEPACKET field of
5563                  * the SRRCTL register of the queue.
5564                  * The value is in 1 KB resolution. Valid values can be from
5565                  * 1 KB to 16 KB.
5566                  */
5567                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5568                         RTE_PKTMBUF_HEADROOM);
5569                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5570                            IXGBE_SRRCTL_BSIZEPKT_MASK);
5571
5572                 /*
5573                  * VF modification to write virtual function SRRCTL register
5574                  */
5575                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5576
5577                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5578                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5579
5580                 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5581                     /* It adds dual VLAN length for supporting dual VLAN */
5582                     (rxmode->max_rx_pkt_len +
5583                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5584                         if (!dev->data->scattered_rx)
5585                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5586                         dev->data->scattered_rx = 1;
5587                 }
5588
5589                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5590                         rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5591         }
5592
5593         /* Set RQPL for VF RSS according to max Rx queue */
5594         psrtype |= (dev->data->nb_rx_queues >> 1) <<
5595                 IXGBE_PSRTYPE_RQPL_SHIFT;
5596         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5597
5598         ixgbe_set_rx_function(dev);
5599
5600         return 0;
5601 }
5602
5603 /*
5604  * [VF] Initializes Transmit Unit.
5605  */
5606 void __attribute__((cold))
5607 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5608 {
5609         struct ixgbe_hw     *hw;
5610         struct ixgbe_tx_queue *txq;
5611         uint64_t bus_addr;
5612         uint32_t txctrl;
5613         uint16_t i;
5614
5615         PMD_INIT_FUNC_TRACE();
5616         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5617
5618         /* Setup the Base and Length of the Tx Descriptor Rings */
5619         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5620                 txq = dev->data->tx_queues[i];
5621                 bus_addr = txq->tx_ring_phys_addr;
5622                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5623                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5624                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5625                                 (uint32_t)(bus_addr >> 32));
5626                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5627                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5628                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5629                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5630                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5631
5632                 /*
5633                  * Disable Tx Head Writeback RO bit, since this hoses
5634                  * bookkeeping if things aren't delivered in order.
5635                  */
5636                 txctrl = IXGBE_READ_REG(hw,
5637                                 IXGBE_VFDCA_TXCTRL(i));
5638                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5639                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5640                                 txctrl);
5641         }
5642 }
5643
5644 /*
5645  * [VF] Start Transmit and Receive Units.
5646  */
5647 void __attribute__((cold))
5648 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5649 {
5650         struct ixgbe_hw     *hw;
5651         struct ixgbe_tx_queue *txq;
5652         struct ixgbe_rx_queue *rxq;
5653         uint32_t txdctl;
5654         uint32_t rxdctl;
5655         uint16_t i;
5656         int poll_ms;
5657
5658         PMD_INIT_FUNC_TRACE();
5659         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5660
5661         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5662                 txq = dev->data->tx_queues[i];
5663                 /* Setup Transmit Threshold Registers */
5664                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5665                 txdctl |= txq->pthresh & 0x7F;
5666                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5667                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5668                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5669         }
5670
5671         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5672
5673                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5674                 txdctl |= IXGBE_TXDCTL_ENABLE;
5675                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5676
5677                 poll_ms = 10;
5678                 /* Wait until TX Enable ready */
5679                 do {
5680                         rte_delay_ms(1);
5681                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5682                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5683                 if (!poll_ms)
5684                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5685         }
5686         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5687
5688                 rxq = dev->data->rx_queues[i];
5689
5690                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5691                 rxdctl |= IXGBE_RXDCTL_ENABLE;
5692                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5693
5694                 /* Wait until RX Enable ready */
5695                 poll_ms = 10;
5696                 do {
5697                         rte_delay_ms(1);
5698                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5699                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5700                 if (!poll_ms)
5701                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5702                 rte_wmb();
5703                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5704
5705         }
5706 }
5707
5708 int
5709 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5710                     const struct rte_flow_action_rss *in)
5711 {
5712         if (in->key_len > RTE_DIM(out->key) ||
5713             in->queue_num > RTE_DIM(out->queue))
5714                 return -EINVAL;
5715         out->conf = (struct rte_flow_action_rss){
5716                 .func = in->func,
5717                 .level = in->level,
5718                 .types = in->types,
5719                 .key_len = in->key_len,
5720                 .queue_num = in->queue_num,
5721                 .key = memcpy(out->key, in->key, in->key_len),
5722                 .queue = memcpy(out->queue, in->queue,
5723                                 sizeof(*in->queue) * in->queue_num),
5724         };
5725         return 0;
5726 }
5727
5728 int
5729 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5730                       const struct rte_flow_action_rss *with)
5731 {
5732         return (comp->func == with->func &&
5733                 comp->level == with->level &&
5734                 comp->types == with->types &&
5735                 comp->key_len == with->key_len &&
5736                 comp->queue_num == with->queue_num &&
5737                 !memcmp(comp->key, with->key, with->key_len) &&
5738                 !memcmp(comp->queue, with->queue,
5739                         sizeof(*with->queue) * with->queue_num));
5740 }
5741
5742 int
5743 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5744                 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5745 {
5746         struct ixgbe_hw *hw;
5747         uint32_t reta;
5748         uint16_t i;
5749         uint16_t j;
5750         uint16_t sp_reta_size;
5751         uint32_t reta_reg;
5752         struct rte_eth_rss_conf rss_conf = {
5753                 .rss_key = conf->conf.key_len ?
5754                         (void *)(uintptr_t)conf->conf.key : NULL,
5755                 .rss_key_len = conf->conf.key_len,
5756                 .rss_hf = conf->conf.types,
5757         };
5758         struct ixgbe_filter_info *filter_info =
5759                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5760
5761         PMD_INIT_FUNC_TRACE();
5762         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5763
5764         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5765
5766         if (!add) {
5767                 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5768                                           &conf->conf)) {
5769                         ixgbe_rss_disable(dev);
5770                         memset(&filter_info->rss_info, 0,
5771                                 sizeof(struct ixgbe_rte_flow_rss_conf));
5772                         return 0;
5773                 }
5774                 return -EINVAL;
5775         }
5776
5777         if (filter_info->rss_info.conf.queue_num)
5778                 return -EINVAL;
5779         /* Fill in redirection table
5780          * The byte-swap is needed because NIC registers are in
5781          * little-endian order.
5782          */
5783         reta = 0;
5784         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5785                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5786
5787                 if (j == conf->conf.queue_num)
5788                         j = 0;
5789                 reta = (reta << 8) | conf->conf.queue[j];
5790                 if ((i & 3) == 3)
5791                         IXGBE_WRITE_REG(hw, reta_reg,
5792                                         rte_bswap32(reta));
5793         }
5794
5795         /* Configure the RSS key and the RSS protocols used to compute
5796          * the RSS hash of input packets.
5797          */
5798         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5799                 ixgbe_rss_disable(dev);
5800                 return 0;
5801         }
5802         if (rss_conf.rss_key == NULL)
5803                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5804         ixgbe_hw_rss_hash_set(hw, &rss_conf);
5805
5806         if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5807                 return -EINVAL;
5808
5809         return 0;
5810 }
5811
5812 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5813 __rte_weak int
5814 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5815 {
5816         return -1;
5817 }
5818
5819 __rte_weak uint16_t
5820 ixgbe_recv_pkts_vec(
5821         void __rte_unused *rx_queue,
5822         struct rte_mbuf __rte_unused **rx_pkts,
5823         uint16_t __rte_unused nb_pkts)
5824 {
5825         return 0;
5826 }
5827
5828 __rte_weak uint16_t
5829 ixgbe_recv_scattered_pkts_vec(
5830         void __rte_unused *rx_queue,
5831         struct rte_mbuf __rte_unused **rx_pkts,
5832         uint16_t __rte_unused nb_pkts)
5833 {
5834         return 0;
5835 }
5836
5837 __rte_weak int
5838 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5839 {
5840         return -1;
5841 }