net: add rte prefix to UDP structure
[dpdk.git] drivers/net/ixgbe/ixgbe_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5
6 #include <sys/queue.h>
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdint.h>
13 #include <stdarg.h>
14 #include <unistd.h>
15 #include <inttypes.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
23 #include <rte_pci.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
38 #include <rte_udp.h>
39 #include <rte_tcp.h>
40 #include <rte_sctp.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
43 #include <rte_ip.h>
44 #include <rte_net.h>
45
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
53
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
56 #else
57 #define IXGBE_TX_IEEE1588_TMST 0
58 #endif
59 /* Bit mask of the offload flags that are relevant when building a TX context */
60 #define IXGBE_TX_OFFLOAD_MASK (                  \
61                 PKT_TX_OUTER_IPV6 |              \
62                 PKT_TX_OUTER_IPV4 |              \
63                 PKT_TX_IPV6 |                    \
64                 PKT_TX_IPV4 |                    \
65                 PKT_TX_VLAN_PKT |                \
66                 PKT_TX_IP_CKSUM |                \
67                 PKT_TX_L4_MASK |                 \
68                 PKT_TX_TCP_SEG |                 \
69                 PKT_TX_MACSEC |                  \
70                 PKT_TX_OUTER_IP_CKSUM |          \
71                 PKT_TX_SEC_OFFLOAD |     \
72                 IXGBE_TX_IEEE1588_TMST)
73
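/*
 * The XOR below is effectively a set difference: IXGBE_TX_OFFLOAD_MASK is a
 * subset of PKT_TX_OFFLOAD_MASK, so the result keeps exactly those PKT_TX_*
 * flags that the mbuf API defines but this PMD cannot handle. Packets
 * carrying any of these flags are rejected in ixgbe_prep_pkts() below.
 */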
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75                 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
76
77 #if 1
78 #define RTE_PMD_USE_PREFETCH
79 #endif
80
81 #ifdef RTE_PMD_USE_PREFETCH
82 /*
83  * Prefetch a cache line into all cache levels.
84  */
85 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
86 #else
87 #define rte_ixgbe_prefetch(p)   do {} while (0)
88 #endif
89
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
92                                     uint16_t nb_pkts);
93 #endif
94
95 /*********************************************************************
96  *
97  *  TX functions
98  *
99  **********************************************************************/
100
101 /*
102  * Check for descriptors with their DD bit set and free mbufs.
103  * Return the total number of buffers freed.
104  */
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
107 {
108         struct ixgbe_tx_entry *txep;
109         uint32_t status;
110         int i, nb_free = 0;
111         struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
112
113         /* check DD bit on threshold descriptor */
114         status = txq->tx_ring[txq->tx_next_dd].wb.status;
115         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
116                 return 0;
117
118         /*
119          * first buffer to free from S/W ring is at index
120          * tx_next_dd - (tx_rs_thresh-1)
121          */
122         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
123
124         for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125                 /* free buffers one at a time */
126                 m = rte_pktmbuf_prefree_seg(txep->mbuf);
127                 txep->mbuf = NULL;
128
129                 if (unlikely(m == NULL))
130                         continue;
131
132                 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133                     (nb_free > 0 && m->pool != free[0]->pool)) {
134                         rte_mempool_put_bulk(free[0]->pool,
135                                              (void **)free, nb_free);
136                         nb_free = 0;
137                 }
138
139                 free[nb_free++] = m;
140         }
141
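        /*
         * Flush whatever is still batched. Illustrative example: with
         * tx_rs_thresh = 32 and all mbufs allocated from the same mempool,
         * the loop above only accumulates pointers, so the single
         * rte_mempool_put_bulk() call below returns all 32 buffers at once.
         */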
142         if (nb_free > 0)
143                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
144
145         /* buffers were freed, update counters */
146         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148         if (txq->tx_next_dd >= txq->nb_tx_desc)
149                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
150
151         return txq->tx_rs_thresh;
152 }
153
154 /* Populate 4 descriptors with data from 4 mbufs */
155 static inline void
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
157 {
158         uint64_t buf_dma_addr;
159         uint32_t pkt_len;
160         int i;
161
162         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163                 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164                 pkt_len = (*pkts)->data_len;
165
166                 /* write data to descriptor */
167                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
168
169                 txdp->read.cmd_type_len =
170                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
171
172                 txdp->read.olinfo_status =
173                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
174
175                 rte_prefetch0(&(*pkts)->pool);
176         }
177 }
178
179 /* Populate 1 descriptor with data from 1 mbuf */
180 static inline void
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
182 {
183         uint64_t buf_dma_addr;
184         uint32_t pkt_len;
185
186         buf_dma_addr = rte_mbuf_data_iova(*pkts);
187         pkt_len = (*pkts)->data_len;
188
189         /* write data to descriptor */
190         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191         txdp->read.cmd_type_len =
192                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193         txdp->read.olinfo_status =
194                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195         rte_prefetch0(&(*pkts)->pool);
196 }
197
198 /*
199  * Fill H/W descriptor ring with mbuf data.
200  * Copy mbuf pointers to the S/W ring.
201  */
202 static inline void
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
204                       uint16_t nb_pkts)
205 {
206         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208         const int N_PER_LOOP = 4;
209         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210         int mainpart, leftover;
211         int i, j;
212
213         /*
214          * Process most of the packets in chunks of N pkts.  Any
215          * leftover packets will get processed one at a time.
216          */
217         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
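        /*
         * Illustrative example: with nb_pkts = 10 and N_PER_LOOP = 4,
         * mainpart = 8 (handled by two tx4() calls below) and
         * leftover = 2 (handled by two tx1() calls).
         */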
219         for (i = 0; i < mainpart; i += N_PER_LOOP) {
220                 /* Copy N mbuf pointers to the S/W ring */
221                 for (j = 0; j < N_PER_LOOP; ++j) {
222                         (txep + i + j)->mbuf = *(pkts + i + j);
223                 }
224                 tx4(txdp + i, pkts + i);
225         }
226
227         if (unlikely(leftover > 0)) {
228                 for (i = 0; i < leftover; ++i) {
229                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230                         tx1(txdp + mainpart + i, pkts + mainpart + i);
231                 }
232         }
233 }
234
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
237              uint16_t nb_pkts)
238 {
239         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
241         uint16_t n = 0;
242
243         /*
244          * Begin scanning the H/W ring for done descriptors when the
245          * number of available descriptors drops below tx_free_thresh.  For
246          * each done descriptor, free the associated buffer.
247          */
248         if (txq->nb_tx_free < txq->tx_free_thresh)
249                 ixgbe_tx_free_bufs(txq);
250
251         /* Only use descriptors that are available */
252         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253         if (unlikely(nb_pkts == 0))
254                 return 0;
255
256         /* Use exactly nb_pkts descriptors */
257         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
258
259         /*
260          * At this point, we know there are enough descriptors in the
261          * ring to transmit all the packets.  This assumes that each
262          * mbuf contains a single segment, and that no new offloads
263          * are expected, which would require a new context descriptor.
264          */
265
266         /*
267          * See if we're going to wrap-around. If so, handle the top
268          * of the descriptor ring first, then do the bottom.  If not,
269          * the processing looks just like the "bottom" part anyway...
270          */
271         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
274
275                 /*
276                  * We know that the last descriptor in the ring will need to
277                  * have its RS bit set because tx_rs_thresh has to be
278                  * a divisor of the ring size
279                  */
280                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
283
284                 txq->tx_tail = 0;
285         }
286
287         /* Fill H/W descriptor ring with mbuf data */
288         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
290
291         /*
292          * Determine if RS bit should be set
293          * This is what we actually want:
294          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295          * but instead of subtracting 1 and doing >=, we can just do
296          * greater than without subtracting.
297          */
298         if (txq->tx_tail > txq->tx_next_rs) {
299                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
302                                                 txq->tx_rs_thresh);
303                 if (txq->tx_next_rs >= txq->nb_tx_desc)
304                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
305         }
306
307         /*
308          * Check for wrap-around. This would only happen if we used
309          * up to the last descriptor in the ring, no more, no less.
310          */
311         if (txq->tx_tail >= txq->nb_tx_desc)
312                 txq->tx_tail = 0;
313
314         /* update tail pointer */
315         rte_wmb();
316         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
317
318         return nb_pkts;
319 }
320
321 uint16_t
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
323                        uint16_t nb_pkts)
324 {
325         uint16_t nb_tx;
326
327         /* Try to transmit at least chunks of TX_MAX_BURST pkts */
328         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
330
331         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
332         nb_tx = 0;
333         while (nb_pkts) {
334                 uint16_t ret, n;
335
336                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338                 nb_tx = (uint16_t)(nb_tx + ret);
339                 nb_pkts = (uint16_t)(nb_pkts - ret);
340                 if (ret < n)
341                         break;
342         }
343
344         return nb_tx;
345 }
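/*
 * Usage sketch (illustrative, application side rather than driver code):
 * the simple transmit path above is reached through the generic burst API,
 * e.g.
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * provided the queue was configured such that ixgbe_set_tx_function()
 * selected ixgbe_xmit_pkts_simple() as the transmit callback.
 */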
346
347 #ifdef RTE_IXGBE_INC_VECTOR
348 static uint16_t
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
350                     uint16_t nb_pkts)
351 {
352         uint16_t nb_tx = 0;
353         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354
355         while (nb_pkts) {
356                 uint16_t ret, num;
357
358                 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359                 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
360                                                  num);
361                 nb_tx += ret;
362                 nb_pkts -= ret;
363                 if (ret < num)
364                         break;
365         }
366
367         return nb_tx;
368 }
369 #endif
370
371 static inline void
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375                 __rte_unused uint64_t *mdata)
376 {
377         uint32_t type_tucmd_mlhl;
378         uint32_t mss_l4len_idx = 0;
379         uint32_t ctx_idx;
380         uint32_t vlan_macip_lens;
381         union ixgbe_tx_offload tx_offload_mask;
382         uint32_t seqnum_seed = 0;
383
384         ctx_idx = txq->ctx_curr;
385         tx_offload_mask.data[0] = 0;
386         tx_offload_mask.data[1] = 0;
387         type_tucmd_mlhl = 0;
388
389         /* Specify which HW CTX to upload. */
390         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
391
392         if (ol_flags & PKT_TX_VLAN_PKT) {
393                 tx_offload_mask.vlan_tci |= ~0;
394         }
395
396         /* check if TCP segmentation is required for this packet */
397         if (ol_flags & PKT_TX_TCP_SEG) {
398                 /* implies IP cksum in IPv4 */
399                 if (ol_flags & PKT_TX_IP_CKSUM)
400                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
403                 else
404                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
407
408                 tx_offload_mask.l2_len |= ~0;
409                 tx_offload_mask.l3_len |= ~0;
410                 tx_offload_mask.l4_len |= ~0;
411                 tx_offload_mask.tso_segsz |= ~0;
412                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414         } else { /* no TSO, check if hardware checksum is needed */
415                 if (ol_flags & PKT_TX_IP_CKSUM) {
416                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417                         tx_offload_mask.l2_len |= ~0;
418                         tx_offload_mask.l3_len |= ~0;
419                 }
420
421                 switch (ol_flags & PKT_TX_L4_MASK) {
422                 case PKT_TX_UDP_CKSUM:
423                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425                         mss_l4len_idx |= sizeof(struct rte_udp_hdr)
426                                 << IXGBE_ADVTXD_L4LEN_SHIFT;
427                         tx_offload_mask.l2_len |= ~0;
428                         tx_offload_mask.l3_len |= ~0;
429                         break;
430                 case PKT_TX_TCP_CKSUM:
431                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
432                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433                         mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
434                                 << IXGBE_ADVTXD_L4LEN_SHIFT;
435                         tx_offload_mask.l2_len |= ~0;
436                         tx_offload_mask.l3_len |= ~0;
437                         break;
438                 case PKT_TX_SCTP_CKSUM:
439                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
440                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
441                         mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
442                                 << IXGBE_ADVTXD_L4LEN_SHIFT;
443                         tx_offload_mask.l2_len |= ~0;
444                         tx_offload_mask.l3_len |= ~0;
445                         break;
446                 default:
447                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
448                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
449                         break;
450                 }
451         }
452
453         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
454                 tx_offload_mask.outer_l2_len |= ~0;
455                 tx_offload_mask.outer_l3_len |= ~0;
456                 tx_offload_mask.l2_len |= ~0;
457                 seqnum_seed |= tx_offload.outer_l3_len
458                                << IXGBE_ADVTXD_OUTER_IPLEN;
459                 seqnum_seed |= tx_offload.l2_len
460                                << IXGBE_ADVTXD_TUNNEL_LEN;
461         }
462 #ifdef RTE_LIBRTE_SECURITY
463         if (ol_flags & PKT_TX_SEC_OFFLOAD) {
464                 union ixgbe_crypto_tx_desc_md *md =
465                                 (union ixgbe_crypto_tx_desc_md *)mdata;
466                 seqnum_seed |=
467                         (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
468                 type_tucmd_mlhl |= md->enc ?
469                                 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
470                                 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
471                 type_tucmd_mlhl |=
472                         (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
473                 tx_offload_mask.sa_idx |= ~0;
474                 tx_offload_mask.sec_pad_len |= ~0;
475         }
476 #endif
477
478         txq->ctx_cache[ctx_idx].flags = ol_flags;
479         txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
480                 tx_offload_mask.data[0] & tx_offload.data[0];
481         txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
482                 tx_offload_mask.data[1] & tx_offload.data[1];
483         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
484
485         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
486         vlan_macip_lens = tx_offload.l3_len;
487         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
488                 vlan_macip_lens |= (tx_offload.outer_l2_len <<
489                                     IXGBE_ADVTXD_MACLEN_SHIFT);
490         else
491                 vlan_macip_lens |= (tx_offload.l2_len <<
492                                     IXGBE_ADVTXD_MACLEN_SHIFT);
493         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
494         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
495         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
496         ctx_txd->seqnum_seed     = seqnum_seed;
497 }
498
499 /*
500  * Check which hardware context can be used. Use the existing match
501  * or create a new context descriptor.
502  */
503 static inline uint32_t
504 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
505                    union ixgbe_tx_offload tx_offload)
506 {
507         /* If match with the current used context */
508         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
509                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
510                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
511                      & tx_offload.data[0])) &&
512                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
513                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
514                      & tx_offload.data[1]))))
515                 return txq->ctx_curr;
516
517         /* Otherwise, check whether it matches the other cached context */
518         txq->ctx_curr ^= 1;
519         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
520                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
521                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
522                      & tx_offload.data[0])) &&
523                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
524                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
525                      & tx_offload.data[1]))))
526                 return txq->ctx_curr;
527
528         /* No match: a new context descriptor has to be built */
529         return IXGBE_CTX_NUM;
530 }
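/*
 * Illustrative behaviour of the two-entry context cache above: two
 * interleaved flows with different offload requirements (for example,
 * different VLAN tags or checksum settings) keep alternating between the
 * two cached slots and never rebuild a context descriptor, while a third
 * distinct combination returns IXGBE_CTX_NUM and forces a new one.
 */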
531
532 static inline uint32_t
533 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
534 {
535         uint32_t tmp = 0;
536
537         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
538                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
539         if (ol_flags & PKT_TX_IP_CKSUM)
540                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
541         if (ol_flags & PKT_TX_TCP_SEG)
542                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
543         return tmp;
544 }
545
546 static inline uint32_t
547 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
548 {
549         uint32_t cmdtype = 0;
550
551         if (ol_flags & PKT_TX_VLAN_PKT)
552                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
553         if (ol_flags & PKT_TX_TCP_SEG)
554                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
555         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
556                 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
557         if (ol_flags & PKT_TX_MACSEC)
558                 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
559         return cmdtype;
560 }
561
562 /* Default RS bit threshold values */
563 #ifndef DEFAULT_TX_RS_THRESH
564 #define DEFAULT_TX_RS_THRESH   32
565 #endif
566 #ifndef DEFAULT_TX_FREE_THRESH
567 #define DEFAULT_TX_FREE_THRESH 32
568 #endif
569
570 /* Reset transmit descriptors after they have been used */
571 static inline int
572 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
573 {
574         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
575         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
576         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
577         uint16_t nb_tx_desc = txq->nb_tx_desc;
578         uint16_t desc_to_clean_to;
579         uint16_t nb_tx_to_clean;
580         uint32_t status;
581
582         /* Determine the last descriptor needing to be cleaned */
583         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
584         if (desc_to_clean_to >= nb_tx_desc)
585                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
586
587         /* Check to make sure the last descriptor to clean is done */
588         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
589         status = txr[desc_to_clean_to].wb.status;
590         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
591                 PMD_TX_FREE_LOG(DEBUG,
592                                 "TX descriptor %4u is not done "
593                                 "(port=%d queue=%d)",
594                                 desc_to_clean_to,
595                                 txq->port_id, txq->queue_id);
596                 /* Failed to clean any descriptors, better luck next time */
597                 return -(1);
598         }
599
600         /* Figure out how many descriptors will be cleaned */
601         if (last_desc_cleaned > desc_to_clean_to)
602                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
603                                                         desc_to_clean_to);
604         else
605                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
606                                                 last_desc_cleaned);
607
608         PMD_TX_FREE_LOG(DEBUG,
609                         "Cleaning %4u TX descriptors: %4u to %4u "
610                         "(port=%d queue=%d)",
611                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
612                         txq->port_id, txq->queue_id);
613
614         /*
615          * The last descriptor to clean is done, so that means all the
616          * descriptors from the last descriptor that was cleaned
617          * up to the last descriptor with the RS bit set
618          * are done. Only reset the threshold descriptor.
619          */
620         txr[desc_to_clean_to].wb.status = 0;
621
622         /* Update the txq to reflect the last descriptor that was cleaned */
623         txq->last_desc_cleaned = desc_to_clean_to;
624         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
625
626         /* No Error */
627         return 0;
628 }
629
630 uint16_t
631 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
632                 uint16_t nb_pkts)
633 {
634         struct ixgbe_tx_queue *txq;
635         struct ixgbe_tx_entry *sw_ring;
636         struct ixgbe_tx_entry *txe, *txn;
637         volatile union ixgbe_adv_tx_desc *txr;
638         volatile union ixgbe_adv_tx_desc *txd, *txp;
639         struct rte_mbuf     *tx_pkt;
640         struct rte_mbuf     *m_seg;
641         uint64_t buf_dma_addr;
642         uint32_t olinfo_status;
643         uint32_t cmd_type_len;
644         uint32_t pkt_len;
645         uint16_t slen;
646         uint64_t ol_flags;
647         uint16_t tx_id;
648         uint16_t tx_last;
649         uint16_t nb_tx;
650         uint16_t nb_used;
651         uint64_t tx_ol_req;
652         uint32_t ctx = 0;
653         uint32_t new_ctx;
654         union ixgbe_tx_offload tx_offload;
655 #ifdef RTE_LIBRTE_SECURITY
656         uint8_t use_ipsec;
657 #endif
658
659         tx_offload.data[0] = 0;
660         tx_offload.data[1] = 0;
661         txq = tx_queue;
662         sw_ring = txq->sw_ring;
663         txr     = txq->tx_ring;
664         tx_id   = txq->tx_tail;
665         txe = &sw_ring[tx_id];
666         txp = NULL;
667
668         /* Determine if the descriptor ring needs to be cleaned. */
669         if (txq->nb_tx_free < txq->tx_free_thresh)
670                 ixgbe_xmit_cleanup(txq);
671
672         rte_prefetch0(&txe->mbuf->pool);
673
674         /* TX loop */
675         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
676                 new_ctx = 0;
677                 tx_pkt = *tx_pkts++;
678                 pkt_len = tx_pkt->pkt_len;
679
680                 /*
681                  * Determine how many (if any) context descriptors
682                  * are needed for offload functionality.
683                  */
684                 ol_flags = tx_pkt->ol_flags;
685 #ifdef RTE_LIBRTE_SECURITY
686                 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
687 #endif
688
689                 /* If hardware offload required */
690                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
691                 if (tx_ol_req) {
692                         tx_offload.l2_len = tx_pkt->l2_len;
693                         tx_offload.l3_len = tx_pkt->l3_len;
694                         tx_offload.l4_len = tx_pkt->l4_len;
695                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
696                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
697                         tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
698                         tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
699 #ifdef RTE_LIBRTE_SECURITY
700                         if (use_ipsec) {
701                                 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
702                                         (union ixgbe_crypto_tx_desc_md *)
703                                                         &tx_pkt->udata64;
704                                 tx_offload.sa_idx = ipsec_mdata->sa_idx;
705                                 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
706                         }
707 #endif
708
709                         /* Decide if a new context must be built or an existing one can be reused. */
710                         ctx = what_advctx_update(txq, tx_ol_req,
711                                 tx_offload);
712                         /* Only allocate a context descriptor if required */
713                         new_ctx = (ctx == IXGBE_CTX_NUM);
714                         ctx = txq->ctx_curr;
715                 }
716
717                 /*
718                  * Keep track of how many descriptors are used in this loop iteration.
719                  * This will always be the number of segments plus the number of
720                  * context descriptors required to transmit the packet.
721                  */
722                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
723
724                 if (txp != NULL &&
725                                 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
726                         /* set RS on the previous packet in the burst */
727                         txp->read.cmd_type_len |=
728                                 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
729
730                 /*
731                  * The number of descriptors that must be allocated for a
732                  * packet is the number of segments of that packet, plus 1
733                  * Context Descriptor for the hardware offload, if any.
734                  * Determine the last TX descriptor to allocate in the TX ring
735                  * for the packet, starting from the current position (tx_id)
736                  * in the ring.
737                  */
738                 tx_last = (uint16_t) (tx_id + nb_used - 1);
739
740                 /* Circular ring */
741                 if (tx_last >= txq->nb_tx_desc)
742                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
743
744                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
745                            " tx_first=%u tx_last=%u",
746                            (unsigned) txq->port_id,
747                            (unsigned) txq->queue_id,
748                            (unsigned) pkt_len,
749                            (unsigned) tx_id,
750                            (unsigned) tx_last);
751
752                 /*
753                  * Make sure there are enough TX descriptors available to
754                  * transmit the entire packet.
755                  * nb_used better be less than or equal to txq->tx_rs_thresh
756                  */
757                 if (nb_used > txq->nb_tx_free) {
758                         PMD_TX_FREE_LOG(DEBUG,
759                                         "Not enough free TX descriptors "
760                                         "nb_used=%4u nb_free=%4u "
761                                         "(port=%d queue=%d)",
762                                         nb_used, txq->nb_tx_free,
763                                         txq->port_id, txq->queue_id);
764
765                         if (ixgbe_xmit_cleanup(txq) != 0) {
766                                 /* Could not clean any descriptors */
767                                 if (nb_tx == 0)
768                                         return 0;
769                                 goto end_of_tx;
770                         }
771
772                         /* nb_used better be <= txq->tx_rs_thresh */
773                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
774                                 PMD_TX_FREE_LOG(DEBUG,
775                                         "The number of descriptors needed to "
776                                         "transmit the packet exceeds the "
777                                         "RS bit threshold. This will impact "
778                                         "performance. "
779                                         "nb_used=%4u nb_free=%4u "
780                                         "tx_rs_thresh=%4u. "
781                                         "(port=%d queue=%d)",
782                                         nb_used, txq->nb_tx_free,
783                                         txq->tx_rs_thresh,
784                                         txq->port_id, txq->queue_id);
785                                 /*
786                                  * Loop here until there are enough TX
787                                  * descriptors or until the ring cannot be
788                                  * cleaned.
789                                  */
790                                 while (nb_used > txq->nb_tx_free) {
791                                         if (ixgbe_xmit_cleanup(txq) != 0) {
792                                                 /*
793                                                  * Could not clean any
794                                                  * descriptors
795                                                  */
796                                                 if (nb_tx == 0)
797                                                         return 0;
798                                                 goto end_of_tx;
799                                         }
800                                 }
801                         }
802                 }
803
804                 /*
805                  * By now there are enough free TX descriptors to transmit
806                  * the packet.
807                  */
808
809                 /*
810                  * Set common flags of all TX Data Descriptors.
811                  *
812                  * The following bits must be set in all Data Descriptors:
813                  *   - IXGBE_ADVTXD_DTYP_DATA
814                  *   - IXGBE_ADVTXD_DCMD_DEXT
815                  *
816                  * The following bits must be set in the first Data Descriptor
817                  * and are ignored in the other ones:
818                  *   - IXGBE_ADVTXD_DCMD_IFCS
819                  *   - IXGBE_ADVTXD_MAC_1588
820                  *   - IXGBE_ADVTXD_DCMD_VLE
821                  *
822                  * The following bits must only be set in the last Data
823                  * Descriptor:
824                  *   - IXGBE_TXD_CMD_EOP
825                  *
826                  * The following bits can be set in any Data Descriptor, but
827                  * are only set in the last Data Descriptor:
828                  *   - IXGBE_TXD_CMD_RS
829                  */
830                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
831                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
832
833 #ifdef RTE_LIBRTE_IEEE1588
834                 if (ol_flags & PKT_TX_IEEE1588_TMST)
835                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
836 #endif
837
838                 olinfo_status = 0;
839                 if (tx_ol_req) {
840
841                         if (ol_flags & PKT_TX_TCP_SEG) {
842                                 /* when TSO is on, the paylen in the descriptor is
843                                  * not the packet length but the TCP payload length */
844                                 pkt_len -= (tx_offload.l2_len +
845                                         tx_offload.l3_len + tx_offload.l4_len);
846                         }
847
848                         /*
849                          * Setup the TX Advanced Context Descriptor if required
850                          */
851                         if (new_ctx) {
852                                 volatile struct ixgbe_adv_tx_context_desc *
853                                     ctx_txd;
854
855                                 ctx_txd = (volatile struct
856                                     ixgbe_adv_tx_context_desc *)
857                                     &txr[tx_id];
858
859                                 txn = &sw_ring[txe->next_id];
860                                 rte_prefetch0(&txn->mbuf->pool);
861
862                                 if (txe->mbuf != NULL) {
863                                         rte_pktmbuf_free_seg(txe->mbuf);
864                                         txe->mbuf = NULL;
865                                 }
866
867                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
868                                         tx_offload, &tx_pkt->udata64);
869
870                                 txe->last_id = tx_last;
871                                 tx_id = txe->next_id;
872                                 txe = txn;
873                         }
874
875                         /*
876                          * Set up the TX Advanced Data Descriptor.
877                          * This path is taken whether a new context descriptor
878                          * was built or an existing one is reused.
879                          */
880                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
881                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
882                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
883                 }
884
885                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
886 #ifdef RTE_LIBRTE_SECURITY
887                 if (use_ipsec)
888                         olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
889 #endif
890
891                 m_seg = tx_pkt;
892                 do {
893                         txd = &txr[tx_id];
894                         txn = &sw_ring[txe->next_id];
895                         rte_prefetch0(&txn->mbuf->pool);
896
897                         if (txe->mbuf != NULL)
898                                 rte_pktmbuf_free_seg(txe->mbuf);
899                         txe->mbuf = m_seg;
900
901                         /*
902                          * Set up Transmit Data Descriptor.
903                          */
904                         slen = m_seg->data_len;
905                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
906                         txd->read.buffer_addr =
907                                 rte_cpu_to_le_64(buf_dma_addr);
908                         txd->read.cmd_type_len =
909                                 rte_cpu_to_le_32(cmd_type_len | slen);
910                         txd->read.olinfo_status =
911                                 rte_cpu_to_le_32(olinfo_status);
912                         txe->last_id = tx_last;
913                         tx_id = txe->next_id;
914                         txe = txn;
915                         m_seg = m_seg->next;
916                 } while (m_seg != NULL);
917
918                 /*
919                  * The last packet data descriptor needs End Of Packet (EOP)
920                  */
921                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
922                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
923                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
924
925                 /* Set RS bit only on threshold packets' last descriptor */
926                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
927                         PMD_TX_FREE_LOG(DEBUG,
928                                         "Setting RS bit on TXD id="
929                                         "%4u (port=%d queue=%d)",
930                                         tx_last, txq->port_id, txq->queue_id);
931
932                         cmd_type_len |= IXGBE_TXD_CMD_RS;
933
934                         /* Update txq RS bit counters */
935                         txq->nb_tx_used = 0;
936                         txp = NULL;
937                 } else
938                         txp = txd;
939
940                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
941         }
942
943 end_of_tx:
944         /* set RS on last packet in the burst */
945         if (txp != NULL)
946                 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
947
948         rte_wmb();
949
950         /*
951          * Set the Transmit Descriptor Tail (TDT)
952          */
953         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
954                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
955                    (unsigned) tx_id, (unsigned) nb_tx);
956         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
957         txq->tx_tail = tx_id;
958
959         return nb_tx;
960 }
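/*
 * Usage sketch (illustrative, not part of this driver): to exercise the
 * checksum offload path handled above, an application typically fills in
 * the mbuf metadata before handing the packet to rte_eth_tx_burst(), e.g.
 *
 *     m->l2_len = sizeof(struct rte_ether_hdr);
 *     m->l3_len = sizeof(struct rte_ipv4_hdr);
 *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * For L4 checksum offload the L4 checksum field is also expected to hold the
 * pseudo-header checksum; rte_eth_tx_prepare() (ixgbe_prep_pkts() below)
 * takes care of that. The exact flags depend on the offloads negotiated at
 * queue setup.
 */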
961
962 /*********************************************************************
963  *
964  *  TX prep functions
965  *
966  **********************************************************************/
967 uint16_t
968 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
969 {
970         int i, ret;
971         uint64_t ol_flags;
972         struct rte_mbuf *m;
973         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
974
975         for (i = 0; i < nb_pkts; i++) {
976                 m = tx_pkts[i];
977                 ol_flags = m->ol_flags;
978
979                 /**
980                  * Check if packet meets requirements for number of segments
981                  *
982                  * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
983                  *       non-TSO
984                  */
985
986                 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
987                         rte_errno = EINVAL;
988                         return i;
989                 }
990
991                 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
992                         rte_errno = ENOTSUP;
993                         return i;
994                 }
995
996 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
997                 ret = rte_validate_tx_offload(m);
998                 if (ret != 0) {
999                         rte_errno = -ret;
1000                         return i;
1001                 }
1002 #endif
1003                 ret = rte_net_intel_cksum_prepare(m);
1004                 if (ret != 0) {
1005                         rte_errno = -ret;
1006                         return i;
1007                 }
1008         }
1009
1010         return i;
1011 }
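/*
 * Usage sketch (illustrative): applications normally reach this callback
 * through rte_eth_tx_prepare() before transmitting, e.g.
 *
 *     uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *     if (nb_prep != nb_pkts)
 *             handle_error(pkts[nb_prep]);  // offending mbuf, reason in rte_errno
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * where handle_error() stands in for a hypothetical application-side helper.
 */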
1012
1013 /*********************************************************************
1014  *
1015  *  RX functions
1016  *
1017  **********************************************************************/
1018
1019 #define IXGBE_PACKET_TYPE_ETHER                         0X00
1020 #define IXGBE_PACKET_TYPE_IPV4                          0X01
1021 #define IXGBE_PACKET_TYPE_IPV4_TCP                      0X11
1022 #define IXGBE_PACKET_TYPE_IPV4_UDP                      0X21
1023 #define IXGBE_PACKET_TYPE_IPV4_SCTP                     0X41
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT                      0X03
1025 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP                  0X13
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP                  0X23
1027 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP                 0X43
1028 #define IXGBE_PACKET_TYPE_IPV6                          0X04
1029 #define IXGBE_PACKET_TYPE_IPV6_TCP                      0X14
1030 #define IXGBE_PACKET_TYPE_IPV6_UDP                      0X24
1031 #define IXGBE_PACKET_TYPE_IPV6_SCTP                     0X44
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT                      0X0C
1033 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP                  0X1C
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP                  0X2C
1035 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP                 0X4C
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6                     0X05
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP                 0X15
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP                 0X25
1039 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP                0X45
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6                 0X07
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP             0X17
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP             0X27
1043 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP            0X47
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT                 0X0D
1045 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP             0X1D
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP             0X2D
1047 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP            0X4D
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT             0X0F
1049 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP         0X1F
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP         0X2F
1051 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP        0X4F
1052
1053 #define IXGBE_PACKET_TYPE_NVGRE                   0X00
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4              0X01
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP          0X11
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP          0X21
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP         0X41
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT          0X03
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0X13
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0X23
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0X43
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6              0X04
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP          0X14
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP          0X24
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP         0X44
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT          0X0C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0X1C
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0X2C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0X4C
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6         0X05
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0X15
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0X25
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0X0D
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1075 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1076
1077 #define IXGBE_PACKET_TYPE_VXLAN                   0X80
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4              0X81
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0X93
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0XA3
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0XC3
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6              0X84
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP          0X94
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP          0XA4
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP         0XC4
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT          0X8C
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0X9C
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0XAC
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0XCC
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6         0X85
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0X95
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0XA5
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0X8D
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1099 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1100
1101 /**
1102  * Use two different tables for normal packets and tunnel packets
1103  * in order to save space.
1104  */
1105 const uint32_t
1106         ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1107         [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1108         [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1109                 RTE_PTYPE_L3_IPV4,
1110         [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1111                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1112         [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1113                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1114         [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1115                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1116         [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1117                 RTE_PTYPE_L3_IPV4_EXT,
1118         [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1119                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1120         [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1121                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1122         [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1123                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1124         [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1125                 RTE_PTYPE_L3_IPV6,
1126         [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1127                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1128         [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1129                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1130         [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1131                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1132         [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1133                 RTE_PTYPE_L3_IPV6_EXT,
1134         [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1135                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1136         [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1137                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1138         [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1139                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1140         [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1141                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142                 RTE_PTYPE_INNER_L3_IPV6,
1143         [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1144                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1146         [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1147                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1149         [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1150                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1151                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1152         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1153                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154                 RTE_PTYPE_INNER_L3_IPV6,
1155         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1156                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1158         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1159                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1161         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1162                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1163                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1164         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1165                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1167         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1168                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1170         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1171                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1173         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1174                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1175                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1176         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1177                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1179         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1180                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1182         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1183                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1184                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1185         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1186                 RTE_PTYPE_L2_ETHER |
1187                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1188                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1189 };
1190
1191 const uint32_t
1192         ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1193         [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1194                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195                 RTE_PTYPE_INNER_L2_ETHER,
1196         [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1197                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1199         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1200                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1202         [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1203                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1205         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1206                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1208         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1211         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1212                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1214         [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1215                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1216                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1217                 RTE_PTYPE_INNER_L4_TCP,
1218         [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1219                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1220                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1221                 RTE_PTYPE_INNER_L4_TCP,
1222         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1223                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1225         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1226                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1227                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1228                 RTE_PTYPE_INNER_L4_TCP,
1229         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1230                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1231                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1232                 RTE_PTYPE_INNER_L3_IPV4,
1233         [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1234                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1235                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1236                 RTE_PTYPE_INNER_L4_UDP,
1237         [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1238                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1239                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1240                 RTE_PTYPE_INNER_L4_UDP,
1241         [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1242                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1243                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1244                 RTE_PTYPE_INNER_L4_SCTP,
1245         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1246                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1248         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1249                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1250                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1251                 RTE_PTYPE_INNER_L4_UDP,
1252         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1253                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1254                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1255                 RTE_PTYPE_INNER_L4_SCTP,
1256         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1257                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1258                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1259                 RTE_PTYPE_INNER_L3_IPV4,
1260         [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1261                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1262                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1263                 RTE_PTYPE_INNER_L4_SCTP,
1264         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1265                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1266                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1267                 RTE_PTYPE_INNER_L4_SCTP,
1268         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1269                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1270                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1271                 RTE_PTYPE_INNER_L4_TCP,
1272         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1273                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1274                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1275                 RTE_PTYPE_INNER_L4_UDP,
1276
1277         [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1278                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1280         [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1281                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1282                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1283                 RTE_PTYPE_INNER_L3_IPV4,
1284         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1285                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1286                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1287                 RTE_PTYPE_INNER_L3_IPV4_EXT,
1288         [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1289                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1290                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1291                 RTE_PTYPE_INNER_L3_IPV6,
1292         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1293                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1294                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1295                 RTE_PTYPE_INNER_L3_IPV4,
1296         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1297                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1298                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1299                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1300         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1301                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1302                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1303                 RTE_PTYPE_INNER_L3_IPV4,
1304         [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1305                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1306                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1307                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1308         [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1309                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1310                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1311                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1312         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1313                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1314                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1315                 RTE_PTYPE_INNER_L3_IPV4,
1316         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1317                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1318                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1319                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1320         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1321                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1322                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1323                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1324         [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1325                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1326                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1327                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1328         [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1329                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1330                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1331                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1332         [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1333                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1334                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1335                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1336         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1337                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1338                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1339                 RTE_PTYPE_INNER_L3_IPV4,
1340         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1341                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1342                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1343                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1344         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1345                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1346                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1347                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1348         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1349                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1350                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1351                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1352         [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1353                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1354                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1355                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1356         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1357                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1358                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1359                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1360         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1361                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1362                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1363                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1364         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1365                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1366                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1367                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1368 };
1369
1370 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1371 static inline uint32_t
1372 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1373 {
1374
1375         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1376                 return RTE_PTYPE_UNKNOWN;
1377
1378         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1379
1380         /* For tunnel packet */
1381         if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1382                 /* Remove the tunnel bit to save table space. */
1383                 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1384                 return ptype_table_tn[pkt_info];
1385         }
1386
1387         /**
1388          * For x550, when the packet is not a tunnel packet the
1389          * tunnel type bits must be cleared, so the 82599 mask
1390          * (and table) is reused.
1391          */
1392         pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1393
1394         return ptype_table[pkt_info];
1395 }
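/*
 * Illustrative sketch, not a call made verbatim anywhere above: a receive
 * path typically feeds the lower dword of the writeback descriptor into the
 * helper, assuming rxq->pkt_type_mask was initialised at queue setup time.
 *
 *	uint32_t info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 *	uint32_t ptype = ixgbe_rxd_pkt_info_to_pkt_type(info,
 *							rxq->pkt_type_mask);
 *
 * The IXGBE_PACKET_TYPE_TUNNEL_BIT in the shifted value selects
 * ptype_table_tn[] (the VXLAN/NVGRE entries); otherwise ptype_table[] is
 * used.
 */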
1396
1397 static inline uint64_t
1398 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1399 {
1400         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1401                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1402                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1403                 PKT_RX_RSS_HASH, 0, 0, 0,
1404                 0, 0, 0,  PKT_RX_FDIR,
1405         };
1406 #ifdef RTE_LIBRTE_IEEE1588
1407         static uint64_t ip_pkt_etqf_map[8] = {
1408                 0, 0, 0, PKT_RX_IEEE1588_PTP,
1409                 0, 0, 0, 0,
1410         };
1411
1412         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1413                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1414                                 ip_rss_types_map[pkt_info & 0XF];
1415         else
1416                 return ip_rss_types_map[pkt_info & 0XF];
1417 #else
1418         return ip_rss_types_map[pkt_info & 0XF];
1419 #endif
1420 }
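/*
 * Worked example (illustrative only), derived from ip_rss_types_map[]
 * above: a pkt_info whose low nibble (the RSS type field) is 1 yields
 * PKT_RX_RSS_HASH, while a low nibble of 0xF marks a flow-director match
 * and yields PKT_RX_FDIR, telling the caller which member of mbuf->hash
 * is valid.
 */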
1421
1422 static inline uint64_t
1423 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1424 {
1425         uint64_t pkt_flags;
1426
1427         /*
1428          * Check only whether a VLAN is present.
1429          * Do not check whether the NIC computed the L3/L4 Rx checksum;
1430          * that can be determined from the rte_eth_rxmode.offloads flags.
1431          */
1432         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  vlan_flags : 0;
1433
1434 #ifdef RTE_LIBRTE_IEEE1588
1435         if (rx_status & IXGBE_RXD_STAT_TMST)
1436                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1437 #endif
1438         return pkt_flags;
1439 }
1440
1441 static inline uint64_t
1442 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1443 {
1444         uint64_t pkt_flags;
1445
1446         /*
1447          * Bit 31: IPE, IPv4 checksum error
1448          * Bit 30: L4I, L4 integrity error
1449          */
1450         static uint64_t error_to_pkt_flags_map[4] = {
1451                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1452                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1453                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1454                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1455         };
1456         pkt_flags = error_to_pkt_flags_map[(rx_status >>
1457                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1458
1459         if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1460             (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1461                 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1462         }
1463
1464 #ifdef RTE_LIBRTE_SECURITY
1465         if (rx_status & IXGBE_RXD_STAT_SECP) {
1466                 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1467                 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1468                         pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1469         }
1470 #endif
1471
1472         return pkt_flags;
1473 }
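/*
 * Worked example (illustrative only), derived from the table above: the
 * two-bit index is built from bit 31 (IPE) and bit 30 (L4I) of the status
 * word, so a descriptor with neither bit set maps to index 0
 * (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD), while one with only the
 * IPv4 checksum error bit set maps to index 2
 * (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD).
 */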
1474
1475 /*
1476  * LOOK_AHEAD defines how many desc statuses to check beyond the
1477  * current descriptor.
1478  * It must be a compile-time constant (#define) for optimal performance.
1479  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1480  * function only works with LOOK_AHEAD=8.
1481  */
1482 #define LOOK_AHEAD 8
1483 #if (LOOK_AHEAD != 8)
1484 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1485 #endif
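/*
 * Illustrative note, assuming the usual RTE_PMD_IXGBE_RX_MAX_BURST of 32:
 * ixgbe_rx_scan_hw_ring() below walks the ring in groups of LOOK_AHEAD (8)
 * descriptors, i.e. at most 32 / 8 = 4 groups per call, and stops at the
 * first group in which fewer than 8 descriptors have their DD bit set.
 */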
1486 static inline int
1487 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1488 {
1489         volatile union ixgbe_adv_rx_desc *rxdp;
1490         struct ixgbe_rx_entry *rxep;
1491         struct rte_mbuf *mb;
1492         uint16_t pkt_len;
1493         uint64_t pkt_flags;
1494         int nb_dd;
1495         uint32_t s[LOOK_AHEAD];
1496         uint32_t pkt_info[LOOK_AHEAD];
1497         int i, j, nb_rx = 0;
1498         uint32_t status;
1499         uint64_t vlan_flags = rxq->vlan_flags;
1500
1501         /* get references to current descriptor and S/W ring entry */
1502         rxdp = &rxq->rx_ring[rxq->rx_tail];
1503         rxep = &rxq->sw_ring[rxq->rx_tail];
1504
1505         status = rxdp->wb.upper.status_error;
1506         /* check to make sure there is at least 1 packet to receive */
1507         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1508                 return 0;
1509
1510         /*
1511          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1512          * reference packets that are ready to be received.
1513          */
1514         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1515              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1516                 /* Read desc statuses backwards to avoid race condition */
1517                 for (j = 0; j < LOOK_AHEAD; j++)
1518                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1519
1520                 rte_smp_rmb();
1521
1522                 /* Compute how many status bits were set */
1523                 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1524                                 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1525                         ;
1526
1527                 for (j = 0; j < nb_dd; j++)
1528                         pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1529                                                        lo_dword.data);
1530
1531                 nb_rx += nb_dd;
1532
1533                 /* Translate descriptor info to mbuf format */
1534                 for (j = 0; j < nb_dd; ++j) {
1535                         mb = rxep[j].mbuf;
1536                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1537                                   rxq->crc_len;
1538                         mb->data_len = pkt_len;
1539                         mb->pkt_len = pkt_len;
1540                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1541
1542                         /* convert descriptor fields to rte mbuf flags */
1543                         pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1544                                 vlan_flags);
1545                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1546                         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1547                                         ((uint16_t)pkt_info[j]);
1548                         mb->ol_flags = pkt_flags;
1549                         mb->packet_type =
1550                                 ixgbe_rxd_pkt_info_to_pkt_type
1551                                         (pkt_info[j], rxq->pkt_type_mask);
1552
1553                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1554                                 mb->hash.rss = rte_le_to_cpu_32(
1555                                     rxdp[j].wb.lower.hi_dword.rss);
1556                         else if (pkt_flags & PKT_RX_FDIR) {
1557                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1558                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1559                                     IXGBE_ATR_HASH_MASK;
1560                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1561                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1562                         }
1563                 }
1564
1565                 /* Move mbuf pointers from the S/W ring to the stage */
1566                 for (j = 0; j < LOOK_AHEAD; ++j) {
1567                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1568                 }
1569
1570                 /* stop if this group of LOOK_AHEAD descriptors was not all done */
1571                 if (nb_dd != LOOK_AHEAD)
1572                         break;
1573         }
1574
1575         /* clear software ring entries so we can cleanup correctly */
1576         for (i = 0; i < nb_rx; ++i) {
1577                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1578         }
1579
1580
1581         return nb_rx;
1582 }
1583
1584 static inline int
1585 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1586 {
1587         volatile union ixgbe_adv_rx_desc *rxdp;
1588         struct ixgbe_rx_entry *rxep;
1589         struct rte_mbuf *mb;
1590         uint16_t alloc_idx;
1591         __le64 dma_addr;
1592         int diag, i;
1593
1594         /* allocate buffers in bulk directly into the S/W ring */
1595         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1596         rxep = &rxq->sw_ring[alloc_idx];
1597         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1598                                     rxq->rx_free_thresh);
1599         if (unlikely(diag != 0))
1600                 return -ENOMEM;
1601
1602         rxdp = &rxq->rx_ring[alloc_idx];
1603         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1604                 /* populate the static rte mbuf fields */
1605                 mb = rxep[i].mbuf;
1606                 if (reset_mbuf) {
1607                         mb->port = rxq->port_id;
1608                 }
1609
1610                 rte_mbuf_refcnt_set(mb, 1);
1611                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1612
1613                 /* populate the descriptors */
1614                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1615                 rxdp[i].read.hdr_addr = 0;
1616                 rxdp[i].read.pkt_addr = dma_addr;
1617         }
1618
1619         /* update state of internal queue structure */
1620         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1621         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1622                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1623
1624         /* no errors */
1625         return 0;
1626 }
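/*
 * Worked example (illustrative only) of the rx_free_trigger arithmetic
 * above, assuming nb_rx_desc = 128, rx_free_thresh = 32 and an initial
 * rx_free_trigger of 31 (rx_free_thresh - 1): the first refill writes
 * descriptors 0..31 and moves the trigger to 63, the next writes 32..63
 * and moves it to 95, and after the refill that ends at descriptor 127
 * the trigger wraps back to 31.
 */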
1627
1628 static inline uint16_t
1629 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1630                          uint16_t nb_pkts)
1631 {
1632         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1633         int i;
1634
1635         /* how many packets are ready to return? */
1636         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1637
1638         /* copy mbuf pointers to the application's packet list */
1639         for (i = 0; i < nb_pkts; ++i)
1640                 rx_pkts[i] = stage[i];
1641
1642         /* update internal queue state */
1643         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1644         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1645
1646         return nb_pkts;
1647 }
1648
1649 static inline uint16_t
1650 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1651              uint16_t nb_pkts)
1652 {
1653         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1654         uint16_t nb_rx = 0;
1655
1656         /* Any previously recv'd pkts will be returned from the Rx stage */
1657         if (rxq->rx_nb_avail)
1658                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1659
1660         /* Scan the H/W ring for packets to receive */
1661         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1662
1663         /* update internal queue state */
1664         rxq->rx_next_avail = 0;
1665         rxq->rx_nb_avail = nb_rx;
1666         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1667
1668         /* if required, allocate new buffers to replenish descriptors */
1669         if (rxq->rx_tail > rxq->rx_free_trigger) {
1670                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1671
1672                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1673                         int i, j;
1674
1675                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1676                                    "queue_id=%u", (unsigned) rxq->port_id,
1677                                    (unsigned) rxq->queue_id);
1678
1679                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1680                                 rxq->rx_free_thresh;
1681
1682                         /*
1683                          * Need to rewind any previous receives if we cannot
1684                          * allocate new buffers to replenish the old ones.
1685                          */
1686                         rxq->rx_nb_avail = 0;
1687                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1688                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1689                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1690
1691                         return 0;
1692                 }
1693
1694                 /* update tail pointer */
1695                 rte_wmb();
1696                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1697                                             cur_free_trigger);
1698         }
1699
1700         if (rxq->rx_tail >= rxq->nb_rx_desc)
1701                 rxq->rx_tail = 0;
1702
1703         /* received any packets this loop? */
1704         if (rxq->rx_nb_avail)
1705                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1706
1707         return 0;
1708 }
1709
1710 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1711 uint16_t
1712 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1713                            uint16_t nb_pkts)
1714 {
1715         uint16_t nb_rx;
1716
1717         if (unlikely(nb_pkts == 0))
1718                 return 0;
1719
1720         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1721                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1722
1723         /* request is relatively large, chunk it up */
1724         nb_rx = 0;
1725         while (nb_pkts) {
1726                 uint16_t ret, n;
1727
1728                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1729                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1730                 nb_rx = (uint16_t)(nb_rx + ret);
1731                 nb_pkts = (uint16_t)(nb_pkts - ret);
1732                 if (ret < n)
1733                         break;
1734         }
1735
1736         return nb_rx;
1737 }
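/*
 * Worked example (illustrative only), assuming the usual
 * RTE_PMD_IXGBE_RX_MAX_BURST of 32: a request for 100 packets is served as
 * chunks of 32, 32, 32 and 4; if any chunk returns fewer packets than
 * requested, the loop stops early and the partial total is returned.
 */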
1738
1739 uint16_t
1740 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1741                 uint16_t nb_pkts)
1742 {
1743         struct ixgbe_rx_queue *rxq;
1744         volatile union ixgbe_adv_rx_desc *rx_ring;
1745         volatile union ixgbe_adv_rx_desc *rxdp;
1746         struct ixgbe_rx_entry *sw_ring;
1747         struct ixgbe_rx_entry *rxe;
1748         struct rte_mbuf *rxm;
1749         struct rte_mbuf *nmb;
1750         union ixgbe_adv_rx_desc rxd;
1751         uint64_t dma_addr;
1752         uint32_t staterr;
1753         uint32_t pkt_info;
1754         uint16_t pkt_len;
1755         uint16_t rx_id;
1756         uint16_t nb_rx;
1757         uint16_t nb_hold;
1758         uint64_t pkt_flags;
1759         uint64_t vlan_flags;
1760
1761         nb_rx = 0;
1762         nb_hold = 0;
1763         rxq = rx_queue;
1764         rx_id = rxq->rx_tail;
1765         rx_ring = rxq->rx_ring;
1766         sw_ring = rxq->sw_ring;
1767         vlan_flags = rxq->vlan_flags;
1768         while (nb_rx < nb_pkts) {
1769                 /*
1770                  * The order of operations here is important as the DD status
1771                  * bit must not be read after any other descriptor fields.
1772                  * rx_ring and rxdp are pointing to volatile data so the order
1773                  * of accesses cannot be reordered by the compiler. If they were
1774                  * not volatile, they could be reordered which could lead to
1775                  * using invalid descriptor fields when read from rxd.
1776                  */
1777                 rxdp = &rx_ring[rx_id];
1778                 staterr = rxdp->wb.upper.status_error;
1779                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1780                         break;
1781                 rxd = *rxdp;
1782
1783                 /*
1784                  * End of packet.
1785                  *
1786                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1787                  * is likely to be invalid and to be dropped by the various
1788                  * validation checks performed by the network stack.
1789                  *
1790                  * Allocate a new mbuf to replenish the RX ring descriptor.
1791                  * If the allocation fails:
1792                  *    - arrange for that RX descriptor to be the first one
1793                  *      being parsed the next time the receive function is
1794                  *      invoked [on the same queue].
1795                  *
1796                  *    - Stop parsing the RX ring and return immediately.
1797                  *
1798                  * This policy does not drop the packet received in the RX
1799                  * descriptor for which the allocation of a new mbuf failed.
1800                  * Thus, it allows that packet to be later retrieved if
1801                  * mbufs have been freed in the meantime.
1802                  * As a side effect, holding RX descriptors instead of
1803                  * systematically giving them back to the NIC may lead to
1804                  * RX ring exhaustion situations.
1805                  * However, the NIC can gracefully prevent such situations
1806                  * from happening by sending specific "back-pressure" flow control
1807                  * frames to its peer(s).
1808                  */
1809                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1810                            "ext_err_stat=0x%08x pkt_len=%u",
1811                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1812                            (unsigned) rx_id, (unsigned) staterr,
1813                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1814
1815                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1816                 if (nmb == NULL) {
1817                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1818                                    "queue_id=%u", (unsigned) rxq->port_id,
1819                                    (unsigned) rxq->queue_id);
1820                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1821                         break;
1822                 }
1823
1824                 nb_hold++;
1825                 rxe = &sw_ring[rx_id];
1826                 rx_id++;
1827                 if (rx_id == rxq->nb_rx_desc)
1828                         rx_id = 0;
1829
1830                 /* Prefetch next mbuf while processing current one. */
1831                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1832
1833                 /*
1834                  * When next RX descriptor is on a cache-line boundary,
1835                  * prefetch the next 4 RX descriptors and the next 8 pointers
1836                  * to mbufs.
1837                  */
1838                 if ((rx_id & 0x3) == 0) {
1839                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1840                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1841                 }
1842
1843                 rxm = rxe->mbuf;
1844                 rxe->mbuf = nmb;
1845                 dma_addr =
1846                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1847                 rxdp->read.hdr_addr = 0;
1848                 rxdp->read.pkt_addr = dma_addr;
1849
1850                 /*
1851                  * Initialize the returned mbuf.
1852                  * 1) setup generic mbuf fields:
1853                  *    - number of segments,
1854                  *    - next segment,
1855                  *    - packet length,
1856                  *    - RX port identifier.
1857                  * 2) integrate hardware offload data, if any:
1858                  *    - RSS flag & hash,
1859                  *    - IP checksum flag,
1860                  *    - VLAN TCI, if any,
1861                  *    - error flags.
1862                  */
1863                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1864                                       rxq->crc_len);
1865                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1866                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1867                 rxm->nb_segs = 1;
1868                 rxm->next = NULL;
1869                 rxm->pkt_len = pkt_len;
1870                 rxm->data_len = pkt_len;
1871                 rxm->port = rxq->port_id;
1872
1873                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1874                 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1875                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1876
1877                 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1878                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1879                 pkt_flags = pkt_flags |
1880                         ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1881                 rxm->ol_flags = pkt_flags;
1882                 rxm->packet_type =
1883                         ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1884                                                        rxq->pkt_type_mask);
1885
1886                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1887                         rxm->hash.rss = rte_le_to_cpu_32(
1888                                                 rxd.wb.lower.hi_dword.rss);
1889                 else if (pkt_flags & PKT_RX_FDIR) {
1890                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1891                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1892                                         IXGBE_ATR_HASH_MASK;
1893                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1894                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1895                 }
1896                 /*
1897                  * Store the mbuf address into the next entry of the array
1898                  * of returned packets.
1899                  */
1900                 rx_pkts[nb_rx++] = rxm;
1901         }
1902         rxq->rx_tail = rx_id;
1903
1904         /*
1905          * If the number of free RX descriptors is greater than the RX free
1906          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1907          * register.
1908          * Update the RDT with the value of the last processed RX descriptor
1909          * minus 1, to guarantee that the RDT register is never equal to the
1910          * RDH register, which creates a "full" ring situation from the
1911          * hardware point of view...
1912          */
1913         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1914         if (nb_hold > rxq->rx_free_thresh) {
1915                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1916                            "nb_hold=%u nb_rx=%u",
1917                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1918                            (unsigned) rx_id, (unsigned) nb_hold,
1919                            (unsigned) nb_rx);
1920                 rx_id = (uint16_t) ((rx_id == 0) ?
1921                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1922                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1923                 nb_hold = 0;
1924         }
1925         rxq->nb_rx_hold = nb_hold;
1926         return nb_rx;
1927 }
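/*
 * Illustrative usage sketch: applications do not call ixgbe_recv_pkts()
 * directly; they reach whichever Rx handler the PMD selected through the
 * generic burst API (port/queue 0 and BURST are placeholders here):
 *
 *	struct rte_mbuf *pkts[BURST];
 *	uint16_t n = rte_eth_rx_burst(0, 0, pkts, BURST);
 *
 *	for (uint16_t i = 0; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);	// or hand off for processing
 */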
1928
1929 /**
1930  * Return the RSC count field of a descriptor (non-zero for RSC descriptors).
1931  */
1932 static inline uint32_t
1933 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1934 {
1935         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1936                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1937 }
1938
1939 /**
1940  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1941  *
1942  * Fill the following info in the HEAD buffer of the Rx cluster:
1943  *    - RX port identifier
1944  *    - hardware offload data, if any:
1945  *      - RSS flag & hash
1946  *      - IP checksum flag
1947  *      - VLAN TCI, if any
1948  *      - error flags
1949  * @head HEAD of the packet cluster
1950  * @desc HW descriptor to get data from
1951  * @rxq Pointer to the Rx queue
1952  */
1953 static inline void
1954 ixgbe_fill_cluster_head_buf(
1955         struct rte_mbuf *head,
1956         union ixgbe_adv_rx_desc *desc,
1957         struct ixgbe_rx_queue *rxq,
1958         uint32_t staterr)
1959 {
1960         uint32_t pkt_info;
1961         uint64_t pkt_flags;
1962
1963         head->port = rxq->port_id;
1964
1965         /* The vlan_tci field is only valid when PKT_RX_VLAN is
1966          * set in the pkt_flags field.
1967          */
1968         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1969         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1970         pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1971         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1972         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1973         head->ol_flags = pkt_flags;
1974         head->packet_type =
1975                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1976
1977         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1978                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1979         else if (pkt_flags & PKT_RX_FDIR) {
1980                 head->hash.fdir.hash =
1981                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1982                                                           & IXGBE_ATR_HASH_MASK;
1983                 head->hash.fdir.id =
1984                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1985         }
1986 }
1987
1988 /**
1989  * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1990  *
1991  * @rx_queue Rx queue handle
1992  * @rx_pkts table of received packets
1993  * @nb_pkts size of rx_pkts table
1994  * @bulk_alloc if TRUE, bulk allocation is used to refill the HW ring
1995  *
1996  * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1997  * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1998  *
1999  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
2000  * 1) When non-EOP RSC completion arrives:
2001  *    a) Update the HEAD of the current RSC aggregation cluster with the new
2002  *       segment's data length.
2003  *    b) Set the "next" pointer of the current segment to point to the segment
2004  *       at the NEXTP index.
2005  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2006  *       in the sw_rsc_ring.
2007  * 2) When EOP arrives we just update the cluster's total length and offload
2008  *    flags and deliver the cluster up to the upper layers. In our case - put it
2009  *    in the rx_pkts table.
2010  *
2011  * Returns the number of received packets/clusters (according to the "bulk
2012  * receive" interface).
2013  */
2014 static inline uint16_t
2015 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2016                     bool bulk_alloc)
2017 {
2018         struct ixgbe_rx_queue *rxq = rx_queue;
2019         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2020         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2021         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2022         uint16_t rx_id = rxq->rx_tail;
2023         uint16_t nb_rx = 0;
2024         uint16_t nb_hold = rxq->nb_rx_hold;
2025         uint16_t prev_id = rxq->rx_tail;
2026
2027         while (nb_rx < nb_pkts) {
2028                 bool eop;
2029                 struct ixgbe_rx_entry *rxe;
2030                 struct ixgbe_scattered_rx_entry *sc_entry;
2031                 struct ixgbe_scattered_rx_entry *next_sc_entry;
2032                 struct ixgbe_rx_entry *next_rxe = NULL;
2033                 struct rte_mbuf *first_seg;
2034                 struct rte_mbuf *rxm;
2035                 struct rte_mbuf *nmb = NULL;
2036                 union ixgbe_adv_rx_desc rxd;
2037                 uint16_t data_len;
2038                 uint16_t next_id;
2039                 volatile union ixgbe_adv_rx_desc *rxdp;
2040                 uint32_t staterr;
2041
2042 next_desc:
2043                 /*
2044                  * The code in this whole file uses the volatile pointer to
2045                  * ensure the read ordering of the status and the rest of the
2046                  * descriptor fields (on the compiler level only!!!). This is so
2047                  * UGLY - why not to just use the compiler barrier instead? DPDK
2048                  * even has the rte_compiler_barrier() for that.
2049                  *
2050                  * But most importantly this is just wrong because this doesn't
2051                  * ensure memory ordering in a general case at all. For
2052                  * instance, DPDK is supposed to work on Power CPUs where
2053                  * compiler barrier may just not be enough!
2054                  *
2055                  * I tried to write only this function properly to have a
2056                  * starting point (as a part of an LRO/RSC series) but the
2057                  * compiler cursed at me when I tried to cast away the
2058                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2059                  * keeping it the way it is for now.
2060                  *
2061                  * The code in this file is broken in so many other places and
2062                  * will just not work on a big endian CPU anyway therefore the
2063                  * lines below will have to be revisited together with the rest
2064                  * of the ixgbe PMD.
2065                  *
2066                  * TODO:
2067                  *    - Get rid of "volatile" and let the compiler do its job.
2068                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
2069                  *      memory ordering below.
2070                  */
2071                 rxdp = &rx_ring[rx_id];
2072                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2073
2074                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2075                         break;
2076
2077                 rxd = *rxdp;
2078
2079                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2080                                   "staterr=0x%x data_len=%u",
2081                            rxq->port_id, rxq->queue_id, rx_id, staterr,
2082                            rte_le_to_cpu_16(rxd.wb.upper.length));
2083
2084                 if (!bulk_alloc) {
2085                         nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2086                         if (nmb == NULL) {
2087                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2088                                                   "port_id=%u queue_id=%u",
2089                                            rxq->port_id, rxq->queue_id);
2090
2091                                 rte_eth_devices[rxq->port_id].data->
2092                                                         rx_mbuf_alloc_failed++;
2093                                 break;
2094                         }
2095                 } else if (nb_hold > rxq->rx_free_thresh) {
2096                         uint16_t next_rdt = rxq->rx_free_trigger;
2097
2098                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2099                                 rte_wmb();
2100                                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2101                                                             next_rdt);
2102                                 nb_hold -= rxq->rx_free_thresh;
2103                         } else {
2104                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2105                                                   "port_id=%u queue_id=%u",
2106                                            rxq->port_id, rxq->queue_id);
2107
2108                                 rte_eth_devices[rxq->port_id].data->
2109                                                         rx_mbuf_alloc_failed++;
2110                                 break;
2111                         }
2112                 }
2113
2114                 nb_hold++;
2115                 rxe = &sw_ring[rx_id];
2116                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2117
2118                 next_id = rx_id + 1;
2119                 if (next_id == rxq->nb_rx_desc)
2120                         next_id = 0;
2121
2122                 /* Prefetch next mbuf while processing current one. */
2123                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2124
2125                 /*
2126                  * When next RX descriptor is on a cache-line boundary,
2127                  * prefetch the next 4 RX descriptors and the next 4 pointers
2128                  * to mbufs.
2129                  */
2130                 if ((next_id & 0x3) == 0) {
2131                         rte_ixgbe_prefetch(&rx_ring[next_id]);
2132                         rte_ixgbe_prefetch(&sw_ring[next_id]);
2133                 }
2134
2135                 rxm = rxe->mbuf;
2136
2137                 if (!bulk_alloc) {
2138                         __le64 dma =
2139                           rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2140                         /*
2141                          * Update RX descriptor with the physical address of the
2142                          * data buffer of the newly allocated mbuf.
2143                          */
2144                         rxe->mbuf = nmb;
2145
2146                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
2147                         rxdp->read.hdr_addr = 0;
2148                         rxdp->read.pkt_addr = dma;
2149                 } else
2150                         rxe->mbuf = NULL;
2151
2152                 /*
2153                  * Set data length & data buffer address of mbuf.
2154                  */
2155                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2156                 rxm->data_len = data_len;
2157
2158                 if (!eop) {
2159                         uint16_t nextp_id;
2160                         /*
2161                          * Get next descriptor index:
2162                          *  - For RSC it's in the NEXTP field.
2163                          *  - For a scattered packet - it's just a following
2164                          *    descriptor.
2165                          */
2166                         if (ixgbe_rsc_count(&rxd))
2167                                 nextp_id =
2168                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2169                                                        IXGBE_RXDADV_NEXTP_SHIFT;
2170                         else
2171                                 nextp_id = next_id;
2172
2173                         next_sc_entry = &sw_sc_ring[nextp_id];
2174                         next_rxe = &sw_ring[nextp_id];
2175                         rte_ixgbe_prefetch(next_rxe);
2176                 }
2177
2178                 sc_entry = &sw_sc_ring[rx_id];
2179                 first_seg = sc_entry->fbuf;
2180                 sc_entry->fbuf = NULL;
2181
2182                 /*
2183                  * If this is the first buffer of the received packet,
2184                  * set the pointer to the first mbuf of the packet and
2185                  * initialize its context.
2186                  * Otherwise, update the total length and the number of segments
2187                  * of the current scattered packet, and update the pointer to
2188                  * the last mbuf of the current packet.
2189                  */
2190                 if (first_seg == NULL) {
2191                         first_seg = rxm;
2192                         first_seg->pkt_len = data_len;
2193                         first_seg->nb_segs = 1;
2194                 } else {
2195                         first_seg->pkt_len += data_len;
2196                         first_seg->nb_segs++;
2197                 }
2198
2199                 prev_id = rx_id;
2200                 rx_id = next_id;
2201
2202                 /*
2203                  * If this is not the last buffer of the received packet, update
2204                  * the pointer to the first mbuf at the NEXTP entry in the
2205                  * sw_sc_ring and continue to parse the RX ring.
2206                  */
2207                 if (!eop && next_rxe) {
2208                         rxm->next = next_rxe->mbuf;
2209                         next_sc_entry->fbuf = first_seg;
2210                         goto next_desc;
2211                 }
2212
2213                 /* Initialize the first mbuf of the returned packet */
2214                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2215
2216                 /*
2217                  * Deal with the case when HW CRC stripping is disabled.
2218                  * That can't happen when LRO is enabled, but still could
2219                  * happen for scattered RX mode.
2220                  */
2221                 first_seg->pkt_len -= rxq->crc_len;
2222                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2223                         struct rte_mbuf *lp;
2224
2225                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
2226                                 ;
2227
2228                         first_seg->nb_segs--;
2229                         lp->data_len -= rxq->crc_len - rxm->data_len;
2230                         lp->next = NULL;
2231                         rte_pktmbuf_free_seg(rxm);
2232                 } else
2233                         rxm->data_len -= rxq->crc_len;
2234
2235                 /* Prefetch data of first segment, if configured to do so. */
2236                 rte_packet_prefetch((char *)first_seg->buf_addr +
2237                         first_seg->data_off);
2238
2239                 /*
2240                  * Store the mbuf address into the next entry of the array
2241                  * of returned packets.
2242                  */
2243                 rx_pkts[nb_rx++] = first_seg;
2244         }
2245
2246         /*
2247          * Record index of the next RX descriptor to probe.
2248          */
2249         rxq->rx_tail = rx_id;
2250
2251         /*
2252          * If the number of free RX descriptors is greater than the RX free
2253          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2254          * register.
2255          * Update the RDT with the value of the last processed RX descriptor
2256          * minus 1, to guarantee that the RDT register is never equal to the
2257          * RDH register, which creates a "full" ring situation from the
2258          * hardware point of view...
2259          */
2260         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2261                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2262                            "nb_hold=%u nb_rx=%u",
2263                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2264
2265                 rte_wmb();
2266                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2267                 nb_hold = 0;
2268         }
2269
2270         rxq->nb_rx_hold = nb_hold;
2271         return nb_rx;
2272 }
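/*
 * Worked example (illustrative only) of the RSC bookkeeping above: suppose
 * hardware spreads one aggregated packet over descriptors 5, 9 and 13, with
 * the NEXTP field of descriptor 5 pointing at 9 and that of 9 pointing at
 * 13. The first pass stores the head mbuf in sw_sc_ring[9].fbuf, the second
 * pass moves it to sw_sc_ring[13].fbuf while chaining the segments through
 * rxm->next, and the EOP pass at descriptor 13 fills the head buffer and
 * places the cluster in rx_pkts[].
 */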
2273
2274 uint16_t
2275 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2276                                  uint16_t nb_pkts)
2277 {
2278         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2279 }
2280
2281 uint16_t
2282 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2283                                uint16_t nb_pkts)
2284 {
2285         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2286 }
2287
2288 /*********************************************************************
2289  *
2290  *  Queue management functions
2291  *
2292  **********************************************************************/
2293
2294 static void __attribute__((cold))
2295 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2296 {
2297         unsigned i;
2298
2299         if (txq->sw_ring != NULL) {
2300                 for (i = 0; i < txq->nb_tx_desc; i++) {
2301                         if (txq->sw_ring[i].mbuf != NULL) {
2302                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2303                                 txq->sw_ring[i].mbuf = NULL;
2304                         }
2305                 }
2306         }
2307 }
2308
2309 static void __attribute__((cold))
2310 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2311 {
2312         if (txq != NULL &&
2313             txq->sw_ring != NULL)
2314                 rte_free(txq->sw_ring);
2315 }
2316
2317 static void __attribute__((cold))
2318 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2319 {
2320         if (txq != NULL && txq->ops != NULL) {
2321                 txq->ops->release_mbufs(txq);
2322                 txq->ops->free_swring(txq);
2323                 rte_free(txq);
2324         }
2325 }
2326
2327 void __attribute__((cold))
2328 ixgbe_dev_tx_queue_release(void *txq)
2329 {
2330         ixgbe_tx_queue_release(txq);
2331 }
2332
2333 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2334 static void __attribute__((cold))
2335 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2336 {
2337         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2338         struct ixgbe_tx_entry *txe = txq->sw_ring;
2339         uint16_t prev, i;
2340
2341         /* Zero out HW ring memory */
2342         for (i = 0; i < txq->nb_tx_desc; i++) {
2343                 txq->tx_ring[i] = zeroed_desc;
2344         }
2345
2346         /* Initialize SW ring entries */
2347         prev = (uint16_t) (txq->nb_tx_desc - 1);
2348         for (i = 0; i < txq->nb_tx_desc; i++) {
2349                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2350
2351                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2352                 txe[i].mbuf = NULL;
2353                 txe[i].last_id = i;
2354                 txe[prev].next_id = i;
2355                 prev = i;
2356         }
2357
2358         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2359         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2360
2361         txq->tx_tail = 0;
2362         txq->nb_tx_used = 0;
2363         /*
2364          * Always allow 1 descriptor to be un-allocated to avoid
2365          * a H/W race condition
2366          */
2367         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2368         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2369         txq->ctx_curr = 0;
2370         memset((void *)&txq->ctx_cache, 0,
2371                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2372 }
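/*
 * Worked example (illustrative only) of the SW ring linking above: with
 * nb_tx_desc = 4 the loop produces the next_id chain 3->0, 0->1, 1->2,
 * 2->3, i.e. a circular list, while every descriptor starts out with its
 * DD bit set so the whole ring is initially seen as completed.
 */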
2373
2374 static const struct ixgbe_txq_ops def_txq_ops = {
2375         .release_mbufs = ixgbe_tx_queue_release_mbufs,
2376         .free_swring = ixgbe_tx_free_swring,
2377         .reset = ixgbe_reset_tx_queue,
2378 };
2379
2380 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2381  * the queue parameters. Used in tx_queue_setup by the primary process and then
2382  * in dev_init by a secondary process when attaching to an existing ethdev.
2383  */
2384 void __attribute__((cold))
2385 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2386 {
2387         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2388         if ((txq->offloads == 0) &&
2389 #ifdef RTE_LIBRTE_SECURITY
2390                         !(txq->using_ipsec) &&
2391 #endif
2392                         (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2393                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2394                 dev->tx_pkt_prepare = NULL;
2395 #ifdef RTE_IXGBE_INC_VECTOR
2396                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2397                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2398                                         ixgbe_txq_vec_setup(txq) == 0)) {
2399                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2400                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2401                 } else
2402 #endif
2403                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2404         } else {
2405                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2406                 PMD_INIT_LOG(DEBUG,
2407                                 " - offloads = 0x%" PRIx64,
2408                                 txq->offloads);
2409                 PMD_INIT_LOG(DEBUG,
2410                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2411                                 (unsigned long)txq->tx_rs_thresh,
2412                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2413                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2414                 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2415         }
2416 }
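/*
 * Illustrative note, assuming the usual RTE_PMD_IXGBE_TX_MAX_BURST of 32:
 * a queue configured with no Tx offloads and tx_rs_thresh >= 32 gets the
 * simple (or vector) transmit path and no tx_pkt_prepare callback, while
 * requesting any offload, e.g. DEV_TX_OFFLOAD_TCP_CKSUM, selects the
 * full-featured ixgbe_xmit_pkts()/ixgbe_prep_pkts() pair.
 */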
2417
2418 uint64_t
2419 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2420 {
2421         RTE_SET_USED(dev);
2422
2423         return 0;
2424 }
2425
2426 uint64_t
2427 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2428 {
2429         uint64_t tx_offload_capa;
2430         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2431
2432         tx_offload_capa =
2433                 DEV_TX_OFFLOAD_VLAN_INSERT |
2434                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2435                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2436                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2437                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2438                 DEV_TX_OFFLOAD_TCP_TSO     |
2439                 DEV_TX_OFFLOAD_MULTI_SEGS;
2440
2441         if (hw->mac.type == ixgbe_mac_82599EB ||
2442             hw->mac.type == ixgbe_mac_X540)
2443                 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2444
2445         if (hw->mac.type == ixgbe_mac_X550 ||
2446             hw->mac.type == ixgbe_mac_X550EM_x ||
2447             hw->mac.type == ixgbe_mac_X550EM_a)
2448                 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2449
2450 #ifdef RTE_LIBRTE_SECURITY
2451         if (dev->security_ctx)
2452                 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2453 #endif
2454         return tx_offload_capa;
2455 }
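/*
 * Illustrative usage sketch, with port_id and txmode_offloads as
 * placeholders: the capability mask built above is reported to applications
 * through rte_eth_dev_info_get() and is normally checked before enabling an
 * offload in the port configuration:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		txmode_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 */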
2456
2457 int __attribute__((cold))
2458 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2459                          uint16_t queue_idx,
2460                          uint16_t nb_desc,
2461                          unsigned int socket_id,
2462                          const struct rte_eth_txconf *tx_conf)
2463 {
2464         const struct rte_memzone *tz;
2465         struct ixgbe_tx_queue *txq;
2466         struct ixgbe_hw     *hw;
2467         uint16_t tx_rs_thresh, tx_free_thresh;
2468         uint64_t offloads;
2469
2470         PMD_INIT_FUNC_TRACE();
2471         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2472
2473         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2474
2475         /*
2476          * Validate number of transmit descriptors.
2477          * It must not exceed the hardware maximum and must be a multiple
2478          * of IXGBE_TXD_ALIGN.
2479          */
2480         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2481                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2482                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2483                 return -EINVAL;
2484         }
2485
2486         /*
2487          * The following two parameters control the setting of the RS bit on
2488          * transmit descriptors.
2489          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2490          * descriptors have been used.
2491          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2492          * descriptors are used or if the number of descriptors required
2493          * to transmit a packet is greater than the number of free TX
2494          * descriptors.
2495          * The following constraints must be satisfied:
2496          *  tx_rs_thresh must be greater than 0.
2497          *  tx_rs_thresh must be less than the size of the ring minus 2.
2498          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2499          *  tx_rs_thresh must be a divisor of the ring size.
2500          *  tx_free_thresh must be greater than 0.
2501          *  tx_free_thresh must be less than the size of the ring minus 3.
2502          *  tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2503          * One descriptor in the TX ring is used as a sentinel to avoid a
2504          * H/W race condition, hence the maximum threshold constraints.
2505          * When set to zero use default values.
2506          */
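        /*
         * Illustrative example (values chosen here, not taken from the
         * datasheet): nb_desc = 512, tx_free_thresh = 64 and
         * tx_rs_thresh = 32 satisfy every constraint above:
         * 32 > 0, 32 < 510, 32 <= 64, 512 % 32 == 0, 64 > 0, 64 < 509 and
         * 32 + 64 <= 512.
         */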
2507         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2508                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2509         /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2510         tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2511                         nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2512         if (tx_conf->tx_rs_thresh > 0)
2513                 tx_rs_thresh = tx_conf->tx_rs_thresh;
2514         if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2515                 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2516                              "exceed nb_desc. (tx_rs_thresh=%u "
2517                              "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2518                              (unsigned int)tx_rs_thresh,
2519                              (unsigned int)tx_free_thresh,
2520                              (unsigned int)nb_desc,
2521                              (int)dev->data->port_id,
2522                              (int)queue_idx);
2523                 return -(EINVAL);
2524         }
2525         if (tx_rs_thresh >= (nb_desc - 2)) {
2526                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2527                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
2528                         "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2529                         (int)dev->data->port_id, (int)queue_idx);
2530                 return -(EINVAL);
2531         }
2532         if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2533                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2534                         "(tx_rs_thresh=%u port=%d queue=%d)",
2535                         DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2536                         (int)dev->data->port_id, (int)queue_idx);
2537                 return -(EINVAL);
2538         }
2539         if (tx_free_thresh >= (nb_desc - 3)) {
2540                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2541                              "number of TX descriptors minus 3. "
2542                              "(tx_free_thresh=%u "
2543                              "port=%d queue=%d)",
2544                              (unsigned int)tx_free_thresh,
2545                              (int)dev->data->port_id, (int)queue_idx);
2546                 return -(EINVAL);
2547         }
2548         if (tx_rs_thresh > tx_free_thresh) {
2549                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2550                              "tx_free_thresh. (tx_free_thresh=%u "
2551                              "tx_rs_thresh=%u port=%d queue=%d)",
2552                              (unsigned int)tx_free_thresh,
2553                              (unsigned int)tx_rs_thresh,
2554                              (int)dev->data->port_id,
2555                              (int)queue_idx);
2556                 return -(EINVAL);
2557         }
2558         if ((nb_desc % tx_rs_thresh) != 0) {
2559                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2560                              "number of TX descriptors. (tx_rs_thresh=%u "
2561                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2562                              (int)dev->data->port_id, (int)queue_idx);
2563                 return -(EINVAL);
2564         }
2565
2566         /*
2567          * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2568          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2569          * by the NIC and all descriptors are written back after the NIC
2570          * accumulates WTHRESH descriptors.
2571          */
2572         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2573                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2574                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2575                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2576                              (int)dev->data->port_id, (int)queue_idx);
2577                 return -(EINVAL);
2578         }
2579
2580         /* Free memory prior to re-allocation if needed... */
2581         if (dev->data->tx_queues[queue_idx] != NULL) {
2582                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2583                 dev->data->tx_queues[queue_idx] = NULL;
2584         }
2585
2586         /* First allocate the tx queue data structure */
2587         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2588                                  RTE_CACHE_LINE_SIZE, socket_id);
2589         if (txq == NULL)
2590                 return -ENOMEM;
2591
2592         /*
2593          * Allocate TX ring hardware descriptors. A memzone large enough to
2594          * handle the maximum ring size is allocated in order to allow for
2595          * resizing in later calls to the queue setup function.
2596          */
2597         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2598                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2599                         IXGBE_ALIGN, socket_id);
2600         if (tz == NULL) {
2601                 ixgbe_tx_queue_release(txq);
2602                 return -ENOMEM;
2603         }
2604
2605         txq->nb_tx_desc = nb_desc;
2606         txq->tx_rs_thresh = tx_rs_thresh;
2607         txq->tx_free_thresh = tx_free_thresh;
2608         txq->pthresh = tx_conf->tx_thresh.pthresh;
2609         txq->hthresh = tx_conf->tx_thresh.hthresh;
2610         txq->wthresh = tx_conf->tx_thresh.wthresh;
2611         txq->queue_id = queue_idx;
2612         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2613                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2614         txq->port_id = dev->data->port_id;
2615         txq->offloads = offloads;
2616         txq->ops = &def_txq_ops;
2617         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2618 #ifdef RTE_LIBRTE_SECURITY
2619         txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2620                         DEV_TX_OFFLOAD_SECURITY);
2621 #endif
2622
2623         /*
2624          * Use VFTDT as the TX tail register if a virtual function is detected
2625          */
2626         if (hw->mac.type == ixgbe_mac_82599_vf ||
2627             hw->mac.type == ixgbe_mac_X540_vf ||
2628             hw->mac.type == ixgbe_mac_X550_vf ||
2629             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2630             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2631                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2632         else
2633                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2634
2635         txq->tx_ring_phys_addr = tz->iova;
2636         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2637
2638         /* Allocate software ring */
2639         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2640                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2641                                 RTE_CACHE_LINE_SIZE, socket_id);
2642         if (txq->sw_ring == NULL) {
2643                 ixgbe_tx_queue_release(txq);
2644                 return -ENOMEM;
2645         }
2646         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2647                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2648
2649         /* set up vector or scalar TX function as appropriate */
2650         ixgbe_set_tx_function(dev, txq);
2651
2652         txq->ops->reset(txq);
2653
2654         dev->data->tx_queues[queue_idx] = txq;
2655
2656
2657         return 0;
2658 }
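/*
 * Minimal usage sketch (application side, not part of this driver); the
 * descriptor count and queue id below are only examples, and dev_info /
 * port_conf are assumed to have been obtained by the caller:
 *
 *     struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *     txconf.offloads = port_conf.txmode.offloads;
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  &txconf);
 *
 * rte_eth_tx_queue_setup() reaches this function through the ethdev
 * tx_queue_setup op.
 */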
2659
2660 /**
2661  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2662  *
2663  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2664  * in the sw_rsc_ring is not set to NULL but rather points to the next
2665  * mbuf of this RSC aggregation (that has not been completed yet and still
2666  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2667  * just free the first "nb_segs" segments of the cluster explicitly by
2668  * calling rte_pktmbuf_free_seg().
2669  *
2670  * @m scattered cluster head
2671  */
2672 static void __attribute__((cold))
2673 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2674 {
2675         uint16_t i, nb_segs = m->nb_segs;
2676         struct rte_mbuf *next_seg;
2677
2678         for (i = 0; i < nb_segs; i++) {
2679                 next_seg = m->next;
2680                 rte_pktmbuf_free_seg(m);
2681                 m = next_seg;
2682         }
2683 }
2684
2685 static void __attribute__((cold))
2686 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2687 {
2688         unsigned i;
2689
2690 #ifdef RTE_IXGBE_INC_VECTOR
2691         /* SSE Vector driver has a different way of releasing mbufs. */
2692         if (rxq->rx_using_sse) {
2693                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2694                 return;
2695         }
2696 #endif
2697
2698         if (rxq->sw_ring != NULL) {
2699                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2700                         if (rxq->sw_ring[i].mbuf != NULL) {
2701                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2702                                 rxq->sw_ring[i].mbuf = NULL;
2703                         }
2704                 }
2705                 if (rxq->rx_nb_avail) {
2706                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2707                                 struct rte_mbuf *mb;
2708
2709                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2710                                 rte_pktmbuf_free_seg(mb);
2711                         }
2712                         rxq->rx_nb_avail = 0;
2713                 }
2714         }
2715
2716         if (rxq->sw_sc_ring)
2717                 for (i = 0; i < rxq->nb_rx_desc; i++)
2718                         if (rxq->sw_sc_ring[i].fbuf) {
2719                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2720                                 rxq->sw_sc_ring[i].fbuf = NULL;
2721                         }
2722 }
2723
2724 static void __attribute__((cold))
2725 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2726 {
2727         if (rxq != NULL) {
2728                 ixgbe_rx_queue_release_mbufs(rxq);
2729                 rte_free(rxq->sw_ring);
2730                 rte_free(rxq->sw_sc_ring);
2731                 rte_free(rxq);
2732         }
2733 }
2734
2735 void __attribute__((cold))
2736 ixgbe_dev_rx_queue_release(void *rxq)
2737 {
2738         ixgbe_rx_queue_release(rxq);
2739 }
2740
2741 /*
2742  * Check if Rx Burst Bulk Alloc function can be used.
2743  * Return
2744  *        0: the preconditions are satisfied and the bulk allocation function
2745  *           can be used.
2746  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2747  *           function must be used.
2748  */
2749 static inline int __attribute__((cold))
2750 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2751 {
2752         int ret = 0;
2753
2754         /*
2755          * Make sure the following pre-conditions are satisfied:
2756          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2757          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2758          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2759          * Scattered packets are not supported.  This should be checked
2760          * outside of this function.
2761          */
2762         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2763                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2764                              "rxq->rx_free_thresh=%d, "
2765                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2766                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2767                 ret = -EINVAL;
2768         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2769                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2770                              "rxq->rx_free_thresh=%d, "
2771                              "rxq->nb_rx_desc=%d",
2772                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2773                 ret = -EINVAL;
2774         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2775                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2776                              "rxq->nb_rx_desc=%d, "
2777                              "rxq->rx_free_thresh=%d",
2778                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2779                 ret = -EINVAL;
2780         }
2781
2782         return ret;
2783 }
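/*
 * Illustrative values (assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32):
 * nb_rx_desc = 512 with rx_free_thresh = 32 passes all three checks,
 * while rx_free_thresh = 24 (below the minimum burst) or 100 (not a
 * divisor of 512) fails.
 */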
2784
2785 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2786 static void __attribute__((cold))
2787 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2788 {
2789         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2790         unsigned i;
2791         uint16_t len = rxq->nb_rx_desc;
2792
2793         /*
2794          * By default, the Rx queue setup function allocates enough memory for
2795          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2796          * extra memory at the end of the descriptor ring to be zero'd out.
2797          */
2798         if (adapter->rx_bulk_alloc_allowed)
2799                 /* zero out extra memory */
2800                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2801
2802         /*
2803          * Zero out HW ring memory. Zero out extra memory at the end of
2804          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2805          * reads extra memory as zeros.
2806          */
2807         for (i = 0; i < len; i++) {
2808                 rxq->rx_ring[i] = zeroed_desc;
2809         }
2810
2811         /*
2812          * initialize extra software ring entries. Space for these extra
2813          * entries is always allocated
2814          */
2815         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2816         for (i = rxq->nb_rx_desc; i < len; ++i) {
2817                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2818         }
2819
2820         rxq->rx_nb_avail = 0;
2821         rxq->rx_next_avail = 0;
2822         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2823         rxq->rx_tail = 0;
2824         rxq->nb_rx_hold = 0;
2825         rxq->pkt_first_seg = NULL;
2826         rxq->pkt_last_seg = NULL;
2827
2828 #ifdef RTE_IXGBE_INC_VECTOR
2829         rxq->rxrearm_start = 0;
2830         rxq->rxrearm_nb = 0;
2831 #endif
2832 }
2833
2834 static int
2835 ixgbe_is_vf(struct rte_eth_dev *dev)
2836 {
2837         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2838
2839         switch (hw->mac.type) {
2840         case ixgbe_mac_82599_vf:
2841         case ixgbe_mac_X540_vf:
2842         case ixgbe_mac_X550_vf:
2843         case ixgbe_mac_X550EM_x_vf:
2844         case ixgbe_mac_X550EM_a_vf:
2845                 return 1;
2846         default:
2847                 return 0;
2848         }
2849 }
2850
2851 uint64_t
2852 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2853 {
2854         uint64_t offloads = 0;
2855         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2856
2857         if (hw->mac.type != ixgbe_mac_82598EB)
2858                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2859
2860         return offloads;
2861 }
2862
2863 uint64_t
2864 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2865 {
2866         uint64_t offloads;
2867         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2868
2869         offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
2870                    DEV_RX_OFFLOAD_UDP_CKSUM   |
2871                    DEV_RX_OFFLOAD_TCP_CKSUM   |
2872                    DEV_RX_OFFLOAD_KEEP_CRC    |
2873                    DEV_RX_OFFLOAD_JUMBO_FRAME |
2874                    DEV_RX_OFFLOAD_VLAN_FILTER |
2875                    DEV_RX_OFFLOAD_SCATTER;
2876
2877         if (hw->mac.type == ixgbe_mac_82598EB)
2878                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2879
2880         if (ixgbe_is_vf(dev) == 0)
2881                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2882
2883         /*
2884          * RSC is only supported by 82599, X540 and X550 PF devices in a
2885          * non-SR-IOV mode.
2886          */
2887         if ((hw->mac.type == ixgbe_mac_82599EB ||
2888              hw->mac.type == ixgbe_mac_X540 ||
2889              hw->mac.type == ixgbe_mac_X550) &&
2890             !RTE_ETH_DEV_SRIOV(dev).active)
2891                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2892
2893         if (hw->mac.type == ixgbe_mac_82599EB ||
2894             hw->mac.type == ixgbe_mac_X540)
2895                 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2896
2897         if (hw->mac.type == ixgbe_mac_X550 ||
2898             hw->mac.type == ixgbe_mac_X550EM_x ||
2899             hw->mac.type == ixgbe_mac_X550EM_a)
2900                 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2901
2902 #ifdef RTE_LIBRTE_SECURITY
2903         if (dev->security_ctx)
2904                 offloads |= DEV_RX_OFFLOAD_SECURITY;
2905 #endif
2906
2907         return offloads;
2908 }
2909
2910 int __attribute__((cold))
2911 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2912                          uint16_t queue_idx,
2913                          uint16_t nb_desc,
2914                          unsigned int socket_id,
2915                          const struct rte_eth_rxconf *rx_conf,
2916                          struct rte_mempool *mp)
2917 {
2918         const struct rte_memzone *rz;
2919         struct ixgbe_rx_queue *rxq;
2920         struct ixgbe_hw     *hw;
2921         uint16_t len;
2922         struct ixgbe_adapter *adapter =
2923                 (struct ixgbe_adapter *)dev->data->dev_private;
2924         uint64_t offloads;
2925
2926         PMD_INIT_FUNC_TRACE();
2927         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2928
2929         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2930
2931         /*
2932          * Validate number of receive descriptors.
2933          * It must not exceed the hardware maximum and must be a multiple
2934          * of IXGBE_RXD_ALIGN.
2935          */
2936         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2937                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2938                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2939                 return -EINVAL;
2940         }
2941
2942         /* Free memory prior to re-allocation if needed... */
2943         if (dev->data->rx_queues[queue_idx] != NULL) {
2944                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2945                 dev->data->rx_queues[queue_idx] = NULL;
2946         }
2947
2948         /* First allocate the rx queue data structure */
2949         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2950                                  RTE_CACHE_LINE_SIZE, socket_id);
2951         if (rxq == NULL)
2952                 return -ENOMEM;
2953         rxq->mb_pool = mp;
2954         rxq->nb_rx_desc = nb_desc;
2955         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2956         rxq->queue_id = queue_idx;
2957         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2958                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2959         rxq->port_id = dev->data->port_id;
2960         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2961                 rxq->crc_len = RTE_ETHER_CRC_LEN;
2962         else
2963                 rxq->crc_len = 0;
2964         rxq->drop_en = rx_conf->rx_drop_en;
2965         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2966         rxq->offloads = offloads;
2967
2968         /*
2969          * The packet type in the RX descriptor differs between NICs.
2970          * Some bits are used by x550 but reserved on other NICs,
2971          * so set a different mask for each NIC type.
2972          */
2973         if (hw->mac.type == ixgbe_mac_X550 ||
2974             hw->mac.type == ixgbe_mac_X550EM_x ||
2975             hw->mac.type == ixgbe_mac_X550EM_a ||
2976             hw->mac.type == ixgbe_mac_X550_vf ||
2977             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2978             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2979                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2980         else
2981                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2982
2983         /*
2984          * Allocate RX ring hardware descriptors. A memzone large enough to
2985          * handle the maximum ring size is allocated in order to allow for
2986          * resizing in later calls to the queue setup function.
2987          */
2988         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2989                                       RX_RING_SZ, IXGBE_ALIGN, socket_id);
2990         if (rz == NULL) {
2991                 ixgbe_rx_queue_release(rxq);
2992                 return -ENOMEM;
2993         }
2994
2995         /*
2996          * Zero init all the descriptors in the ring.
2997          */
2998         memset(rz->addr, 0, RX_RING_SZ);
2999
3000         /*
3001          * Use VFRDT/VFRDH as the RX tail/head registers for a Virtual Function
3002          */
3003         if (hw->mac.type == ixgbe_mac_82599_vf ||
3004             hw->mac.type == ixgbe_mac_X540_vf ||
3005             hw->mac.type == ixgbe_mac_X550_vf ||
3006             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3007             hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3008                 rxq->rdt_reg_addr =
3009                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3010                 rxq->rdh_reg_addr =
3011                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3012         } else {
3013                 rxq->rdt_reg_addr =
3014                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3015                 rxq->rdh_reg_addr =
3016                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3017         }
3018
3019         rxq->rx_ring_phys_addr = rz->iova;
3020         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3021
3022         /*
3023          * Certain constraints must be met in order to use the bulk buffer
3024          * allocation Rx burst function. If any Rx queue doesn't meet them,
3025          * the feature is disabled for the whole port.
3026          */
3027         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3028                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3029                                     "preconditions - canceling the feature for "
3030                                     "the whole port[%d]",
3031                              rxq->queue_id, rxq->port_id);
3032                 adapter->rx_bulk_alloc_allowed = false;
3033         }
3034
3035         /*
3036          * Allocate software ring. Allow for space at the end of the
3037          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3038          * function does not access an invalid memory region.
3039          */
3040         len = nb_desc;
3041         if (adapter->rx_bulk_alloc_allowed)
3042                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3043
3044         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3045                                           sizeof(struct ixgbe_rx_entry) * len,
3046                                           RTE_CACHE_LINE_SIZE, socket_id);
3047         if (!rxq->sw_ring) {
3048                 ixgbe_rx_queue_release(rxq);
3049                 return -ENOMEM;
3050         }
3051
3052         /*
3053          * Always allocate even if it's not going to be needed in order to
3054          * simplify the code.
3055          *
3056          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3057          * be requested in ixgbe_dev_rx_init(), which is called later from
3058          * the dev_start() flow.
3059          */
3060         rxq->sw_sc_ring =
3061                 rte_zmalloc_socket("rxq->sw_sc_ring",
3062                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
3063                                    RTE_CACHE_LINE_SIZE, socket_id);
3064         if (!rxq->sw_sc_ring) {
3065                 ixgbe_rx_queue_release(rxq);
3066                 return -ENOMEM;
3067         }
3068
3069         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3070                             "dma_addr=0x%"PRIx64,
3071                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3072                      rxq->rx_ring_phys_addr);
3073
3074         if (!rte_is_power_of_2(nb_desc)) {
3075                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3076                                     "preconditions - canceling the feature for "
3077                                     "the whole port[%d]",
3078                              rxq->queue_id, rxq->port_id);
3079                 adapter->rx_vec_allowed = false;
3080         } else
3081                 ixgbe_rxq_vec_setup(rxq);
3082
3083         dev->data->rx_queues[queue_idx] = rxq;
3084
3085         ixgbe_reset_rx_queue(adapter, rxq);
3086
3087         return 0;
3088 }
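/*
 * Minimal usage sketch (application side, not part of this driver); the
 * mbuf pool and port configuration are assumed to exist already:
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  NULL, mbuf_pool);
 *
 * Passing NULL for rx_conf selects the defaults reported by
 * rte_eth_dev_info_get().
 */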
3089
3090 uint32_t
3091 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3092 {
3093 #define IXGBE_RXQ_SCAN_INTERVAL 4
3094         volatile union ixgbe_adv_rx_desc *rxdp;
3095         struct ixgbe_rx_queue *rxq;
3096         uint32_t desc = 0;
3097
3098         rxq = dev->data->rx_queues[rx_queue_id];
3099         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3100
3101         while ((desc < rxq->nb_rx_desc) &&
3102                 (rxdp->wb.upper.status_error &
3103                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3104                 desc += IXGBE_RXQ_SCAN_INTERVAL;
3105                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3106                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3107                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
3108                                 desc - rxq->nb_rx_desc]);
3109         }
3110
3111         return desc;
3112 }
3113
3114 int
3115 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3116 {
3117         volatile union ixgbe_adv_rx_desc *rxdp;
3118         struct ixgbe_rx_queue *rxq = rx_queue;
3119         uint32_t desc;
3120
3121         if (unlikely(offset >= rxq->nb_rx_desc))
3122                 return 0;
3123         desc = rxq->rx_tail + offset;
3124         if (desc >= rxq->nb_rx_desc)
3125                 desc -= rxq->nb_rx_desc;
3126
3127         rxdp = &rxq->rx_ring[desc];
3128         return !!(rxdp->wb.upper.status_error &
3129                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3130 }
3131
3132 int
3133 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3134 {
3135         struct ixgbe_rx_queue *rxq = rx_queue;
3136         volatile uint32_t *status;
3137         uint32_t nb_hold, desc;
3138
3139         if (unlikely(offset >= rxq->nb_rx_desc))
3140                 return -EINVAL;
3141
3142 #ifdef RTE_IXGBE_INC_VECTOR
3143         if (rxq->rx_using_sse)
3144                 nb_hold = rxq->rxrearm_nb;
3145         else
3146 #endif
3147                 nb_hold = rxq->nb_rx_hold;
3148         if (offset >= rxq->nb_rx_desc - nb_hold)
3149                 return RTE_ETH_RX_DESC_UNAVAIL;
3150
3151         desc = rxq->rx_tail + offset;
3152         if (desc >= rxq->nb_rx_desc)
3153                 desc -= rxq->nb_rx_desc;
3154
3155         status = &rxq->rx_ring[desc].wb.upper.status_error;
3156         if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3157                 return RTE_ETH_RX_DESC_DONE;
3158
3159         return RTE_ETH_RX_DESC_AVAIL;
3160 }
3161
3162 int
3163 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3164 {
3165         struct ixgbe_tx_queue *txq = tx_queue;
3166         volatile uint32_t *status;
3167         uint32_t desc;
3168
3169         if (unlikely(offset >= txq->nb_tx_desc))
3170                 return -EINVAL;
3171
3172         desc = txq->tx_tail + offset;
3173         /* go to next desc that has the RS bit */
3174         desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3175                 txq->tx_rs_thresh;
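        /*
         * Worked example with illustrative values: tx_tail = 100, offset = 5
         * and tx_rs_thresh = 32 give desc = 105, which rounds up to 128, the
         * next descriptor for which write-back was requested via the RS bit.
         */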
3176         if (desc >= txq->nb_tx_desc) {
3177                 desc -= txq->nb_tx_desc;
3178                 if (desc >= txq->nb_tx_desc)
3179                         desc -= txq->nb_tx_desc;
3180         }
3181
3182         status = &txq->tx_ring[desc].wb.status;
3183         if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3184                 return RTE_ETH_TX_DESC_DONE;
3185
3186         return RTE_ETH_TX_DESC_FULL;
3187 }
3188
3189 /*
3190  * Set up link loopback for X540/X550 mode Tx->Rx.
3191  */
3192 static inline void __attribute__((cold))
3193 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3194 {
3195         uint32_t macc;
3196         PMD_INIT_FUNC_TRACE();
3197
3198         u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3199
3200         hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3201                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3202         macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3203
3204         if (enable) {
3205                 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3206                 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3207                 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3208                 macc |= IXGBE_MACC_FLU;
3209         } else {
3210                 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3211                 macc &= ~IXGBE_MACC_FLU;
3212         }
3213
3214         hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3215                               IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3216
3217         IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3218 }
3219
3220 void __attribute__((cold))
3221 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3222 {
3223         unsigned i;
3224         struct ixgbe_adapter *adapter =
3225                 (struct ixgbe_adapter *)dev->data->dev_private;
3226         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3227
3228         PMD_INIT_FUNC_TRACE();
3229
3230         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3231                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3232
3233                 if (txq != NULL) {
3234                         txq->ops->release_mbufs(txq);
3235                         txq->ops->reset(txq);
3236                 }
3237         }
3238
3239         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3240                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3241
3242                 if (rxq != NULL) {
3243                         ixgbe_rx_queue_release_mbufs(rxq);
3244                         ixgbe_reset_rx_queue(adapter, rxq);
3245                 }
3246         }
3247         /* If loopback mode was enabled, reconfigure the link accordingly */
3248         if (dev->data->dev_conf.lpbk_mode != 0) {
3249                 if (hw->mac.type == ixgbe_mac_X540 ||
3250                      hw->mac.type == ixgbe_mac_X550 ||
3251                      hw->mac.type == ixgbe_mac_X550EM_x ||
3252                      hw->mac.type == ixgbe_mac_X550EM_a)
3253                         ixgbe_setup_loopback_link_x540_x550(hw, false);
3254         }
3255 }
3256
3257 void
3258 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3259 {
3260         unsigned i;
3261
3262         PMD_INIT_FUNC_TRACE();
3263
3264         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3265                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3266                 dev->data->rx_queues[i] = NULL;
3267         }
3268         dev->data->nb_rx_queues = 0;
3269
3270         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3271                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3272                 dev->data->tx_queues[i] = NULL;
3273         }
3274         dev->data->nb_tx_queues = 0;
3275 }
3276
3277 /*********************************************************************
3278  *
3279  *  Device RX/TX init functions
3280  *
3281  **********************************************************************/
3282
3283 /**
3284  * Receive Side Scaling (RSS)
3285  * See section 7.1.2.8 in the following document:
3286  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3287  *
3288  * Principles:
3289  * The source and destination IP addresses of the IP header and the source
3290  * and destination ports of TCP/UDP headers, if any, of received packets are
3291  * hashed against a configurable random key to compute a 32-bit RSS hash result.
3292  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3293  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
3294  * RSS output index which is used as the RX queue index in which to store
3295  * the received packets.
3296  * The following output is supplied in the RX write-back descriptor:
3297  *     - 32-bit result of the Microsoft RSS hash function,
3298  *     - 4-bit RSS type field.
3299  */
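/*
 * Worked example (illustrative hash value): a packet hashing to 0x1a2b3c4d
 * uses the 7 LSBs (0x4d = 77) as the RETA index, and the queue number held
 * in RETA[77] selects the RX queue for that packet.
 */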
3300
3301 /*
3302  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3303  * Used as the default key.
3304  */
3305 static uint8_t rss_intel_key[40] = {
3306         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3307         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3308         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3309         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3310         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3311 };
3312
3313 static void
3314 ixgbe_rss_disable(struct rte_eth_dev *dev)
3315 {
3316         struct ixgbe_hw *hw;
3317         uint32_t mrqc;
3318         uint32_t mrqc_reg;
3319
3320         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3321         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3322         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3323         mrqc &= ~IXGBE_MRQC_RSSEN;
3324         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3325 }
3326
3327 static void
3328 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3329 {
3330         uint8_t  *hash_key;
3331         uint32_t mrqc;
3332         uint32_t rss_key;
3333         uint64_t rss_hf;
3334         uint16_t i;
3335         uint32_t mrqc_reg;
3336         uint32_t rssrk_reg;
3337
3338         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3339         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3340
3341         hash_key = rss_conf->rss_key;
3342         if (hash_key != NULL) {
3343                 /* Fill in RSS hash key */
3344                 for (i = 0; i < 10; i++) {
3345                         rss_key  = hash_key[(i * 4)];
3346                         rss_key |= hash_key[(i * 4) + 1] << 8;
3347                         rss_key |= hash_key[(i * 4) + 2] << 16;
3348                         rss_key |= hash_key[(i * 4) + 3] << 24;
3349                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3350                 }
3351         }
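        /*
         * The loop above packs four key bytes per RSSRK register with byte 0
         * in the least significant position; e.g. with the default
         * rss_intel_key above, 0x6D 0x5A 0x56 0xDA are written to RSSRK[0]
         * as 0xDA565A6D (worked example only).
         */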
3352
3353         /* Set configured hashing protocols in MRQC register */
3354         rss_hf = rss_conf->rss_hf;
3355         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3356         if (rss_hf & ETH_RSS_IPV4)
3357                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3358         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3359                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3360         if (rss_hf & ETH_RSS_IPV6)
3361                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3362         if (rss_hf & ETH_RSS_IPV6_EX)
3363                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3364         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3365                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3366         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3367                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3368         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3369                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3370         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3371                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3372         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3373                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3374         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3375 }
3376
3377 int
3378 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3379                           struct rte_eth_rss_conf *rss_conf)
3380 {
3381         struct ixgbe_hw *hw;
3382         uint32_t mrqc;
3383         uint64_t rss_hf;
3384         uint32_t mrqc_reg;
3385
3386         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3387
3388         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3389                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3390                         "NIC.");
3391                 return -ENOTSUP;
3392         }
3393         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3394
3395         /*
3396          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3397          *     "RSS enabling cannot be done dynamically while it must be
3398          *      preceded by a software reset"
3399          * Before changing anything, first check that the update RSS operation
3400          * does not attempt to disable RSS, if RSS was enabled at
3401          * initialization time, or does not attempt to enable RSS, if RSS was
3402          * disabled at initialization time.
3403          */
3404         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3405         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3406         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3407                 if (rss_hf != 0) /* Enable RSS */
3408                         return -(EINVAL);
3409                 return 0; /* Nothing to do */
3410         }
3411         /* RSS enabled */
3412         if (rss_hf == 0) /* Disable RSS */
3413                 return -(EINVAL);
3414         ixgbe_hw_rss_hash_set(hw, rss_conf);
3415         return 0;
3416 }
3417
3418 int
3419 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3420                             struct rte_eth_rss_conf *rss_conf)
3421 {
3422         struct ixgbe_hw *hw;
3423         uint8_t *hash_key;
3424         uint32_t mrqc;
3425         uint32_t rss_key;
3426         uint64_t rss_hf;
3427         uint16_t i;
3428         uint32_t mrqc_reg;
3429         uint32_t rssrk_reg;
3430
3431         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3432         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3433         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3434         hash_key = rss_conf->rss_key;
3435         if (hash_key != NULL) {
3436                 /* Return RSS hash key */
3437                 for (i = 0; i < 10; i++) {
3438                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3439                         hash_key[(i * 4)] = rss_key & 0x000000FF;
3440                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3441                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3442                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3443                 }
3444         }
3445
3446         /* Get RSS functions configured in MRQC register */
3447         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3448         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3449                 rss_conf->rss_hf = 0;
3450                 return 0;
3451         }
3452         rss_hf = 0;
3453         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3454                 rss_hf |= ETH_RSS_IPV4;
3455         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3456                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3457         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3458                 rss_hf |= ETH_RSS_IPV6;
3459         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3460                 rss_hf |= ETH_RSS_IPV6_EX;
3461         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3462                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3463         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3464                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3465         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3466                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3467         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3468                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3469         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3470                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3471         rss_conf->rss_hf = rss_hf;
3472         return 0;
3473 }
3474
3475 static void
3476 ixgbe_rss_configure(struct rte_eth_dev *dev)
3477 {
3478         struct rte_eth_rss_conf rss_conf;
3479         struct ixgbe_adapter *adapter;
3480         struct ixgbe_hw *hw;
3481         uint32_t reta;
3482         uint16_t i;
3483         uint16_t j;
3484         uint16_t sp_reta_size;
3485         uint32_t reta_reg;
3486
3487         PMD_INIT_FUNC_TRACE();
3488         adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3489         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3490
3491         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3492
3493         /*
3494          * Fill in redirection table
3495          * The byte-swap is needed because NIC registers are in
3496          * little-endian order.
3497          */
3498         if (adapter->rss_reta_updated == 0) {
3499                 reta = 0;
3500                 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3501                         reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3502
3503                         if (j == dev->data->nb_rx_queues)
3504                                 j = 0;
3505                         reta = (reta << 8) | j;
3506                         if ((i & 3) == 3)
3507                                 IXGBE_WRITE_REG(hw, reta_reg,
3508                                                 rte_bswap32(reta));
3509                 }
3510         }
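        /*
         * Worked example: with 4 Rx queues the first four RETA entries pack
         * into reta as 0x00010203 and are written byte-swapped as
         * 0x03020100, i.e. entry 0 -> queue 0 in the least significant byte
         * of the register.
         */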
3511
3512         /*
3513          * Configure the RSS key and the RSS protocols used to compute
3514          * the RSS hash of input packets.
3515          */
3516         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3517         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3518                 ixgbe_rss_disable(dev);
3519                 return;
3520         }
3521         if (rss_conf.rss_key == NULL)
3522                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3523         ixgbe_hw_rss_hash_set(hw, &rss_conf);
3524 }
3525
3526 #define NUM_VFTA_REGISTERS 128
3527 #define NIC_RX_BUFFER_SIZE 0x200
3528 #define X550_RX_BUFFER_SIZE 0x180
3529
3530 static void
3531 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3532 {
3533         struct rte_eth_vmdq_dcb_conf *cfg;
3534         struct ixgbe_hw *hw;
3535         enum rte_eth_nb_pools num_pools;
3536         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3537         uint16_t pbsize;
3538         uint8_t nb_tcs; /* number of traffic classes */
3539         int i;
3540
3541         PMD_INIT_FUNC_TRACE();
3542         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3543         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3544         num_pools = cfg->nb_queue_pools;
3545         /* Check we have a valid number of pools */
3546         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3547                 ixgbe_rss_disable(dev);
3548                 return;
3549         }
3550         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3551         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3552
3553         /*
3554          * RXPBSIZE
3555          * split rx buffer up into sections, each for 1 traffic class
3556          */
3557         switch (hw->mac.type) {
3558         case ixgbe_mac_X550:
3559         case ixgbe_mac_X550EM_x:
3560         case ixgbe_mac_X550EM_a:
3561                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3562                 break;
3563         default:
3564                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3565                 break;
3566         }
3567         for (i = 0; i < nb_tcs; i++) {
3568                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3569
3570                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3571                 /* clear 10 bits. */
3572                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3573                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3574         }
3575         /* zero alloc all unused TCs */
3576         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3577                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3578
3579                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3580                 /* clear 10 bits. */
3581                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3582         }
3583
3584         /* MRQC: enable vmdq and dcb */
3585         mrqc = (num_pools == ETH_16_POOLS) ?
3586                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3587         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3588
3589         /* PFVTCTL: turn on virtualisation and set the default pool */
3590         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3591         if (cfg->enable_default_pool) {
3592                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3593         } else {
3594                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3595         }
3596
3597         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3598
3599         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3600         queue_mapping = 0;
3601         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3602                 /*
3603                  * mapping is done with 3 bits per priority,
3604                  * so shift by i*3 each time
3605                  */
3606                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3607
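        /*
         * Illustrative mapping: dcb_tc = {0, 0, 0, 1, 1, 2, 3, 3} packs to
         * queue_mapping = 0x006d1200 (priority 0 in bits 2:0, priority 7 in
         * bits 23:21).
         */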
3608         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3609
3610         /* RTRPCS: DCB related */
3611         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3612
3613         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3614         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3615         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3616         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3617
3618         /* VFTA - enable all vlan filters */
3619         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3620                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3621         }
3622
3623         /* VFRE: pool enabling for receive - 16 or 32 */
3624         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3625                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3626
3627         /*
3628          * MPSAR - allow pools to read specific mac addresses
3629          * In this case, all pools should be able to read from mac addr 0
3630          */
3631         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3632         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3633
3634         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3635         for (i = 0; i < cfg->nb_pool_maps; i++) {
3636                 /* set vlan id in VF register and set the valid bit */
3637                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3638                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
3639                 /*
3640                  * Put the allowed pools in VFB reg. As we only have 16 or 32
3641                  * pools, we only need to use the first half of the register
3642                  * i.e. bits 0-31
3643                  */
3644                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3645         }
3646 }
3647
3648 /**
3649  * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3650  * @dev: pointer to eth_dev structure
3651  * @dcb_config: pointer to ixgbe_dcb_config structure
3652  */
3653 static void
3654 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3655                        struct ixgbe_dcb_config *dcb_config)
3656 {
3657         uint32_t reg;
3658         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3659
3660         PMD_INIT_FUNC_TRACE();
3661         if (hw->mac.type != ixgbe_mac_82598EB) {
3662                 /* Disable the Tx desc arbiter so that MTQC can be changed */
3663                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3664                 reg |= IXGBE_RTTDCS_ARBDIS;
3665                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3666
3667                 /* Enable DCB for Tx with 8 or 4 TCs */
3668                 if (dcb_config->num_tcs.pg_tcs == 8) {
3669                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3670                 } else {
3671                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3672                 }
3673                 if (dcb_config->vt_mode)
3674                         reg |= IXGBE_MTQC_VT_ENA;
3675                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3676
3677                 /* Enable the Tx desc arbiter */
3678                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3679                 reg &= ~IXGBE_RTTDCS_ARBDIS;
3680                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3681
3682                 /* Enable Security TX Buffer IFG for DCB */
3683                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3684                 reg |= IXGBE_SECTX_DCB;
3685                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3686         }
3687 }
3688
3689 /**
3690  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3691  * @dev: pointer to rte_eth_dev structure
3692  * @dcb_config: pointer to ixgbe_dcb_config structure
3693  */
3694 static void
3695 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3696                         struct ixgbe_dcb_config *dcb_config)
3697 {
3698         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3699                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3700         struct ixgbe_hw *hw =
3701                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3702
3703         PMD_INIT_FUNC_TRACE();
3704         if (hw->mac.type != ixgbe_mac_82598EB)
3705                 /* PF VF Transmit Enable */
3706                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3707                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3708
3709         /* Configure general DCB TX parameters */
3710         ixgbe_dcb_tx_hw_config(dev, dcb_config);
3711 }
3712
3713 static void
3714 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3715                         struct ixgbe_dcb_config *dcb_config)
3716 {
3717         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3718                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3719         struct ixgbe_dcb_tc_config *tc;
3720         uint8_t i, j;
3721
3722         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3723         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3724                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3725                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3726         } else {
3727                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3728                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3729         }
3730
3731         /* Initialize User Priority to Traffic Class mapping */
3732         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3733                 tc = &dcb_config->tc_config[j];
3734                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3735         }
3736
3737         /* User Priority to Traffic Class mapping */
3738         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3739                 j = vmdq_rx_conf->dcb_tc[i];
3740                 tc = &dcb_config->tc_config[j];
3741                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3742                                                 (uint8_t)(1 << i);
3743         }
3744 }
3745
3746 static void
3747 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3748                         struct ixgbe_dcb_config *dcb_config)
3749 {
3750         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3751                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3752         struct ixgbe_dcb_tc_config *tc;
3753         uint8_t i, j;
3754
3755         /* convert rte_eth_conf.tx_adv_conf to struct ixgbe_dcb_config */
3756         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3757                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3758                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3759         } else {
3760                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3761                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3762         }
3763
3764         /* Initialize User Priority to Traffic Class mapping */
3765         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3766                 tc = &dcb_config->tc_config[j];
3767                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3768         }
3769
3770         /* User Priority to Traffic Class mapping */
3771         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3772                 j = vmdq_tx_conf->dcb_tc[i];
3773                 tc = &dcb_config->tc_config[j];
3774                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3775                                                 (uint8_t)(1 << i);
3776         }
3777 }
3778
3779 static void
3780 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3781                 struct ixgbe_dcb_config *dcb_config)
3782 {
3783         struct rte_eth_dcb_rx_conf *rx_conf =
3784                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3785         struct ixgbe_dcb_tc_config *tc;
3786         uint8_t i, j;
3787
3788         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3789         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3790
3791         /* Initialize User Priority to Traffic Class mapping */
3792         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3793                 tc = &dcb_config->tc_config[j];
3794                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3795         }
3796
3797         /* User Priority to Traffic Class mapping */
3798         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3799                 j = rx_conf->dcb_tc[i];
3800                 tc = &dcb_config->tc_config[j];
3801                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3802                                                 (uint8_t)(1 << i);
3803         }
3804 }
3805
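/**
 * ixgbe_dcb_tx_config - Convert DCB TX settings from rte_eth_conf
 * @dev: pointer to rte_eth_dev structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Takes the TC count and the TX user-priority to traffic-class mapping
 * from the dcb_tx_conf part of the device configuration.
 */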
3806 static void
3807 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3808                 struct ixgbe_dcb_config *dcb_config)
3809 {
3810         struct rte_eth_dcb_tx_conf *tx_conf =
3811                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3812         struct ixgbe_dcb_tc_config *tc;
3813         uint8_t i, j;
3814
3815         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3816         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3817
3818         /* Initialize User Priority to Traffic Class mapping */
3819         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3820                 tc = &dcb_config->tc_config[j];
3821                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3822         }
3823
3824         /* User Priority to Traffic Class mapping */
3825         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3826                 j = tx_conf->dcb_tc[i];
3827                 tc = &dcb_config->tc_config[j];
3828                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3829                                                 (uint8_t)(1 << i);
3830         }
3831 }
3832
3833 /**
3834  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3835  * @dev: pointer to eth_dev structure
3836  * @dcb_config: pointer to ixgbe_dcb_config structure
3837  */
3838 static void
3839 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3840                        struct ixgbe_dcb_config *dcb_config)
3841 {
3842         uint32_t reg;
3843         uint32_t vlanctrl;
3844         uint8_t i;
3845         uint32_t q;
3846         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3847
3848         PMD_INIT_FUNC_TRACE();
3849         /*
3850          * Disable the arbiter before changing parameters
3851          * (always enable recycle mode; WSP)
3852          */
3853         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3854         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3855
3856         if (hw->mac.type != ixgbe_mac_82598EB) {
3857                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3858                 if (dcb_config->num_tcs.pg_tcs == 4) {
3859                         if (dcb_config->vt_mode)
3860                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3861                                         IXGBE_MRQC_VMDQRT4TCEN;
3862                         else {
3863                                 /* Whether the mode is DCB or DCB_RSS, just
3864                                  * set the MRQE to RSSXTCEN. RSS is controlled
3865                                  * by RSS_FIELD.
3866                                  */
3867                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3868                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3869                                         IXGBE_MRQC_RTRSS4TCEN;
3870                         }
3871                 }
3872                 if (dcb_config->num_tcs.pg_tcs == 8) {
3873                         if (dcb_config->vt_mode)
3874                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3875                                         IXGBE_MRQC_VMDQRT8TCEN;
3876                         else {
3877                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3878                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3879                                         IXGBE_MRQC_RTRSS8TCEN;
3880                         }
3881                 }
3882
3883                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3884
3885                 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3886                         /* Disable drop for all queues in VMDQ mode */
3887                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3888                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3889                                                 (IXGBE_QDE_WRITE |
3890                                                  (q << IXGBE_QDE_IDX_SHIFT)));
3891                 } else {
3892                         /* Enable drop for all queues in SRIOV mode */
3893                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3894                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3895                                                 (IXGBE_QDE_WRITE |
3896                                                  (q << IXGBE_QDE_IDX_SHIFT) |
3897                                                  IXGBE_QDE_ENABLE));
3898                 }
3899         }
3900
3901         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3902         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3903         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3904         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3905
3906         /* VFTA - enable all vlan filters */
3907         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3908                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3909         }
3910
3911         /*
3912          * Configure Rx packet plane (recycle mode; WSP) and
3913          * enable arbiter
3914          */
3915         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3916         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3917 }
3918
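/**
 * ixgbe_dcb_hw_arbite_rx_config - Program the RX packet plane arbiter
 * @hw: pointer to hardware structure
 *
 * Dispatches to the MAC-specific arbiter routine: the 82598 variant takes
 * no bandwidth-group or UP-to-TC map, while 82599 and later devices do.
 */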
3919 static void
3920 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3921                         uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3922 {
3923         switch (hw->mac.type) {
3924         case ixgbe_mac_82598EB:
3925                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3926                 break;
3927         case ixgbe_mac_82599EB:
3928         case ixgbe_mac_X540:
3929         case ixgbe_mac_X550:
3930         case ixgbe_mac_X550EM_x:
3931         case ixgbe_mac_X550EM_a:
3932                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3933                                                   tsa, map);
3934                 break;
3935         default:
3936                 break;
3937         }
3938 }
3939
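/**
 * ixgbe_dcb_hw_arbite_tx_config - Program the TX descriptor and data arbiters
 * @hw: pointer to hardware structure
 *
 * Dispatches to the MAC-specific TX descriptor and data plane arbiter routines.
 */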
3940 static void
3941 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3942                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3943 {
3944         switch (hw->mac.type) {
3945         case ixgbe_mac_82598EB:
3946                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3947                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3948                 break;
3949         case ixgbe_mac_82599EB:
3950         case ixgbe_mac_X540:
3951         case ixgbe_mac_X550:
3952         case ixgbe_mac_X550EM_x:
3953         case ixgbe_mac_X550EM_a:
3954                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3955                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3956                 break;
3957         default:
3958                 break;
3959         }
3960 }
3961
3962 #define DCB_RX_CONFIG  1
3963 #define DCB_TX_CONFIG  1
3964 #define DCB_TX_PB      1024
3965 /**
3966  * ixgbe_dcb_hw_configure - Enable DCB and configure
3967  * general DCB in VT mode and non-VT mode parameters
3968  * @dev: pointer to rte_eth_dev structure
3969  * @dcb_config: pointer to ixgbe_dcb_config structure
3970  */
3971 static int
3972 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3973                         struct ixgbe_dcb_config *dcb_config)
3974 {
3975         int     ret = 0;
3976         uint8_t i, pfc_en, nb_tcs;
3977         uint16_t pbsize, rx_buffer_size;
3978         uint8_t config_dcb_rx = 0;
3979         uint8_t config_dcb_tx = 0;
3980         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3983         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3984         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3985         struct ixgbe_dcb_tc_config *tc;
3986         uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3987                 RTE_ETHER_CRC_LEN;
3988         struct ixgbe_hw *hw =
3989                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3990         struct ixgbe_bw_conf *bw_conf =
3991                 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3992
3993         switch (dev->data->dev_conf.rxmode.mq_mode) {
3994         case ETH_MQ_RX_VMDQ_DCB:
3995                 dcb_config->vt_mode = true;
3996                 if (hw->mac.type != ixgbe_mac_82598EB) {
3997                         config_dcb_rx = DCB_RX_CONFIG;
3998                         /*
3999                          * Get DCB and VT RX configuration parameters
4000                          * from rte_eth_conf
4001                          */
4002                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4003                         /* Configure general VMDQ and DCB RX parameters */
4004                         ixgbe_vmdq_dcb_configure(dev);
4005                 }
4006                 break;
4007         case ETH_MQ_RX_DCB:
4008         case ETH_MQ_RX_DCB_RSS:
4009                 dcb_config->vt_mode = false;
4010                 config_dcb_rx = DCB_RX_CONFIG;
4011                 /* Get DCB RX configuration parameters from rte_eth_conf */
4012                 ixgbe_dcb_rx_config(dev, dcb_config);
4013                 /* Configure general DCB RX parameters */
4014                 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4015                 break;
4016         default:
4017                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4018                 break;
4019         }
4020         switch (dev->data->dev_conf.txmode.mq_mode) {
4021         case ETH_MQ_TX_VMDQ_DCB:
4022                 dcb_config->vt_mode = true;
4023                 config_dcb_tx = DCB_TX_CONFIG;
4024                 /* Get DCB and VT TX configuration parameters
4025                  * from rte_eth_conf
4026                  */
4027                 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4028                 /* Configure general VMDQ and DCB TX parameters */
4029                 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4030                 break;
4031
4032         case ETH_MQ_TX_DCB:
4033                 dcb_config->vt_mode = false;
4034                 config_dcb_tx = DCB_TX_CONFIG;
4035                 /* Get DCB TX configuration parameters from rte_eth_conf */
4036                 ixgbe_dcb_tx_config(dev, dcb_config);
4037                 /* Configure general DCB TX parameters */
4038                 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4039                 break;
4040         default:
4041                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4042                 break;
4043         }
4044
4045         nb_tcs = dcb_config->num_tcs.pfc_tcs;
4046         /* Unpack map */
4047         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
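        /*
         * With only 4 TCs, user priorities 4-7 may be left unmapped by the
         * application and would otherwise all fall back to TC0; spread them
         * over the TCs that priorities 0-3 do not use.
         */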
4048         if (nb_tcs == ETH_4_TCS) {
4049                 /* Avoid un-configured priority mapping to TC0 */
4050                 uint8_t j = 4;
4051                 uint8_t mask = 0xFF;
4052
4053                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4054                         mask = (uint8_t)(mask & (~(1 << map[i])));
4055                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4056                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4057                                 map[j++] = i;
4058                         mask >>= 1;
4059                 }
4060                 /* Re-configure 4 TCs BW */
4061                 for (i = 0; i < nb_tcs; i++) {
4062                         tc = &dcb_config->tc_config[i];
4063                         if (bw_conf->tc_num != nb_tcs)
4064                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4065                                         (uint8_t)(100 / nb_tcs);
4066                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4067                                                 (uint8_t)(100 / nb_tcs);
4068                 }
4069                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4070                         tc = &dcb_config->tc_config[i];
4071                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4072                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4073                 }
4074         } else {
4075                 /* Re-configure 8 TCs BW */
4076                 for (i = 0; i < nb_tcs; i++) {
4077                         tc = &dcb_config->tc_config[i];
4078                         if (bw_conf->tc_num != nb_tcs)
4079                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4080                                         (uint8_t)(100 / nb_tcs + (i & 1));
4081                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4082                                 (uint8_t)(100 / nb_tcs + (i & 1));
4083                 }
4084         }
4085
4086         switch (hw->mac.type) {
4087         case ixgbe_mac_X550:
4088         case ixgbe_mac_X550EM_x:
4089         case ixgbe_mac_X550EM_a:
4090                 rx_buffer_size = X550_RX_BUFFER_SIZE;
4091                 break;
4092         default:
4093                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4094                 break;
4095         }
4096
4097         if (config_dcb_rx) {
4098                 /* Set RX buffer size */
4099                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4100                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4101
4102                 for (i = 0; i < nb_tcs; i++) {
4103                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4104                 }
4105                 /* zero alloc all unused TCs */
4106                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4107                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4108                 }
4109         }
4110         if (config_dcb_tx) {
4111                 /* Only an equally distributed Tx packet
4112                  * buffer strategy is supported.
4113                  */
4114                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4115                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4116
4117                 for (i = 0; i < nb_tcs; i++) {
4118                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4119                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4120                 }
4121                 /* Clear unused TCs, if any, to zero buffer size */
4122                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4123                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4124                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4125                 }
4126         }
4127
4128         /* Calculate traffic class credits */
4129         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4130                                 IXGBE_DCB_TX_CONFIG);
4131         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4132                                 IXGBE_DCB_RX_CONFIG);
4133
4134         if (config_dcb_rx) {
4135                 /* Unpack CEE standard containers */
4136                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4137                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4138                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4139                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4140                 /* Configure PG(ETS) RX */
4141                 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4142         }
4143
4144         if (config_dcb_tx) {
4145                 /* Unpack CEE standard containers */
4146                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4147                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4148                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4149                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4150                 /* Configure PG(ETS) TX */
4151                 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4152         }
4153
4154         /* Configure queue statistics registers */
4155         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4156
4157         /* Check if the PFC is supported */
4158         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4159                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4160                 for (i = 0; i < nb_tcs; i++) {
4161                         /*
4162                          * With 8 TCs, for example, the default high_water is 48
4163                          * and the default low_water is 16 (3/4 and 1/4 of pbsize).
4164                          */
4165                         hw->fc.high_water[i] = (pbsize * 3) / 4;
4166                         hw->fc.low_water[i] = pbsize / 4;
4167                         /* Enable pfc for this TC */
4168                         tc = &dcb_config->tc_config[i];
4169                         tc->pfc = ixgbe_dcb_pfc_enabled;
4170                 }
4171                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4172                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4173                         pfc_en &= 0x0F;
4174                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4175         }
4176
4177         return ret;
4178 }
4179
4180 /**
4181  * ixgbe_configure_dcb - Configure DCB  Hardware
4182  * @dev: pointer to rte_eth_dev
4183  */
4184 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4185 {
4186         struct ixgbe_dcb_config *dcb_cfg =
4187                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4188         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4189
4190         PMD_INIT_FUNC_TRACE();
4191
4192         /* check support mq_mode for DCB */
4193         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4194             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4195             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4196                 return;
4197
4198         if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4199                 return;
4200
4201         /** Configure DCB hardware **/
4202         ixgbe_dcb_hw_configure(dev, dcb_cfg);
4203 }
4204
4205 /*
4206  * VMDq is only supported on 10 GbE NICs.
4207  */
4208 static void
4209 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4210 {
4211         struct rte_eth_vmdq_rx_conf *cfg;
4212         struct ixgbe_hw *hw;
4213         enum rte_eth_nb_pools num_pools;
4214         uint32_t mrqc, vt_ctl, vlanctrl;
4215         uint32_t vmolr = 0;
4216         int i;
4217
4218         PMD_INIT_FUNC_TRACE();
4219         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4220         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4221         num_pools = cfg->nb_queue_pools;
4222
4223         ixgbe_rss_disable(dev);
4224
4225         /* MRQC: enable vmdq */
4226         mrqc = IXGBE_MRQC_VMDQEN;
4227         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4228
4229         /* PFVTCTL: turn on virtualisation and set the default pool */
4230         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4231         if (cfg->enable_default_pool)
4232                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4233         else
4234                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4235
4236         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4237
4238         for (i = 0; i < (int)num_pools; i++) {
4239                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4240                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4241         }
4242
4243         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4244         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4245         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4246         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4247
4248         /* VFTA - enable all vlan filters */
4249         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4250                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4251
4252         /* VFRE: pool enabling for receive - 64 */
4253         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4254         if (num_pools == ETH_64_POOLS)
4255                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4256
4257         /*
4258          * MPSAR - allow pools to read specific mac addresses
4259          * In this case, all pools should be able to read from mac addr 0
4260          */
4261         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4262         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4263
4264         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4265         for (i = 0; i < cfg->nb_pool_maps; i++) {
4266                 /* set vlan id in VF register and set the valid bit */
4267                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4268                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4269                 /*
4270                  * Put the allowed pools in VFB reg. As we only have 16 or 64
4271                  * pools, we only need to use the first half of the register
4272                  * i.e. bits 0-31
4273                  */
4274                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4275                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4276                                         (cfg->pool_map[i].pools & UINT32_MAX));
4277                 else
4278                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4279                                         ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4280
4281         }
4282
4283         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4284         if (cfg->enable_loop_back) {
4285                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4286                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4287                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4288         }
4289
4290         IXGBE_WRITE_FLUSH(hw);
4291 }
4292
4293 /*
4294  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4295  * @hw: pointer to hardware structure
4296  */
4297 static void
4298 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4299 {
4300         uint32_t reg;
4301         uint32_t q;
4302
4303         PMD_INIT_FUNC_TRACE();
4304         /* PF VF Transmit Enable */
4305         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4306         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4307
4308         /* Disable the Tx desc arbiter so that MTQC can be changed */
4309         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4310         reg |= IXGBE_RTTDCS_ARBDIS;
4311         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4312
4313         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4314         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4315
4316         /* Disable drop for all queues */
4317         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4318                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4319                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4320
4321         /* Enable the Tx desc arbiter */
4322         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4323         reg &= ~IXGBE_RTTDCS_ARBDIS;
4324         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4325
4326         IXGBE_WRITE_FLUSH(hw);
4327 }
4328
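/**
 * ixgbe_alloc_rx_queue_mbufs - Populate the RX ring with fresh mbufs
 * @rxq: pointer to the RX queue structure
 *
 * Allocates one mbuf per descriptor, writes its DMA address into the
 * descriptor and records it in the software ring. Returns -ENOMEM if
 * the mempool runs dry.
 */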
4329 static int __attribute__((cold))
4330 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4331 {
4332         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4333         uint64_t dma_addr;
4334         unsigned int i;
4335
4336         /* Initialize software ring entries */
4337         for (i = 0; i < rxq->nb_rx_desc; i++) {
4338                 volatile union ixgbe_adv_rx_desc *rxd;
4339                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4340
4341                 if (mbuf == NULL) {
4342                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4343                                      (unsigned) rxq->queue_id);
4344                         return -ENOMEM;
4345                 }
4346
4347                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4348                 mbuf->port = rxq->port_id;
4349
4350                 dma_addr =
4351                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4352                 rxd = &rxq->rx_ring[i];
4353                 rxd->read.hdr_addr = 0;
4354                 rxd->read.pkt_addr = dma_addr;
4355                 rxe[i].mbuf = mbuf;
4356         }
4357
4358         return 0;
4359 }
4360
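/**
 * ixgbe_config_vf_rss - Enable RSS together with SR-IOV
 * @dev: pointer to rte_eth_dev structure
 *
 * Programs the RSS hash configuration and selects the VMDq+RSS MRQC mode
 * matching the number of active pools (64 or 32).
 */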
4361 static int
4362 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4363 {
4364         struct ixgbe_hw *hw;
4365         uint32_t mrqc;
4366
4367         ixgbe_rss_configure(dev);
4368
4369         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4370
4371         /* MRQC: enable VF RSS */
4372         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4373         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4374         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4375         case ETH_64_POOLS:
4376                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4377                 break;
4378
4379         case ETH_32_POOLS:
4380                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4381                 break;
4382
4383         default:
4384                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4385                 return -EINVAL;
4386         }
4387
4388         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4389
4390         return 0;
4391 }
4392
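/**
 * ixgbe_config_vf_default - Default multi-queue RX setup for SR-IOV
 * @dev: pointer to rte_eth_dev structure
 *
 * Selects the plain VMDq MRQC mode matching the number of active pools
 * when no RSS/DCB scheme was requested.
 */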
4393 static int
4394 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4395 {
4396         struct ixgbe_hw *hw =
4397                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4398
4399         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4400         case ETH_64_POOLS:
4401                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4402                         IXGBE_MRQC_VMDQEN);
4403                 break;
4404
4405         case ETH_32_POOLS:
4406                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4407                         IXGBE_MRQC_VMDQRT4TCEN);
4408                 break;
4409
4410         case ETH_16_POOLS:
4411                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4412                         IXGBE_MRQC_VMDQRT8TCEN);
4413                 break;
4414         default:
4415                 PMD_INIT_LOG(ERR,
4416                         "invalid pool number in IOV mode");
4417                 break;
4418         }
4419         return 0;
4420 }
4421
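/**
 * ixgbe_dev_mq_rx_configure - Configure the RX multi-queue scheme
 * @dev: pointer to rte_eth_dev structure
 *
 * Picks between the RSS, VMDq and DCB RX setups depending on the requested
 * mq_mode and on whether SR-IOV is active. 82598 is handled elsewhere and
 * returns immediately.
 */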
4422 static int
4423 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4424 {
4425         struct ixgbe_hw *hw =
4426                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427
4428         if (hw->mac.type == ixgbe_mac_82598EB)
4429                 return 0;
4430
4431         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4432                 /*
4433                  * SRIOV inactive scheme
4434                  * any DCB/RSS w/o VMDq multi-queue setting
4435                  */
4436                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4437                 case ETH_MQ_RX_RSS:
4438                 case ETH_MQ_RX_DCB_RSS:
4439                 case ETH_MQ_RX_VMDQ_RSS:
4440                         ixgbe_rss_configure(dev);
4441                         break;
4442
4443                 case ETH_MQ_RX_VMDQ_DCB:
4444                         ixgbe_vmdq_dcb_configure(dev);
4445                         break;
4446
4447                 case ETH_MQ_RX_VMDQ_ONLY:
4448                         ixgbe_vmdq_rx_hw_configure(dev);
4449                         break;
4450
4451                 case ETH_MQ_RX_NONE:
4452                 default:
4453                         /* If mq_mode is none, disable RSS mode. */
4454                         ixgbe_rss_disable(dev);
4455                         break;
4456                 }
4457         } else {
4458                 /* SRIOV active scheme
4459                  * Support RSS together with SRIOV.
4460                  */
4461                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4462                 case ETH_MQ_RX_RSS:
4463                 case ETH_MQ_RX_VMDQ_RSS:
4464                         ixgbe_config_vf_rss(dev);
4465                         break;
4466                 case ETH_MQ_RX_VMDQ_DCB:
4467                 case ETH_MQ_RX_DCB:
4468                 /* In SRIOV, the configuration is the same as VMDq case */
4469                         ixgbe_vmdq_dcb_configure(dev);
4470                         break;
4471                 /* DCB/RSS together with SRIOV is not supported */
4472                 case ETH_MQ_RX_VMDQ_DCB_RSS:
4473                 case ETH_MQ_RX_DCB_RSS:
4474                         PMD_INIT_LOG(ERR,
4475                                 "Could not support DCB/RSS with VMDq & SRIOV");
4476                         return -1;
4477                 default:
4478                         ixgbe_config_vf_default(dev);
4479                         break;
4480                 }
4481         }
4482
4483         return 0;
4484 }
4485
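/**
 * ixgbe_dev_mq_tx_configure - Configure the TX multi-queue scheme
 * @dev: pointer to rte_eth_dev structure
 *
 * Programs MTQC for either the single packet buffer, VMDq-only or SR-IOV
 * pool layouts, with the TX descriptor arbiter disabled around the update.
 */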
4486 static int
4487 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4488 {
4489         struct ixgbe_hw *hw =
4490                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4491         uint32_t mtqc;
4492         uint32_t rttdcs;
4493
4494         if (hw->mac.type == ixgbe_mac_82598EB)
4495                 return 0;
4496
4497         /* disable arbiter before setting MTQC */
4498         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4499         rttdcs |= IXGBE_RTTDCS_ARBDIS;
4500         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4501
4502         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4503                 /*
4504                  * SRIOV inactive scheme
4505                  * any DCB w/o VMDq multi-queue setting
4506                  */
4507                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4508                         ixgbe_vmdq_tx_hw_configure(hw);
4509                 else {
4510                         mtqc = IXGBE_MTQC_64Q_1PB;
4511                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4512                 }
4513         } else {
4514                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4515
4516                 /*
4517                  * SRIOV active scheme
4518                  * FIXME: add support for DCB together with VMDq & SRIOV
4519                  */
4520                 case ETH_64_POOLS:
4521                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4522                         break;
4523                 case ETH_32_POOLS:
4524                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4525                         break;
4526                 case ETH_16_POOLS:
4527                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4528                                 IXGBE_MTQC_8TC_8TQ;
4529                         break;
4530                 default:
4531                         mtqc = IXGBE_MTQC_64Q_1PB;
4532                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4533                 }
4534                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4535         }
4536
4537         /* re-enable arbiter */
4538         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4539         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4540
4541         return 0;
4542 }
4543
4544 /**
4545  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4546  *
4547  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4548  * spec rev. 3.0 chapter 8.2.3.8.13.
4549  *
4550  * @pool Memory pool of the Rx queue
4551  */
4552 static inline uint32_t
4553 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4554 {
4555         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4556
4557         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4558         uint16_t maxdesc =
4559                 RTE_IPV4_MAX_PKT_LEN /
4560                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4561
4562         if (maxdesc >= 16)
4563                 return IXGBE_RSCCTL_MAXDESC_16;
4564         else if (maxdesc >= 8)
4565                 return IXGBE_RSCCTL_MAXDESC_8;
4566         else if (maxdesc >= 4)
4567                 return IXGBE_RSCCTL_MAXDESC_4;
4568         else
4569                 return IXGBE_RSCCTL_MAXDESC_1;
4570 }
4571
4572 /**
4573  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4574  * interrupt
4575  *
4576  * (Taken from FreeBSD tree)
4577  * (yes this is all very magic and confusing :)
4578  *
4579  * @dev port handle
4580  * @entry the register array entry
4581  * @vector the MSIX vector for this queue
4582  * @type RX/TX/MISC
4583  */
4584 static void
4585 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4586 {
4587         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4588         u32 ivar, index;
4589
4590         vector |= IXGBE_IVAR_ALLOC_VAL;
4591
4592         switch (hw->mac.type) {
4593
4594         case ixgbe_mac_82598EB:
4595                 if (type == -1)
4596                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4597                 else
4598                         entry += (type * 64);
4599                 index = (entry >> 2) & 0x1F;
4600                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4601                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4602                 ivar |= (vector << (8 * (entry & 0x3)));
4603                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4604                 break;
4605
4606         case ixgbe_mac_82599EB:
4607         case ixgbe_mac_X540:
4608                 if (type == -1) { /* MISC IVAR */
4609                         index = (entry & 1) * 8;
4610                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4611                         ivar &= ~(0xFF << index);
4612                         ivar |= (vector << index);
4613                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4614                 } else {        /* RX/TX IVARS */
4615                         index = (16 * (entry & 1)) + (8 * type);
4616                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4617                         ivar &= ~(0xFF << index);
4618                         ivar |= (vector << index);
4619                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4620                 }
4621
4622                 break;
4623
4624         default:
4625                 break;
4626         }
4627 }
4628
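/**
 * ixgbe_set_rx_function - Select the RX burst callback
 * @dev: pointer to rte_eth_dev structure
 *
 * Chooses between the LRO, scattered, vector, bulk-allocation and plain
 * RX paths based on the device configuration and the preconditions
 * checked at queue setup time.
 */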
4629 void __attribute__((cold))
4630 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4631 {
4632         uint16_t i, rx_using_sse;
4633         struct ixgbe_adapter *adapter =
4634                 (struct ixgbe_adapter *)dev->data->dev_private;
4635
4636         /*
4637          * In order to allow Vector Rx there are a few configuration
4638          * conditions to be met and Rx Bulk Allocation should be allowed.
4639          */
4640         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4641             !adapter->rx_bulk_alloc_allowed) {
4642                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4643                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
4644                                     "not enabled",
4645                              dev->data->port_id);
4646
4647                 adapter->rx_vec_allowed = false;
4648         }
4649
4650         /*
4651          * Initialize the appropriate LRO callback.
4652          *
4653          * If all queues satisfy the bulk allocation preconditions
4654          * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4655          * Otherwise use a single allocation version.
4656          */
4657         if (dev->data->lro) {
4658                 if (adapter->rx_bulk_alloc_allowed) {
4659                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4660                                            "allocation version");
4661                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4662                 } else {
4663                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4664                                            "allocation version");
4665                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4666                 }
4667         } else if (dev->data->scattered_rx) {
4668                 /*
4669                  * Set the non-LRO scattered callback: there are Vector and
4670                  * single allocation versions.
4671                  */
4672                 if (adapter->rx_vec_allowed) {
4673                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4674                                             "callback (port=%d).",
4675                                      dev->data->port_id);
4676
4677                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4678                 } else if (adapter->rx_bulk_alloc_allowed) {
4679                         PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4680                                            "allocation callback (port=%d).",
4681                                      dev->data->port_id);
4682                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4683                 } else {
4684                         PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4685                                             "single allocation) "
4686                                             "Scattered Rx callback "
4687                                             "(port=%d).",
4688                                      dev->data->port_id);
4689
4690                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4691                 }
4692         /*
4693          * Below we set "simple" callbacks according to port/queues parameters.
4694          * If parameters allow we are going to choose between the following
4695          * callbacks:
4696          *    - Vector
4697          *    - Bulk Allocation
4698          *    - Single buffer allocation (the simplest one)
4699          */
4700         } else if (adapter->rx_vec_allowed) {
4701                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4702                                     "burst size no less than %d (port=%d).",
4703                              RTE_IXGBE_DESCS_PER_LOOP,
4704                              dev->data->port_id);
4705
4706                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4707         } else if (adapter->rx_bulk_alloc_allowed) {
4708                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4709                                     "satisfied. Rx Burst Bulk Alloc function "
4710                                     "will be used on port=%d.",
4711                              dev->data->port_id);
4712
4713                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4714         } else {
4715                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4716                                     "satisfied, or Scattered Rx is requested "
4717                                     "(port=%d).",
4718                              dev->data->port_id);
4719
4720                 dev->rx_pkt_burst = ixgbe_recv_pkts;
4721         }
4722
4723         /* Propagate information about RX function choice through all queues. */
4724
4725         rx_using_sse =
4726                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4727                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4728
4729         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4730                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4731
4732                 rxq->rx_using_sse = rx_using_sse;
4733 #ifdef RTE_LIBRTE_SECURITY
4734                 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4735                                 DEV_RX_OFFLOAD_SECURITY);
4736 #endif
4737         }
4738 }
4739
4740 /**
4741  * ixgbe_set_rsc - configure RSC related port HW registers
4742  *
4743  * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4744  * of 82599 Spec (x540 configuration is virtually the same).
4745  *
4746  * @dev port handle
4747  *
4748  * Returns 0 in case of success or a non-zero error code
4749  */
4750 static int
4751 ixgbe_set_rsc(struct rte_eth_dev *dev)
4752 {
4753         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4754         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4755         struct rte_eth_dev_info dev_info = { 0 };
4756         bool rsc_capable = false;
4757         uint16_t i;
4758         uint32_t rdrxctl;
4759         uint32_t rfctl;
4760
4761         /* Sanity check */
4762         dev->dev_ops->dev_infos_get(dev, &dev_info);
4763         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4764                 rsc_capable = true;
4765
4766         if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4767                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4768                                    "support it");
4769                 return -EINVAL;
4770         }
4771
4772         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4773
4774         if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4775              (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4776                 /*
4777                  * According to chapter 4.6.7.2.1 of the Spec Rev.
4778                  * 3.0, RSC configuration requires HW CRC stripping to be
4779                  * enabled. If the user requested both HW CRC stripping off
4780                  * and RSC on - return an error.
4781                  */
4782                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4783                                     "is disabled");
4784                 return -EINVAL;
4785         }
4786
4787         /* RFCTL configuration  */
4788         rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4789         if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4790                 /*
4791                  * Since NFS packets coalescing is not supported - clear
4792                  * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4793                  * enabled.
4794                  */
4795                 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4796                            IXGBE_RFCTL_NFSR_DIS);
4797         else
4798                 rfctl |= IXGBE_RFCTL_RSC_DIS;
4799         IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4800
4801         /* If LRO hasn't been requested - we are done here. */
4802         if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4803                 return 0;
4804
4805         /* Set RDRXCTL.RSCACKC bit */
4806         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4807         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4808         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4809
4810         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4811         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4812                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4813                 uint32_t srrctl =
4814                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4815                 uint32_t rscctl =
4816                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4817                 uint32_t psrtype =
4818                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4819                 uint32_t eitr =
4820                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4821
4822                 /*
4823                  * ixgbe PMD doesn't support header-split at the moment.
4824                  *
4825                  * Following the 4.6.7.2.1 chapter of the 82599/x540
4826                  * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4827                  * should be configured even if header split is not
4828                  * enabled. We will configure it to 128 bytes following the
4829                  * recommendation in the spec.
4830                  */
4831                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4832                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4833                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4834
4835                 /*
4836                  * TODO: Consider setting the Receive Descriptor Minimum
4837                  * Threshold Size for an RSC case. This is not an obviously
4838                  * beneficial option, but it may be worth considering...
4839                  */
4840
4841                 rscctl |= IXGBE_RSCCTL_RSCEN;
4842                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4843                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4844
4845                 /*
4846                  * RSC: Set ITR interval corresponding to 2K ints/s.
4847                  *
4848                  * Full-sized RSC aggregations for a 10Gb/s link will
4849                  * arrive at about a 20K aggregations/s rate.
4850                  *
4851                  * A 2K ints/s rate will cause only 10% of the
4852                  * aggregations to be closed due to the interrupt timer
4853                  * expiration when streaming at wire speed.
4854                  *
4855                  * For a sparse streaming case this setting will yield
4856                  * at most 500us latency for a single RSC aggregation.
4857                  */
4858                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4859                 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4860                 eitr |= IXGBE_EITR_CNT_WDIS;
4861
4862                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4863                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4864                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4865                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4866
4867                 /*
4868                  * RSC requires the mapping of the queue to the
4869                  * interrupt vector.
4870                  */
4871                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4872         }
4873
4874         dev->data->lro = 1;
4875
4876         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4877
4878         return 0;
4879 }
4880
4881 /*
4882  * Initializes Receive Unit.
4883  */
4884 int __attribute__((cold))
4885 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4886 {
4887         struct ixgbe_hw     *hw;
4888         struct ixgbe_rx_queue *rxq;
4889         uint64_t bus_addr;
4890         uint32_t rxctrl;
4891         uint32_t fctrl;
4892         uint32_t hlreg0;
4893         uint32_t maxfrs;
4894         uint32_t srrctl;
4895         uint32_t rdrxctl;
4896         uint32_t rxcsum;
4897         uint16_t buf_size;
4898         uint16_t i;
4899         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4900         int rc;
4901
4902         PMD_INIT_FUNC_TRACE();
4903         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4904
4905         /*
4906          * Make sure receives are disabled while setting
4907          * up the RX context (registers, descriptor rings, etc.).
4908          */
4909         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4910         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4911
4912         /* Enable receipt of broadcast frames */
4913         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4914         fctrl |= IXGBE_FCTRL_BAM;
4915         fctrl |= IXGBE_FCTRL_DPF;
4916         fctrl |= IXGBE_FCTRL_PMCF;
4917         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4918
4919         /*
4920          * Configure CRC stripping, if any.
4921          */
4922         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4923         if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4924                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4925         else
4926                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4927
4928         /*
4929          * Configure jumbo frame support, if any.
4930          */
4931         if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4932                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4933                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4934                 maxfrs &= 0x0000FFFF;
4935                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4936                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4937         } else
4938                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4939
4940         /*
4941          * If loopback mode is configured, set LPBK bit.
4942          */
4943         if (dev->data->dev_conf.lpbk_mode != 0) {
4944                 rc = ixgbe_check_supported_loopback_mode(dev);
4945                 if (rc < 0) {
4946                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4947                         return rc;
4948                 }
4949                 hlreg0 |= IXGBE_HLREG0_LPBK;
4950         } else {
4951                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4952         }
4953
4954         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4955
4956         /*
4957          * Assume no header split and no VLAN strip support
4958          * on any Rx queue first.
4959          */
4960         rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4961         /* Setup RX queues */
4962         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4963                 rxq = dev->data->rx_queues[i];
4964
4965                 /*
4966                  * Reset crc_len in case it was changed after queue setup by a
4967                  * call to configure.
4968                  */
4969                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4970                         rxq->crc_len = RTE_ETHER_CRC_LEN;
4971                 else
4972                         rxq->crc_len = 0;
4973
4974                 /* Setup the Base and Length of the Rx Descriptor Rings */
4975                 bus_addr = rxq->rx_ring_phys_addr;
4976                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4977                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4978                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4979                                 (uint32_t)(bus_addr >> 32));
4980                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4981                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4982                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4983                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4984
4985                 /* Configure the SRRCTL register */
4986                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4987
4988                 /* Set if packets are dropped when no descriptors available */
4989                 if (rxq->drop_en)
4990                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4991
4992                 /*
4993                  * Configure the RX buffer size in the BSIZEPACKET field of
4994                  * the SRRCTL register of the queue.
4995                  * The value is in 1 KB resolution. Valid values can be from
4996                  * 1 KB to 16 KB.
4997                  */
4998                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4999                         RTE_PKTMBUF_HEADROOM);
5000                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5001                            IXGBE_SRRCTL_BSIZEPKT_MASK);
5002
5003                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5004
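                /*
                 * Read back the buffer size actually programmed in SRRCTL
                 * (1 KB granularity) so that the scattered Rx decision
                 * below matches the hardware setting.
                 */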
5005                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5006                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5007
5008                 /* Add dual VLAN tag length when checking whether scattered Rx is needed */
5009                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5010                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5011                         dev->data->scattered_rx = 1;
5012                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5013                         rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5014         }
5015
5016         if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5017                 dev->data->scattered_rx = 1;
5018
5019         /*
5020          * Device configured with multiple RX queues.
5021          */
5022         ixgbe_dev_mq_rx_configure(dev);
5023
5024         /*
5025          * Setup the Checksum Register.
5026          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5027          * Enable IP/L4 checksum computation by hardware if requested to do so.
5028          */
5029         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5030         rxcsum |= IXGBE_RXCSUM_PCSD;
5031         if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5032                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5033         else
5034                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5035
5036         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5037
5038         if (hw->mac.type == ixgbe_mac_82599EB ||
5039             hw->mac.type == ixgbe_mac_X540) {
5040                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5041                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5042                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5043                 else
5044                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5045                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5046                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5047         }
5048
5049         rc = ixgbe_set_rsc(dev);
5050         if (rc)
5051                 return rc;
5052
5053         ixgbe_set_rx_function(dev);
5054
5055         return 0;
5056 }
5057
5058 /*
5059  * Initializes Transmit Unit.
5060  */
5061 void __attribute__((cold))
5062 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5063 {
5064         struct ixgbe_hw     *hw;
5065         struct ixgbe_tx_queue *txq;
5066         uint64_t bus_addr;
5067         uint32_t hlreg0;
5068         uint32_t txctrl;
5069         uint16_t i;
5070
5071         PMD_INIT_FUNC_TRACE();
5072         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5073
5074         /* Enable TX CRC (checksum offload requirement) and hw padding
5075          * (TSO requirement)
5076          */
5077         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5078         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5079         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5080
5081         /* Setup the Base and Length of the Tx Descriptor Rings */
5082         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5083                 txq = dev->data->tx_queues[i];
5084
5085                 bus_addr = txq->tx_ring_phys_addr;
5086                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5087                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5088                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5089                                 (uint32_t)(bus_addr >> 32));
5090                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5091                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5092                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5093                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5094                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5095
5096                 /*
5097                  * Disable Tx Head Writeback RO bit, since this hoses
5098                  * bookkeeping if things aren't delivered in order.
5099                  */
5100                 switch (hw->mac.type) {
5101                 case ixgbe_mac_82598EB:
5102                         txctrl = IXGBE_READ_REG(hw,
5103                                                 IXGBE_DCA_TXCTRL(txq->reg_idx));
5104                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5105                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5106                                         txctrl);
5107                         break;
5108
5109                 case ixgbe_mac_82599EB:
5110                 case ixgbe_mac_X540:
5111                 case ixgbe_mac_X550:
5112                 case ixgbe_mac_X550EM_x:
5113                 case ixgbe_mac_X550EM_a:
5114                 default:
5115                         txctrl = IXGBE_READ_REG(hw,
5116                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5117                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5118                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5119                                         txctrl);
5120                         break;
5121                 }
5122         }
5123
5124         /* Configure the device for the requested multi-queue Tx mode. */
5125         ixgbe_dev_mq_tx_configure(dev);
5126 }
5127
5128 /*
5129  * Check if requested loopback mode is supported
5130  */
5131 int
5132 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5133 {
5134         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5135
5136         if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5137                 if (hw->mac.type == ixgbe_mac_82599EB ||
5138                      hw->mac.type == ixgbe_mac_X540 ||
5139                      hw->mac.type == ixgbe_mac_X550 ||
5140                      hw->mac.type == ixgbe_mac_X550EM_x ||
5141                      hw->mac.type == ixgbe_mac_X550EM_a)
5142                         return 0;
5143
5144         return -ENOTSUP;
5145 }
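     /*
      * Illustrative application-side sketch (assumptions, not taken from this
      * file): Tx->Rx loopback is requested through the port configuration
      * before the port is started, and this helper rejects unsupported MAC
      * types:
      *
      *   struct rte_eth_conf conf;
      *
      *   memset(&conf, 0, sizeof(conf));
      *   conf.lpbk_mode = IXGBE_LPBK_TX_RX;
      *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
      *
      * port_id, nb_rxq and nb_txq are placeholders; an application outside
      * this driver would pass the numeric loopback mode value rather than the
      * driver-internal IXGBE_LPBK_TX_RX define.
      */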
5146
5147 /*
5148  * Set up link for 82599 loopback mode Tx->Rx.
5149  */
5150 static inline void __attribute__((cold))
5151 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5152 {
5153         PMD_INIT_FUNC_TRACE();
5154
5155         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5156                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5157                                 IXGBE_SUCCESS) {
5158                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5159                         /* ignore error */
5160                         return;
5161                 }
5162         }
5163
5164         /* Restart link */
5165         IXGBE_WRITE_REG(hw,
5166                         IXGBE_AUTOC,
5167                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5168         ixgbe_reset_pipeline_82599(hw);
5169
5170         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5171         msec_delay(50);
5172 }
5173
5175 /*
5176  * Start Transmit and Receive Units.
5177  */
5178 int __attribute__((cold))
5179 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5180 {
5181         struct ixgbe_hw     *hw;
5182         struct ixgbe_tx_queue *txq;
5183         struct ixgbe_rx_queue *rxq;
5184         uint32_t txdctl;
5185         uint32_t dmatxctl;
5186         uint32_t rxctrl;
5187         uint16_t i;
5188         int ret = 0;
5189
5190         PMD_INIT_FUNC_TRACE();
5191         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5192
5193         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5194                 txq = dev->data->tx_queues[i];
5195                 /* Setup Transmit Threshold Registers */
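             /*
              * Packing note (derived from the shifts below): pthresh
              * occupies TXDCTL bits 6:0, hthresh bits 14:8 and wthresh
              * bits 22:16. For example, pthresh = 32 with hthresh and
              * wthresh at 0 ORs in 0x00000020 (illustrative values).
              */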
5196                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5197                 txdctl |= txq->pthresh & 0x7F;
5198                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5199                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5200                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5201         }
5202
5203         if (hw->mac.type != ixgbe_mac_82598EB) {
5204                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5205                 dmatxctl |= IXGBE_DMATXCTL_TE;
5206                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5207         }
5208
5209         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5210                 txq = dev->data->tx_queues[i];
5211                 if (!txq->tx_deferred_start) {
5212                         ret = ixgbe_dev_tx_queue_start(dev, i);
5213                         if (ret < 0)
5214                                 return ret;
5215                 }
5216         }
5217
5218         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5219                 rxq = dev->data->rx_queues[i];
5220                 if (!rxq->rx_deferred_start) {
5221                         ret = ixgbe_dev_rx_queue_start(dev, i);
5222                         if (ret < 0)
5223                                 return ret;
5224                 }
5225         }
5226
5227         /* Enable Receive engine */
5228         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5229         if (hw->mac.type == ixgbe_mac_82598EB)
5230                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5231         rxctrl |= IXGBE_RXCTRL_RXEN;
5232         hw->mac.ops.enable_rx_dma(hw, rxctrl);
5233
5234         /* If loopback mode is enabled, set up the link accordingly */
5235         if (dev->data->dev_conf.lpbk_mode != 0) {
5236                 if (hw->mac.type == ixgbe_mac_82599EB)
5237                         ixgbe_setup_loopback_link_82599(hw);
5238                 else if (hw->mac.type == ixgbe_mac_X540 ||
5239                      hw->mac.type == ixgbe_mac_X550 ||
5240                      hw->mac.type == ixgbe_mac_X550EM_x ||
5241                      hw->mac.type == ixgbe_mac_X550EM_a)
5242                         ixgbe_setup_loopback_link_x540_x550(hw, true);
5243         }
5244
5245 #ifdef RTE_LIBRTE_SECURITY
5246         if ((dev->data->dev_conf.rxmode.offloads &
5247                         DEV_RX_OFFLOAD_SECURITY) ||
5248                 (dev->data->dev_conf.txmode.offloads &
5249                         DEV_TX_OFFLOAD_SECURITY)) {
5250                 ret = ixgbe_crypto_enable_ipsec(dev);
5251                 if (ret != 0) {
5252                         PMD_DRV_LOG(ERR,
5253                                     "ixgbe_crypto_enable_ipsec fails with %d.",
5254                                     ret);
5255                         return ret;
5256                 }
5257         }
5258 #endif
5259
5260         return 0;
5261 }
5262
5263 /*
5264  * Start Receive Units for specified queue.
5265  */
5266 int __attribute__((cold))
5267 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5268 {
5269         struct ixgbe_hw     *hw;
5270         struct ixgbe_rx_queue *rxq;
5271         uint32_t rxdctl;
5272         int poll_ms;
5273
5274         PMD_INIT_FUNC_TRACE();
5275         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5276
5277         rxq = dev->data->rx_queues[rx_queue_id];
5278
5279         /* Allocate buffers for descriptor rings */
5280         if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5281                 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5282                              rx_queue_id);
5283                 return -1;
5284         }
5285         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5286         rxdctl |= IXGBE_RXDCTL_ENABLE;
5287         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5288
5289         /* Wait until RX Enable ready */
5290         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5291         do {
5292                 rte_delay_ms(1);
5293                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5294         } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5295         if (!poll_ms)
5296                 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5297         rte_wmb();
5298         IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5299         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5300         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5301
5302         return 0;
5303 }
5304
5305 /*
5306  * Stop Receive Units for specified queue.
5307  */
5308 int __attribute__((cold))
5309 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5310 {
5311         struct ixgbe_hw     *hw;
5312         struct ixgbe_adapter *adapter =
5313                 (struct ixgbe_adapter *)dev->data->dev_private;
5314         struct ixgbe_rx_queue *rxq;
5315         uint32_t rxdctl;
5316         int poll_ms;
5317
5318         PMD_INIT_FUNC_TRACE();
5319         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5320
5321         rxq = dev->data->rx_queues[rx_queue_id];
5322
5323         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5324         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5325         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5326
5327         /* Wait until RX Enable bit clear */
5328         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5329         do {
5330                 rte_delay_ms(1);
5331                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5332         } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5333         if (!poll_ms)
5334                 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5335
5336         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5337
5338         ixgbe_rx_queue_release_mbufs(rxq);
5339         ixgbe_reset_rx_queue(adapter, rxq);
5340         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5341
5342         return 0;
5343 }
5344
5345
5346 /*
5347  * Start Transmit Units for specified queue.
5348  */
5349 int __attribute__((cold))
5350 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5351 {
5352         struct ixgbe_hw     *hw;
5353         struct ixgbe_tx_queue *txq;
5354         uint32_t txdctl;
5355         int poll_ms;
5356
5357         PMD_INIT_FUNC_TRACE();
5358         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5359
5360         txq = dev->data->tx_queues[tx_queue_id];
5361         IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5362         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5363         txdctl |= IXGBE_TXDCTL_ENABLE;
5364         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5365
5366         /* Wait until TX Enable ready */
5367         if (hw->mac.type == ixgbe_mac_82599EB) {
5368                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5369                 do {
5370                         rte_delay_ms(1);
5371                         txdctl = IXGBE_READ_REG(hw,
5372                                 IXGBE_TXDCTL(txq->reg_idx));
5373                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5374                 if (!poll_ms)
5375                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5376                                 tx_queue_id);
5377         }
5378         rte_wmb();
5379         IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5380         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5381
5382         return 0;
5383 }
5384
5385 /*
5386  * Stop Transmit Units for specified queue.
5387  */
5388 int __attribute__((cold))
5389 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5390 {
5391         struct ixgbe_hw     *hw;
5392         struct ixgbe_tx_queue *txq;
5393         uint32_t txdctl;
5394         uint32_t txtdh, txtdt;
5395         int poll_ms;
5396
5397         PMD_INIT_FUNC_TRACE();
5398         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5399
5400         txq = dev->data->tx_queues[tx_queue_id];
5401
5402         /* Wait until TX queue is empty */
5403         if (hw->mac.type == ixgbe_mac_82599EB) {
5404                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5405                 do {
5406                         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5407                         txtdh = IXGBE_READ_REG(hw,
5408                                                IXGBE_TDH(txq->reg_idx));
5409                         txtdt = IXGBE_READ_REG(hw,
5410                                                IXGBE_TDT(txq->reg_idx));
5411                 } while (--poll_ms && (txtdh != txtdt));
5412                 if (!poll_ms)
5413                         PMD_INIT_LOG(ERR,
5414                                 "Tx Queue %d is not empty when stopping.",
5415                                 tx_queue_id);
5416         }
5417
5418         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5419         txdctl &= ~IXGBE_TXDCTL_ENABLE;
5420         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5421
5422         /* Wait until TX Enable bit clear */
5423         if (hw->mac.type == ixgbe_mac_82599EB) {
5424                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5425                 do {
5426                         rte_delay_ms(1);
5427                         txdctl = IXGBE_READ_REG(hw,
5428                                                 IXGBE_TXDCTL(txq->reg_idx));
5429                 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5430                 if (!poll_ms)
5431                         PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5432                                 tx_queue_id);
5433         }
5434
5435         if (txq->ops != NULL) {
5436                 txq->ops->release_mbufs(txq);
5437                 txq->ops->reset(txq);
5438         }
5439         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5440
5441         return 0;
5442 }
5443
5444 void
5445 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5446         struct rte_eth_rxq_info *qinfo)
5447 {
5448         struct ixgbe_rx_queue *rxq;
5449
5450         rxq = dev->data->rx_queues[queue_id];
5451
5452         qinfo->mp = rxq->mb_pool;
5453         qinfo->scattered_rx = dev->data->scattered_rx;
5454         qinfo->nb_desc = rxq->nb_rx_desc;
5455
5456         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5457         qinfo->conf.rx_drop_en = rxq->drop_en;
5458         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5459         qinfo->conf.offloads = rxq->offloads;
5460 }
5461
5462 void
5463 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5464         struct rte_eth_txq_info *qinfo)
5465 {
5466         struct ixgbe_tx_queue *txq;
5467
5468         txq = dev->data->tx_queues[queue_id];
5469
5470         qinfo->nb_desc = txq->nb_tx_desc;
5471
5472         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5473         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5474         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5475
5476         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5477         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5478         qinfo->conf.offloads = txq->offloads;
5479         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5480 }
5481
5482 /*
5483  * [VF] Initializes Receive Unit.
5484  */
5485 int __attribute__((cold))
5486 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5487 {
5488         struct ixgbe_hw     *hw;
5489         struct ixgbe_rx_queue *rxq;
5490         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5491         uint64_t bus_addr;
5492         uint32_t srrctl, psrtype = 0;
5493         uint16_t buf_size;
5494         uint16_t i;
5495         int ret;
5496
5497         PMD_INIT_FUNC_TRACE();
5498         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5499
5500         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5501                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5502                         "it must be a power of 2");
5503                 return -1;
5504         }
5505
5506         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5507                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5508                         "it must be less than or equal to %d",
5509                         hw->mac.max_rx_queues);
5510                 return -1;
5511         }
5512
5513         /*
5514          * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5515          * disables VF packet reception if the PF MTU is > 1500. This works
5516          * around an 82599 limitation that forces the PF and all VFs to share
5517          * the same MTU.
5518          * The PF driver re-enables VF packet reception only when the VF
5519          * driver issues an IXGBE_VF_SET_LPE request.
5520          * In the meantime, the VF device cannot be used, even if the VF driver
5521          * and the guest VM network stack are ready to accept packets with a
5522          * size up to the PF MTU.
5523          * As a workaround to this PF behaviour, always call
5524          * ixgbevf_rlpml_set_vf, even if jumbo frames are not used, so that
5525          * packet reception on the VF works in all cases.
5526          */
5527         ixgbevf_rlpml_set_vf(hw,
5528                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5529
5530         /*
5531          * Assume no header split and no VLAN stripping support
5532          * on any Rx queue to begin with.
5533          */
5534         rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5535         /* Setup RX queues */
5536         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5537                 rxq = dev->data->rx_queues[i];
5538
5539                 /* Allocate buffers for descriptor rings */
5540                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5541                 if (ret)
5542                         return ret;
5543
5544                 /* Setup the Base and Length of the Rx Descriptor Rings */
5545                 bus_addr = rxq->rx_ring_phys_addr;
5546
5547                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5548                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5549                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5550                                 (uint32_t)(bus_addr >> 32));
5551                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5552                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5553                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5554                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5555
5557                 /* Configure the SRRCTL register */
5558                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5559
5560                 /* Drop packets when out of Rx descriptors, if requested */
5561                 if (rxq->drop_en)
5562                         srrctl |= IXGBE_SRRCTL_DROP_EN;
5563
5564                 /*
5565                  * Configure the RX buffer size in the BSIZEPACKET field of
5566                  * the SRRCTL register of the queue.
5567                  * The value is in 1 KB resolution. Valid values can be from
5568                  * 1 KB to 16 KB.
5569                  */
5570                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5571                         RTE_PKTMBUF_HEADROOM);
5572                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5573                            IXGBE_SRRCTL_BSIZEPKT_MASK);
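             /*
              * Worked example (assuming the common 128-byte
              * RTE_PKTMBUF_HEADROOM and a 2176-byte mbuf data room):
              * buf_size = 2176 - 128 = 2048, so the BSIZEPACKET field
              * becomes 2048 / 1024 = 2, i.e. 2 KB receive buffers.
              */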
5574
5575                 /*
5576                  * VF: write the per-queue SRRCTL register via its VF alias
5577                  */
5578                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5579
5580                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5581                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5582
5583                 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5584                     /* account for two VLAN tags (dual VLAN) */
5585                     (rxmode->max_rx_pkt_len +
5586                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5587                         if (!dev->data->scattered_rx)
5588                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5589                         dev->data->scattered_rx = 1;
5590                 }
5591
5592                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5593                         rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5594         }
5595
5596         /* Set RQPL for VF RSS according to the number of Rx queues */
5597         psrtype |= (dev->data->nb_rx_queues >> 1) <<
5598                 IXGBE_PSRTYPE_RQPL_SHIFT;
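     /*
      * Arithmetic note (illustrative): with 4 Rx queues the expression
      * above contributes (4 >> 1) = 2 to the RQPL field.
      */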
5599         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5600
5601         ixgbe_set_rx_function(dev);
5602
5603         return 0;
5604 }
5605
5606 /*
5607  * [VF] Initializes Transmit Unit.
5608  */
5609 void __attribute__((cold))
5610 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5611 {
5612         struct ixgbe_hw     *hw;
5613         struct ixgbe_tx_queue *txq;
5614         uint64_t bus_addr;
5615         uint32_t txctrl;
5616         uint16_t i;
5617
5618         PMD_INIT_FUNC_TRACE();
5619         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5620
5621         /* Setup the Base and Length of the Tx Descriptor Rings */
5622         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5623                 txq = dev->data->tx_queues[i];
5624                 bus_addr = txq->tx_ring_phys_addr;
5625                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5626                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5627                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5628                                 (uint32_t)(bus_addr >> 32));
5629                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5630                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5631                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5632                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5633                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5634
5635                 /*
5636                  * Disable the Tx Head Writeback relaxed-ordering (RO) bit:
5637                  * it breaks the bookkeeping if writes arrive out of order.
5638                  */
5639                 txctrl = IXGBE_READ_REG(hw,
5640                                 IXGBE_VFDCA_TXCTRL(i));
5641                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5642                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5643                                 txctrl);
5644         }
5645 }
5646
5647 /*
5648  * [VF] Start Transmit and Receive Units.
5649  */
5650 void __attribute__((cold))
5651 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5652 {
5653         struct ixgbe_hw     *hw;
5654         struct ixgbe_tx_queue *txq;
5655         struct ixgbe_rx_queue *rxq;
5656         uint32_t txdctl;
5657         uint32_t rxdctl;
5658         uint16_t i;
5659         int poll_ms;
5660
5661         PMD_INIT_FUNC_TRACE();
5662         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5663
5664         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5665                 txq = dev->data->tx_queues[i];
5666                 /* Setup Transmit Threshold Registers */
5667                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5668                 txdctl |= txq->pthresh & 0x7F;
5669                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5670                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5671                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5672         }
5673
5674         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5675
5676                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5677                 txdctl |= IXGBE_TXDCTL_ENABLE;
5678                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5679
5680                 poll_ms = 10;
5681                 /* Wait until TX Enable ready */
5682                 do {
5683                         rte_delay_ms(1);
5684                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5685                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5686                 if (!poll_ms)
5687                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5688         }
5689         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5690
5691                 rxq = dev->data->rx_queues[i];
5692
5693                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5694                 rxdctl |= IXGBE_RXDCTL_ENABLE;
5695                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5696
5697                 /* Wait until RX Enable ready */
5698                 poll_ms = 10;
5699                 do {
5700                         rte_delay_ms(1);
5701                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5702                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5703                 if (!poll_ms)
5704                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5705                 rte_wmb();
5706                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5707
5708         }
5709 }
5710
5711 int
5712 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5713                     const struct rte_flow_action_rss *in)
5714 {
5715         if (in->key_len > RTE_DIM(out->key) ||
5716             in->queue_num > RTE_DIM(out->queue))
5717                 return -EINVAL;
5718         out->conf = (struct rte_flow_action_rss){
5719                 .func = in->func,
5720                 .level = in->level,
5721                 .types = in->types,
5722                 .key_len = in->key_len,
5723                 .queue_num = in->queue_num,
5724                 .key = memcpy(out->key, in->key, in->key_len),
5725                 .queue = memcpy(out->queue, in->queue,
5726                                 sizeof(*in->queue) * in->queue_num),
5727         };
5728         return 0;
5729 }
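     /*
      * Illustrative usage sketch for the copy helper above (hypothetical
      * values, not taken from this file):
      *
      *   static uint8_t rss_key[40];
      *   static uint16_t rss_queues[] = { 0, 1 };
      *   struct rte_flow_action_rss in = {
      *           .types = ETH_RSS_IP,
      *           .key_len = sizeof(rss_key),
      *           .key = rss_key,
      *           .queue_num = RTE_DIM(rss_queues),
      *           .queue = rss_queues,
      *   };
      *   struct ixgbe_rte_flow_rss_conf out;
      *
      *   int ret = ixgbe_rss_conf_init(&out, &in);
      *
      * On success (ret == 0) 'out' holds a deep copy of the key and queue
      * list that ixgbe_config_rss_filter() below can consume.
      */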
5730
5731 int
5732 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5733                       const struct rte_flow_action_rss *with)
5734 {
5735         return (comp->func == with->func &&
5736                 comp->level == with->level &&
5737                 comp->types == with->types &&
5738                 comp->key_len == with->key_len &&
5739                 comp->queue_num == with->queue_num &&
5740                 !memcmp(comp->key, with->key, with->key_len) &&
5741                 !memcmp(comp->queue, with->queue,
5742                         sizeof(*with->queue) * with->queue_num));
5743 }
5744
5745 int
5746 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5747                 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5748 {
5749         struct ixgbe_hw *hw;
5750         uint32_t reta;
5751         uint16_t i;
5752         uint16_t j;
5753         uint16_t sp_reta_size;
5754         uint32_t reta_reg;
5755         struct rte_eth_rss_conf rss_conf = {
5756                 .rss_key = conf->conf.key_len ?
5757                         (void *)(uintptr_t)conf->conf.key : NULL,
5758                 .rss_key_len = conf->conf.key_len,
5759                 .rss_hf = conf->conf.types,
5760         };
5761         struct ixgbe_filter_info *filter_info =
5762                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5763
5764         PMD_INIT_FUNC_TRACE();
5765         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5766
5767         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5768
5769         if (!add) {
5770                 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5771                                           &conf->conf)) {
5772                         ixgbe_rss_disable(dev);
5773                         memset(&filter_info->rss_info, 0,
5774                                 sizeof(struct ixgbe_rte_flow_rss_conf));
5775                         return 0;
5776                 }
5777                 return -EINVAL;
5778         }
5779
5780         if (filter_info->rss_info.conf.queue_num)
5781                 return -EINVAL;
5782         /* Fill in redirection table
5783          * The byte-swap is needed because NIC registers are in
5784          * little-endian order.
5785          */
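     /*
      * Packing example (illustrative): with conf->conf.queue = { 0, 1 }
      * the first four table entries accumulate to reta = 0x00010001, and
      * rte_bswap32() turns that into 0x01000100 so that entry 0 lands in
      * the lowest-order byte of the little-endian register.
      */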
5786         reta = 0;
5787         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5788                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5789
5790                 if (j == conf->conf.queue_num)
5791                         j = 0;
5792                 reta = (reta << 8) | conf->conf.queue[j];
5793                 if ((i & 3) == 3)
5794                         IXGBE_WRITE_REG(hw, reta_reg,
5795                                         rte_bswap32(reta));
5796         }
5797
5798         /* Configure the RSS key and the RSS protocols used to compute
5799          * the RSS hash of input packets.
5800          */
5801         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5802                 ixgbe_rss_disable(dev);
5803                 return 0;
5804         }
5805         if (rss_conf.rss_key == NULL)
5806                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5807         ixgbe_hw_rss_hash_set(hw, &rss_conf);
5808
5809         if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5810                 return -EINVAL;
5811
5812         return 0;
5813 }
5814
5815 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5816 __rte_weak int
5817 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5818 {
5819         return -1;
5820 }
5821
5822 __rte_weak uint16_t
5823 ixgbe_recv_pkts_vec(
5824         void __rte_unused *rx_queue,
5825         struct rte_mbuf __rte_unused **rx_pkts,
5826         uint16_t __rte_unused nb_pkts)
5827 {
5828         return 0;
5829 }
5830
5831 __rte_weak uint16_t
5832 ixgbe_recv_scattered_pkts_vec(
5833         void __rte_unused *rx_queue,
5834         struct rte_mbuf __rte_unused **rx_pkts,
5835         uint16_t __rte_unused nb_pkts)
5836 {
5837         return 0;
5838 }
5839
5840 __rte_weak int
5841 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5842 {
5843         return -1;
5844 }