net: add rte prefix to TCP structure
[dpdk.git] / drivers / net / ixgbe / ixgbe_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5
6 #include <sys/queue.h>
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdint.h>
13 #include <stdarg.h>
14 #include <unistd.h>
15 #include <inttypes.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
23 #include <rte_pci.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
38 #include <rte_udp.h>
39 #include <rte_tcp.h>
40 #include <rte_sctp.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
43 #include <rte_ip.h>
44 #include <rte_net.h>
45
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
53
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
56 #else
57 #define IXGBE_TX_IEEE1588_TMST 0
58 #endif
59 /* Bit mask to indicate which bits are required for building the TX context */
60 #define IXGBE_TX_OFFLOAD_MASK (                  \
61                 PKT_TX_OUTER_IPV6 |              \
62                 PKT_TX_OUTER_IPV4 |              \
63                 PKT_TX_IPV6 |                    \
64                 PKT_TX_IPV4 |                    \
65                 PKT_TX_VLAN_PKT |                \
66                 PKT_TX_IP_CKSUM |                \
67                 PKT_TX_L4_MASK |                 \
68                 PKT_TX_TCP_SEG |                 \
69                 PKT_TX_MACSEC |                  \
70                 PKT_TX_OUTER_IP_CKSUM |          \
71                 PKT_TX_SEC_OFFLOAD |             \
72                 IXGBE_TX_IEEE1588_TMST)
73
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75                 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
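/*
 * Note: IXGBE_TX_OFFLOAD_MASK is a subset of PKT_TX_OFFLOAD_MASK, so the
 * XOR above is effectively a set difference: it yields exactly the offload
 * flags this PMD does not support. ixgbe_prep_pkts() rejects any packet
 * carrying one of those flags.
 */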
76
77 #if 1
78 #define RTE_PMD_USE_PREFETCH
79 #endif
80
81 #ifdef RTE_PMD_USE_PREFETCH
82 /*
83  * Prefetch a cache line into all cache levels.
84  */
85 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
86 #else
87 #define rte_ixgbe_prefetch(p)   do {} while (0)
88 #endif
89
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
92                                     uint16_t nb_pkts);
93 #endif
94
95 /*********************************************************************
96  *
97  *  TX functions
98  *
99  **********************************************************************/
100
101 /*
102  * Check for descriptors with their DD bit set and free mbufs.
103  * Return the total number of buffers freed.
104  */
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
107 {
108         struct ixgbe_tx_entry *txep;
109         uint32_t status;
110         int i, nb_free = 0;
111         struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
112
113         /* check DD bit on threshold descriptor */
114         status = txq->tx_ring[txq->tx_next_dd].wb.status;
115         if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
116                 return 0;
117
118         /*
119          * first buffer to free from S/W ring is at index
120          * tx_next_dd - (tx_rs_thresh-1)
121          */
122         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
123
124         for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125                 /* free buffers one at a time */
126                 m = rte_pktmbuf_prefree_seg(txep->mbuf);
127                 txep->mbuf = NULL;
128
129                 if (unlikely(m == NULL))
130                         continue;
131
132                 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133                     (nb_free > 0 && m->pool != free[0]->pool)) {
134                         rte_mempool_put_bulk(free[0]->pool,
135                                              (void **)free, nb_free);
136                         nb_free = 0;
137                 }
138
139                 free[nb_free++] = m;
140         }
141
142         if (nb_free > 0)
143                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
144
145         /* buffers were freed, update counters */
146         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148         if (txq->tx_next_dd >= txq->nb_tx_desc)
149                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
150
151         return txq->tx_rs_thresh;
152 }
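
/*
 * Worked example (illustrative values): with tx_rs_thresh = 32 and
 * tx_next_dd = 31, once the descriptor at index 31 reports DD the loop
 * above frees the mbufs of S/W ring entries 0..31, nb_tx_free grows by 32
 * and tx_next_dd advances to 63 (wrapping back to 31 past the ring end).
 */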
153
154 /* Populate 4 descriptors with data from 4 mbufs */
155 static inline void
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
157 {
158         uint64_t buf_dma_addr;
159         uint32_t pkt_len;
160         int i;
161
162         for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163                 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164                 pkt_len = (*pkts)->data_len;
165
166                 /* write data to descriptor */
167                 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
168
169                 txdp->read.cmd_type_len =
170                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
171
172                 txdp->read.olinfo_status =
173                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
174
175                 rte_prefetch0(&(*pkts)->pool);
176         }
177 }
178
179 /* Populate 1 descriptor with data from 1 mbuf */
180 static inline void
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
182 {
183         uint64_t buf_dma_addr;
184         uint32_t pkt_len;
185
186         buf_dma_addr = rte_mbuf_data_iova(*pkts);
187         pkt_len = (*pkts)->data_len;
188
189         /* write data to descriptor */
190         txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191         txdp->read.cmd_type_len =
192                         rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193         txdp->read.olinfo_status =
194                         rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195         rte_prefetch0(&(*pkts)->pool);
196 }
197
198 /*
199  * Fill H/W descriptor ring with mbuf data.
200  * Copy mbuf pointers to the S/W ring.
201  */
202 static inline void
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
204                       uint16_t nb_pkts)
205 {
206         volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207         struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208         const int N_PER_LOOP = 4;
209         const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210         int mainpart, leftover;
211         int i, j;
212
213         /*
214          * Process most of the packets in chunks of N pkts.  Any
215          * leftover packets will get processed one at a time.
216          */
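        /*
         * For example (illustrative): with nb_pkts = 10 and N_PER_LOOP = 4,
         * mainpart = 8 and leftover = 2, so tx4() runs twice for packets
         * 0..7 and tx1() handles packets 8 and 9.
         */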
217         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
219         for (i = 0; i < mainpart; i += N_PER_LOOP) {
220                 /* Copy N mbuf pointers to the S/W ring */
221                 for (j = 0; j < N_PER_LOOP; ++j) {
222                         (txep + i + j)->mbuf = *(pkts + i + j);
223                 }
224                 tx4(txdp + i, pkts + i);
225         }
226
227         if (unlikely(leftover > 0)) {
228                 for (i = 0; i < leftover; ++i) {
229                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230                         tx1(txdp + mainpart + i, pkts + mainpart + i);
231                 }
232         }
233 }
234
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
237              uint16_t nb_pkts)
238 {
239         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240         volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
241         uint16_t n = 0;
242
243         /*
244          * Begin scanning the H/W ring for done descriptors when the
245          * number of available descriptors drops below tx_free_thresh.  For
246          * each done descriptor, free the associated buffer.
247          */
248         if (txq->nb_tx_free < txq->tx_free_thresh)
249                 ixgbe_tx_free_bufs(txq);
250
251         /* Only use descriptors that are available */
252         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253         if (unlikely(nb_pkts == 0))
254                 return 0;
255
256         /* Use exactly nb_pkts descriptors */
257         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
258
259         /*
260          * At this point, we know there are enough descriptors in the
261          * ring to transmit all the packets.  This assumes that each
262          * mbuf contains a single segment, and that no new offloads
263          * are expected, which would require a new context descriptor.
264          */
265
266         /*
267          * See if we're going to wrap-around. If so, handle the top
268          * of the descriptor ring first, then do the bottom.  If not,
269          * the processing looks just like the "bottom" part anyway...
270          */
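        /*
         * Illustrative wrap-around case: with nb_tx_desc = 512,
         * tx_tail = 500 and nb_pkts = 20, the first 12 descriptors are
         * filled at the top of the ring, tx_tail resets to 0 and the
         * remaining 8 packets go at the bottom.
         */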
271         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273                 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
274
275                 /*
276                  * We know that the last descriptor in the ring will need to
277                  * have its RS bit set because tx_rs_thresh has to be
278                  * a divisor of the ring size
279                  */
280                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
283
284                 txq->tx_tail = 0;
285         }
286
287         /* Fill H/W descriptor ring with mbuf data */
288         ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
290
291         /*
292          * Determine if RS bit should be set
293          * This is what we actually want:
294          *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295          * but instead of subtracting 1 and doing >=, we can just do
296          * greater than without subtracting.
297          */
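        /*
         * Example (illustrative): if tx_next_rs = 31 and tx_tail has just
         * advanced to 40, then 40 > 31 (equivalently 39 >= 31), so RS is
         * set on descriptor 31 and tx_next_rs moves on to 63.
         */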
298         if (txq->tx_tail > txq->tx_next_rs) {
299                 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300                         rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301                 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
302                                                 txq->tx_rs_thresh);
303                 if (txq->tx_next_rs >= txq->nb_tx_desc)
304                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
305         }
306
307         /*
308          * Check for wrap-around. This would only happen if we used
309          * up to the last descriptor in the ring, no more, no less.
310          */
311         if (txq->tx_tail >= txq->nb_tx_desc)
312                 txq->tx_tail = 0;
313
314         /* update tail pointer */
315         rte_wmb();
316         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
317
318         return nb_pkts;
319 }
320
321 uint16_t
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
323                        uint16_t nb_pkts)
324 {
325         uint16_t nb_tx;
326
327         /* Transmit the whole burst at once if it fits within TX_MAX_BURST pkts */
328         if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329                 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
330
331         /* transmit more than the max burst, in chunks of TX_MAX_BURST */
332         nb_tx = 0;
333         while (nb_pkts) {
334                 uint16_t ret, n;
335
336                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337                 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338                 nb_tx = (uint16_t)(nb_tx + ret);
339                 nb_pkts = (uint16_t)(nb_pkts - ret);
340                 if (ret < n)
341                         break;
342         }
343
344         return nb_tx;
345 }
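
/*
 * Illustrative example: assuming RTE_PMD_IXGBE_TX_MAX_BURST is 32, a burst
 * of 100 packets is passed to tx_xmit_pkts() as chunks of 32, 32, 32 and 4;
 * the loop stops early if a chunk is only partially accepted.
 */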
346
347 #ifdef RTE_IXGBE_INC_VECTOR
348 static uint16_t
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
350                     uint16_t nb_pkts)
351 {
352         uint16_t nb_tx = 0;
353         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354
355         while (nb_pkts) {
356                 uint16_t ret, num;
357
358                 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359                 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
360                                                  num);
361                 nb_tx += ret;
362                 nb_pkts -= ret;
363                 if (ret < num)
364                         break;
365         }
366
367         return nb_tx;
368 }
369 #endif
370
371 static inline void
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373                 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374                 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375                 __rte_unused uint64_t *mdata)
376 {
377         uint32_t type_tucmd_mlhl;
378         uint32_t mss_l4len_idx = 0;
379         uint32_t ctx_idx;
380         uint32_t vlan_macip_lens;
381         union ixgbe_tx_offload tx_offload_mask;
382         uint32_t seqnum_seed = 0;
383
384         ctx_idx = txq->ctx_curr;
385         tx_offload_mask.data[0] = 0;
386         tx_offload_mask.data[1] = 0;
387         type_tucmd_mlhl = 0;
388
389         /* Specify which HW CTX to upload. */
390         mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
391
392         if (ol_flags & PKT_TX_VLAN_PKT) {
393                 tx_offload_mask.vlan_tci |= ~0;
394         }
395
396         /* check if TCP segmentation is required for this packet */
397         if (ol_flags & PKT_TX_TCP_SEG) {
398                 /* implies IP cksum in IPv4 */
399                 if (ol_flags & PKT_TX_IP_CKSUM)
400                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
403                 else
404                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405                                 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
407
408                 tx_offload_mask.l2_len |= ~0;
409                 tx_offload_mask.l3_len |= ~0;
410                 tx_offload_mask.l4_len |= ~0;
411                 tx_offload_mask.tso_segsz |= ~0;
412                 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413                 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414         } else { /* no TSO, check if hardware checksum is needed */
415                 if (ol_flags & PKT_TX_IP_CKSUM) {
416                         type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417                         tx_offload_mask.l2_len |= ~0;
418                         tx_offload_mask.l3_len |= ~0;
419                 }
420
421                 switch (ol_flags & PKT_TX_L4_MASK) {
422                 case PKT_TX_UDP_CKSUM:
423                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425                         mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
426                         tx_offload_mask.l2_len |= ~0;
427                         tx_offload_mask.l3_len |= ~0;
428                         break;
429                 case PKT_TX_TCP_CKSUM:
430                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
431                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
432                         mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
433                                 << IXGBE_ADVTXD_L4LEN_SHIFT;
434                         tx_offload_mask.l2_len |= ~0;
435                         tx_offload_mask.l3_len |= ~0;
436                         break;
437                 case PKT_TX_SCTP_CKSUM:
438                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
439                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
440                         mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
441                                 << IXGBE_ADVTXD_L4LEN_SHIFT;
442                         tx_offload_mask.l2_len |= ~0;
443                         tx_offload_mask.l3_len |= ~0;
444                         break;
445                 default:
446                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
447                                 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
448                         break;
449                 }
450         }
451
452         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
453                 tx_offload_mask.outer_l2_len |= ~0;
454                 tx_offload_mask.outer_l3_len |= ~0;
455                 tx_offload_mask.l2_len |= ~0;
456                 seqnum_seed |= tx_offload.outer_l3_len
457                                << IXGBE_ADVTXD_OUTER_IPLEN;
458                 seqnum_seed |= tx_offload.l2_len
459                                << IXGBE_ADVTXD_TUNNEL_LEN;
460         }
461 #ifdef RTE_LIBRTE_SECURITY
462         if (ol_flags & PKT_TX_SEC_OFFLOAD) {
463                 union ixgbe_crypto_tx_desc_md *md =
464                                 (union ixgbe_crypto_tx_desc_md *)mdata;
465                 seqnum_seed |=
466                         (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
467                 type_tucmd_mlhl |= md->enc ?
468                                 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
469                                 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
470                 type_tucmd_mlhl |=
471                         (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
472                 tx_offload_mask.sa_idx |= ~0;
473                 tx_offload_mask.sec_pad_len |= ~0;
474         }
475 #endif
476
477         txq->ctx_cache[ctx_idx].flags = ol_flags;
478         txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
479                 tx_offload_mask.data[0] & tx_offload.data[0];
480         txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
481                 tx_offload_mask.data[1] & tx_offload.data[1];
482         txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
483
484         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
485         vlan_macip_lens = tx_offload.l3_len;
486         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
487                 vlan_macip_lens |= (tx_offload.outer_l2_len <<
488                                     IXGBE_ADVTXD_MACLEN_SHIFT);
489         else
490                 vlan_macip_lens |= (tx_offload.l2_len <<
491                                     IXGBE_ADVTXD_MACLEN_SHIFT);
492         vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
493         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
494         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
495         ctx_txd->seqnum_seed     = seqnum_seed;
496 }
497
498 /*
499  * Check which hardware context can be used. Use the existing match
500  * or create a new context descriptor.
501  */
502 static inline uint32_t
503 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
504                    union ixgbe_tx_offload tx_offload)
505 {
506         /* Check for a match with the context currently in use */
507         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
508                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
509                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
510                      & tx_offload.data[0])) &&
511                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
512                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
513                      & tx_offload.data[1]))))
514                 return txq->ctx_curr;
515
516         /* Otherwise, check whether the other (next) context matches */
517         txq->ctx_curr ^= 1;
518         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
519                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
520                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
521                      & tx_offload.data[0])) &&
522                    (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
523                     (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
524                      & tx_offload.data[1]))))
525                 return txq->ctx_curr;
526
527         /* Mismatch: a new context descriptor will need to be built */
528         return IXGBE_CTX_NUM;
529 }
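
/*
 * Note: the driver keeps IXGBE_CTX_NUM cached contexts per queue (two
 * slots, toggled above via ctx_curr ^= 1). For example, two interleaved
 * flows, one needing VLAN insertion and one needing TCP checksum offload,
 * can each keep reusing their own slot instead of emitting a new context
 * descriptor for every packet.
 */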
530
531 static inline uint32_t
532 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
533 {
534         uint32_t tmp = 0;
535
536         if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
537                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
538         if (ol_flags & PKT_TX_IP_CKSUM)
539                 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
540         if (ol_flags & PKT_TX_TCP_SEG)
541                 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
542         return tmp;
543 }
544
545 static inline uint32_t
546 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
547 {
548         uint32_t cmdtype = 0;
549
550         if (ol_flags & PKT_TX_VLAN_PKT)
551                 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
552         if (ol_flags & PKT_TX_TCP_SEG)
553                 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
554         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
555                 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
556         if (ol_flags & PKT_TX_MACSEC)
557                 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
558         return cmdtype;
559 }
560
561 /* Default RS bit threshold values */
562 #ifndef DEFAULT_TX_RS_THRESH
563 #define DEFAULT_TX_RS_THRESH   32
564 #endif
565 #ifndef DEFAULT_TX_FREE_THRESH
566 #define DEFAULT_TX_FREE_THRESH 32
567 #endif
568
569 /* Reset transmit descriptors after they have been used */
570 static inline int
571 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
572 {
573         struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
574         volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
575         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
576         uint16_t nb_tx_desc = txq->nb_tx_desc;
577         uint16_t desc_to_clean_to;
578         uint16_t nb_tx_to_clean;
579         uint32_t status;
580
581         /* Determine the last descriptor needing to be cleaned */
582         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
583         if (desc_to_clean_to >= nb_tx_desc)
584                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
585
586         /* Check to make sure the last descriptor to clean is done */
587         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
588         status = txr[desc_to_clean_to].wb.status;
589         if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
590                 PMD_TX_FREE_LOG(DEBUG,
591                                 "TX descriptor %4u is not done "
592                                 "(port=%d queue=%d)",
593                                 desc_to_clean_to,
594                                 txq->port_id, txq->queue_id);
595                 /* Failed to clean any descriptors, better luck next time */
596                 return -(1);
597         }
598
599         /* Figure out how many descriptors will be cleaned */
600         if (last_desc_cleaned > desc_to_clean_to)
601                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
602                                                         desc_to_clean_to);
603         else
604                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
605                                                 last_desc_cleaned);
606
607         PMD_TX_FREE_LOG(DEBUG,
608                         "Cleaning %4u TX descriptors: %4u to %4u "
609                         "(port=%d queue=%d)",
610                         nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
611                         txq->port_id, txq->queue_id);
612
613         /*
614          * The last descriptor to clean is done, so that means all the
615          * descriptors from the last descriptor that was cleaned
616          * up to the last descriptor with the RS bit set
617          * are done. Only reset the threshold descriptor.
618          */
619         txr[desc_to_clean_to].wb.status = 0;
620
621         /* Update the txq to reflect the last descriptor that was cleaned */
622         txq->last_desc_cleaned = desc_to_clean_to;
623         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
624
625         /* No Error */
626         return 0;
627 }
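
/*
 * Worked example (illustrative values): with nb_tx_desc = 512,
 * last_desc_cleaned = 500 and tx_rs_thresh = 32, desc_to_clean_to wraps
 * to 20; assuming last_id also resolves to 20, nb_tx_to_clean =
 * (512 - 500) + 20 = 32 descriptors are returned to nb_tx_free.
 */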
628
629 uint16_t
630 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
631                 uint16_t nb_pkts)
632 {
633         struct ixgbe_tx_queue *txq;
634         struct ixgbe_tx_entry *sw_ring;
635         struct ixgbe_tx_entry *txe, *txn;
636         volatile union ixgbe_adv_tx_desc *txr;
637         volatile union ixgbe_adv_tx_desc *txd, *txp;
638         struct rte_mbuf     *tx_pkt;
639         struct rte_mbuf     *m_seg;
640         uint64_t buf_dma_addr;
641         uint32_t olinfo_status;
642         uint32_t cmd_type_len;
643         uint32_t pkt_len;
644         uint16_t slen;
645         uint64_t ol_flags;
646         uint16_t tx_id;
647         uint16_t tx_last;
648         uint16_t nb_tx;
649         uint16_t nb_used;
650         uint64_t tx_ol_req;
651         uint32_t ctx = 0;
652         uint32_t new_ctx;
653         union ixgbe_tx_offload tx_offload;
654 #ifdef RTE_LIBRTE_SECURITY
655         uint8_t use_ipsec;
656 #endif
657
658         tx_offload.data[0] = 0;
659         tx_offload.data[1] = 0;
660         txq = tx_queue;
661         sw_ring = txq->sw_ring;
662         txr     = txq->tx_ring;
663         tx_id   = txq->tx_tail;
664         txe = &sw_ring[tx_id];
665         txp = NULL;
666
667         /* Determine if the descriptor ring needs to be cleaned. */
668         if (txq->nb_tx_free < txq->tx_free_thresh)
669                 ixgbe_xmit_cleanup(txq);
670
671         rte_prefetch0(&txe->mbuf->pool);
672
673         /* TX loop */
674         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
675                 new_ctx = 0;
676                 tx_pkt = *tx_pkts++;
677                 pkt_len = tx_pkt->pkt_len;
678
679                 /*
680                  * Determine how many (if any) context descriptors
681                  * are needed for offload functionality.
682                  */
683                 ol_flags = tx_pkt->ol_flags;
684 #ifdef RTE_LIBRTE_SECURITY
685                 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
686 #endif
687
688                 /* If hardware offload required */
689                 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
690                 if (tx_ol_req) {
691                         tx_offload.l2_len = tx_pkt->l2_len;
692                         tx_offload.l3_len = tx_pkt->l3_len;
693                         tx_offload.l4_len = tx_pkt->l4_len;
694                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
695                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
696                         tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
697                         tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
698 #ifdef RTE_LIBRTE_SECURITY
699                         if (use_ipsec) {
700                                 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
701                                         (union ixgbe_crypto_tx_desc_md *)
702                                                         &tx_pkt->udata64;
703                                 tx_offload.sa_idx = ipsec_mdata->sa_idx;
704                                 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
705                         }
706 #endif
707
708                         /* Decide whether to build a new context or reuse an existing one */
709                         ctx = what_advctx_update(txq, tx_ol_req,
710                                 tx_offload);
711                         /* Only allocate a context descriptor if required */
712                         new_ctx = (ctx == IXGBE_CTX_NUM);
713                         ctx = txq->ctx_curr;
714                 }
715
716                 /*
717                  * Keep track of how many descriptors are used this loop
718                  * This will always be the number of segments + the number of
719                  * Context descriptors required to transmit the packet
720                  */
721                 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
722
723                 if (txp != NULL &&
724                                 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
725                         /* set RS on the previous packet in the burst */
726                         txp->read.cmd_type_len |=
727                                 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
728
729                 /*
730                  * The number of descriptors that must be allocated for a
731                  * packet is the number of segments of that packet, plus 1
732                  * Context Descriptor for the hardware offload, if any.
733                  * Determine the last TX descriptor to allocate in the TX ring
734                  * for the packet, starting from the current position (tx_id)
735                  * in the ring.
736                  */
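                /*
                 * For example (illustrative): a 3-segment packet that needs
                 * a new context descriptor uses nb_used = 4 descriptors; with
                 * tx_id = 510 and nb_tx_desc = 512, tx_last wraps around to 1.
                 */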
737                 tx_last = (uint16_t) (tx_id + nb_used - 1);
738
739                 /* Circular ring */
740                 if (tx_last >= txq->nb_tx_desc)
741                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
742
743                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
744                            " tx_first=%u tx_last=%u",
745                            (unsigned) txq->port_id,
746                            (unsigned) txq->queue_id,
747                            (unsigned) pkt_len,
748                            (unsigned) tx_id,
749                            (unsigned) tx_last);
750
751                 /*
752                  * Make sure there are enough TX descriptors available to
753                  * transmit the entire packet.
754                  * nb_used better be less than or equal to txq->tx_rs_thresh
755                  */
756                 if (nb_used > txq->nb_tx_free) {
757                         PMD_TX_FREE_LOG(DEBUG,
758                                         "Not enough free TX descriptors "
759                                         "nb_used=%4u nb_free=%4u "
760                                         "(port=%d queue=%d)",
761                                         nb_used, txq->nb_tx_free,
762                                         txq->port_id, txq->queue_id);
763
764                         if (ixgbe_xmit_cleanup(txq) != 0) {
765                                 /* Could not clean any descriptors */
766                                 if (nb_tx == 0)
767                                         return 0;
768                                 goto end_of_tx;
769                         }
770
771                         /* nb_used better be <= txq->tx_rs_thresh */
772                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
773                                 PMD_TX_FREE_LOG(DEBUG,
774                                         "The number of descriptors needed to "
775                                         "transmit the packet exceeds the "
776                                         "RS bit threshold. This will impact "
777                                         "performance. "
778                                         "nb_used=%4u nb_free=%4u "
779                                         "tx_rs_thresh=%4u. "
780                                         "(port=%d queue=%d)",
781                                         nb_used, txq->nb_tx_free,
782                                         txq->tx_rs_thresh,
783                                         txq->port_id, txq->queue_id);
784                                 /*
785                                  * Loop here until there are enough TX
786                                  * descriptors or until the ring cannot be
787                                  * cleaned.
788                                  */
789                                 while (nb_used > txq->nb_tx_free) {
790                                         if (ixgbe_xmit_cleanup(txq) != 0) {
791                                                 /*
792                                                  * Could not clean any
793                                                  * descriptors
794                                                  */
795                                                 if (nb_tx == 0)
796                                                         return 0;
797                                                 goto end_of_tx;
798                                         }
799                                 }
800                         }
801                 }
802
803                 /*
804                  * By now there are enough free TX descriptors to transmit
805                  * the packet.
806                  */
807
808                 /*
809                  * Set common flags of all TX Data Descriptors.
810                  *
811                  * The following bits must be set in all Data Descriptors:
812                  *   - IXGBE_ADVTXD_DTYP_DATA
813                  *   - IXGBE_ADVTXD_DCMD_DEXT
814                  *
815                  * The following bits must be set in the first Data Descriptor
816                  * and are ignored in the other ones:
817                  *   - IXGBE_ADVTXD_DCMD_IFCS
818                  *   - IXGBE_ADVTXD_MAC_1588
819                  *   - IXGBE_ADVTXD_DCMD_VLE
820                  *
821                  * The following bits must only be set in the last Data
822                  * Descriptor:
823                  *   - IXGBE_TXD_CMD_EOP
824                  *
825                  * The following bits can be set in any Data Descriptor, but
826                  * are only set in the last Data Descriptor:
827                  *   - IXGBE_TXD_CMD_RS
828                  */
829                 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
830                         IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
831
832 #ifdef RTE_LIBRTE_IEEE1588
833                 if (ol_flags & PKT_TX_IEEE1588_TMST)
834                         cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
835 #endif
836
837                 olinfo_status = 0;
838                 if (tx_ol_req) {
839
840                         if (ol_flags & PKT_TX_TCP_SEG) {
841                                 /* when TSO is on, paylen in the descriptor is
842                                  * not the packet len but the tcp payload len */
843                                 pkt_len -= (tx_offload.l2_len +
844                                         tx_offload.l3_len + tx_offload.l4_len);
845                         }
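                        /*
                         * E.g. (illustrative): for a TSO packet with
                         * pkt_len = 9014, l2_len = 14, l3_len = 20 and
                         * l4_len = 20, the reported paylen becomes 8960.
                         */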
846
847                         /*
848                          * Setup the TX Advanced Context Descriptor if required
849                          */
850                         if (new_ctx) {
851                                 volatile struct ixgbe_adv_tx_context_desc *
852                                     ctx_txd;
853
854                                 ctx_txd = (volatile struct
855                                     ixgbe_adv_tx_context_desc *)
856                                     &txr[tx_id];
857
858                                 txn = &sw_ring[txe->next_id];
859                                 rte_prefetch0(&txn->mbuf->pool);
860
861                                 if (txe->mbuf != NULL) {
862                                         rte_pktmbuf_free_seg(txe->mbuf);
863                                         txe->mbuf = NULL;
864                                 }
865
866                                 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
867                                         tx_offload, &tx_pkt->udata64);
868
869                                 txe->last_id = tx_last;
870                                 tx_id = txe->next_id;
871                                 txe = txn;
872                         }
873
874                         /*
875                          * Set up the TX Advanced Data Descriptor.
876                          * This path is taken whether a new context
877                          * descriptor was built or an existing one is reused.
878                          */
879                         cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
880                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
881                         olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
882                 }
883
884                 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
885 #ifdef RTE_LIBRTE_SECURITY
886                 if (use_ipsec)
887                         olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
888 #endif
889
890                 m_seg = tx_pkt;
891                 do {
892                         txd = &txr[tx_id];
893                         txn = &sw_ring[txe->next_id];
894                         rte_prefetch0(&txn->mbuf->pool);
895
896                         if (txe->mbuf != NULL)
897                                 rte_pktmbuf_free_seg(txe->mbuf);
898                         txe->mbuf = m_seg;
899
900                         /*
901                          * Set up Transmit Data Descriptor.
902                          */
903                         slen = m_seg->data_len;
904                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
905                         txd->read.buffer_addr =
906                                 rte_cpu_to_le_64(buf_dma_addr);
907                         txd->read.cmd_type_len =
908                                 rte_cpu_to_le_32(cmd_type_len | slen);
909                         txd->read.olinfo_status =
910                                 rte_cpu_to_le_32(olinfo_status);
911                         txe->last_id = tx_last;
912                         tx_id = txe->next_id;
913                         txe = txn;
914                         m_seg = m_seg->next;
915                 } while (m_seg != NULL);
916
917                 /*
918                  * The last packet data descriptor needs End Of Packet (EOP)
919                  */
920                 cmd_type_len |= IXGBE_TXD_CMD_EOP;
921                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
922                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
923
924                 /* Set RS bit only on threshold packets' last descriptor */
925                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
926                         PMD_TX_FREE_LOG(DEBUG,
927                                         "Setting RS bit on TXD id="
928                                         "%4u (port=%d queue=%d)",
929                                         tx_last, txq->port_id, txq->queue_id);
930
931                         cmd_type_len |= IXGBE_TXD_CMD_RS;
932
933                         /* Update txq RS bit counters */
934                         txq->nb_tx_used = 0;
935                         txp = NULL;
936                 } else
937                         txp = txd;
938
939                 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
940         }
941
942 end_of_tx:
943         /* set RS on last packet in the burst */
944         if (txp != NULL)
945                 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
946
947         rte_wmb();
948
949         /*
950          * Set the Transmit Descriptor Tail (TDT)
951          */
952         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
953                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
954                    (unsigned) tx_id, (unsigned) nb_tx);
955         IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
956         txq->tx_tail = tx_id;
957
958         return nb_tx;
959 }
960
961 /*********************************************************************
962  *
963  *  TX prep functions
964  *
965  **********************************************************************/
966 uint16_t
967 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
968 {
969         int i, ret;
970         uint64_t ol_flags;
971         struct rte_mbuf *m;
972         struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
973
974         for (i = 0; i < nb_pkts; i++) {
975                 m = tx_pkts[i];
976                 ol_flags = m->ol_flags;
977
978                 /**
979                  * Check if packet meets requirements for number of segments
980                  *
981                  * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
982                  *       non-TSO
983                  */
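                /*
                 * For example, with the default WTHRESH of 0 this allows up
                 * to IXGBE_TX_MAX_SEG (assumed to be 40 here) segments.
                 */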
984
985                 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
986                         rte_errno = EINVAL;
987                         return i;
988                 }
989
990                 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
991                         rte_errno = ENOTSUP;
992                         return i;
993                 }
994
995 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
996                 ret = rte_validate_tx_offload(m);
997                 if (ret != 0) {
998                         rte_errno = -ret;
999                         return i;
1000                 }
1001 #endif
1002                 ret = rte_net_intel_cksum_prepare(m);
1003                 if (ret != 0) {
1004                         rte_errno = -ret;
1005                         return i;
1006                 }
1007         }
1008
1009         return i;
1010 }
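
/*
 * Illustrative usage sketch (application side, not part of this driver):
 * when using Tx offloads the caller is expected to validate a burst with
 * rte_eth_tx_prepare() before rte_eth_tx_burst(), e.g.:
 *
 *        nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *        if (nb_prep < nb_pkts)
 *                handle_bad_pkt(pkts[nb_prep], rte_errno);
 *        nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * where handle_bad_pkt() is a hypothetical application callback.
 */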
1011
1012 /*********************************************************************
1013  *
1014  *  RX functions
1015  *
1016  **********************************************************************/
1017
1018 #define IXGBE_PACKET_TYPE_ETHER                         0X00
1019 #define IXGBE_PACKET_TYPE_IPV4                          0X01
1020 #define IXGBE_PACKET_TYPE_IPV4_TCP                      0X11
1021 #define IXGBE_PACKET_TYPE_IPV4_UDP                      0X21
1022 #define IXGBE_PACKET_TYPE_IPV4_SCTP                     0X41
1023 #define IXGBE_PACKET_TYPE_IPV4_EXT                      0X03
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP                  0X13
1025 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP                  0X23
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP                 0X43
1027 #define IXGBE_PACKET_TYPE_IPV6                          0X04
1028 #define IXGBE_PACKET_TYPE_IPV6_TCP                      0X14
1029 #define IXGBE_PACKET_TYPE_IPV6_UDP                      0X24
1030 #define IXGBE_PACKET_TYPE_IPV6_SCTP                     0X44
1031 #define IXGBE_PACKET_TYPE_IPV6_EXT                      0X0C
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP                  0X1C
1033 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP                  0X2C
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP                 0X4C
1035 #define IXGBE_PACKET_TYPE_IPV4_IPV6                     0X05
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP                 0X15
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP                 0X25
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP                0X45
1039 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6                 0X07
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP             0X17
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP             0X27
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP            0X47
1043 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT                 0X0D
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP             0X1D
1045 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP             0X2D
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP            0X4D
1047 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT             0X0F
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP         0X1F
1049 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP         0X2F
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP        0X4F
1051
1052 #define IXGBE_PACKET_TYPE_NVGRE                   0X00
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV4              0X01
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP          0X11
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP          0X21
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP         0X41
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT          0X03
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0X13
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0X23
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0X43
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV6              0X04
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP          0X14
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP          0X24
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP         0X44
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT          0X0C
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0X1C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0X2C
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0X4C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6         0X05
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0X15
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0X25
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0X0D
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1075
1076 #define IXGBE_PACKET_TYPE_VXLAN                   0X80
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV4              0X81
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0X93
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0XA3
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0XC3
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV6              0X84
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP          0X94
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP          0XA4
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP         0XC4
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT          0X8C
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0X9C
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0XAC
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0XCC
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6         0X85
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0X95
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0XA5
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0X8D
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1099
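/*
 * The values above appear to encode packet types bit-wise: bit 0 IPv4,
 * bit 1 IPv4 with extensions, bit 2 IPv6, bit 3 IPv6 with extensions,
 * bit 4 TCP, bit 5 UDP, bit 6 SCTP and bit 7 VXLAN (the NVGRE values lack
 * bit 7). This is an inferred reading of the defines, not a documented
 * hardware format.
 */
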
1100 /**
1101  * Use two different tables for normal packets and tunnel packets
1102  * to save space.
1103  */
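/*
 * These tables are indexed directly in the Rx path: the packet-type field
 * taken from the Rx descriptor is masked and used as the array index (see
 * the descriptor-to-packet-type helpers further down in this file).
 */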
1104 const uint32_t
1105         ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1106         [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1107         [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1108                 RTE_PTYPE_L3_IPV4,
1109         [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1110                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1111         [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1112                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1113         [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1114                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1115         [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1116                 RTE_PTYPE_L3_IPV4_EXT,
1117         [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1118                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1119         [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1120                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1121         [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1122                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1123         [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1124                 RTE_PTYPE_L3_IPV6,
1125         [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1126                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1127         [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1128                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1129         [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1130                 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1131         [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1132                 RTE_PTYPE_L3_IPV6_EXT,
1133         [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1134                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1135         [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1136                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1137         [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1138                 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1139         [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1140                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1141                 RTE_PTYPE_INNER_L3_IPV6,
1142         [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1143                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1144                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1145         [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1146                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1147                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1148         [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1149                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1150                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1151         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1152                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1153                 RTE_PTYPE_INNER_L3_IPV6,
1154         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1155                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1156                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1157         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1158                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1159                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1160         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1161                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1162                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1163         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1164                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1165                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1166         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1167                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1168                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1169         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1170                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1171                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1172         [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1173                 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1174                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1175         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1176                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1177                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1178         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1179                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1180                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1181         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1182                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1183                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1184         [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1185                 RTE_PTYPE_L2_ETHER |
1186                 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1187                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1188 };
1189
1190 const uint32_t
1191         ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1192         [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1193                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1194                 RTE_PTYPE_INNER_L2_ETHER,
1195         [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1196                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1197                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1198         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1199                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1200                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1201         [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1202                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1203                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1204         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1205                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1206                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1207         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1208                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1209                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1210         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1211                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1212                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1213         [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1214                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1215                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1216                 RTE_PTYPE_INNER_L4_TCP,
1217         [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1218                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1219                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1220                 RTE_PTYPE_INNER_L4_TCP,
1221         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1222                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1223                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1224         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1225                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1226                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1227                 RTE_PTYPE_INNER_L4_TCP,
1228         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1229                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1230                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1231                 RTE_PTYPE_INNER_L3_IPV4,
1232         [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1233                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1234                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1235                 RTE_PTYPE_INNER_L4_UDP,
1236         [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1237                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1238                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1239                 RTE_PTYPE_INNER_L4_UDP,
1240         [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1241                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1242                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1243                 RTE_PTYPE_INNER_L4_SCTP,
1244         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1245                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1246                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1247         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1248                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1249                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1250                 RTE_PTYPE_INNER_L4_UDP,
1251         [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1252                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1253                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1254                 RTE_PTYPE_INNER_L4_SCTP,
1255         [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1256                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1257                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1258                 RTE_PTYPE_INNER_L3_IPV4,
1259         [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1260                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1261                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1262                 RTE_PTYPE_INNER_L4_SCTP,
1263         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1264                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1265                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1266                 RTE_PTYPE_INNER_L4_SCTP,
1267         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1268                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1269                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1270                 RTE_PTYPE_INNER_L4_TCP,
1271         [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1272                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1273                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1274                 RTE_PTYPE_INNER_L4_UDP,
1275
1276         [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1277                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1278                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1279         [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1280                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1281                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1282                 RTE_PTYPE_INNER_L3_IPV4,
1283         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1284                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1285                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1286                 RTE_PTYPE_INNER_L3_IPV4_EXT,
1287         [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1288                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1289                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1290                 RTE_PTYPE_INNER_L3_IPV6,
1291         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1292                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1293                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1294                 RTE_PTYPE_INNER_L3_IPV4,
1295         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1296                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1297                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1298                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1299         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1300                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1301                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1302                 RTE_PTYPE_INNER_L3_IPV4,
1303         [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1304                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1305                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1306                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1307         [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1308                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1309                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1310                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1311         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1312                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1313                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1314                 RTE_PTYPE_INNER_L3_IPV4,
1315         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1316                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1317                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1318                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1319         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1320                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1321                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1322                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1323         [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1324                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1325                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1326                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1327         [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1328                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1329                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1330                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1331         [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1332                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1333                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1334                 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1335         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1336                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1337                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1338                 RTE_PTYPE_INNER_L3_IPV4,
1339         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1340                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1341                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1342                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1343         [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1344                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1345                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1346                 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1347         [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1348                 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1349                 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1350                 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1351         [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1352                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1353                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1354                 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1355         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1356                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1357                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1358                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1359         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1360                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1361                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1362                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1363         [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1364                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1365                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1366                 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1367 };
1368
1369 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1370 static inline uint32_t
1371 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1372 {
1373
1374         if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1375                 return RTE_PTYPE_UNKNOWN;
1376
1377         pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1378
1379         /* For tunnel packet */
1380         if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1381                 /* Remove the tunnel bit to save the space. */
1382                 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1383                 return ptype_table_tn[pkt_info];
1384         }
1385
1386         /**
1387          * For x550, if the packet is not tunneled,
1388          * the tunnel type bits should be set to 0.
1389          * Reuse the 82599 mask in that case.
1390          */
1391         pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1392
1393         return ptype_table[pkt_info];
1394 }
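
/*
 * Illustrative note (not part of the driver): an application receiving mbufs
 * from this PMD can inspect the packet type filled in above with the generic
 * rte_mbuf_ptype.h helpers, e.g. to tell tunneled from plain packets
 * (hypothetical sketch, "mb" and "buf" are placeholder names):
 *
 *	char buf[256];
 *
 *	if (mb->packet_type & RTE_PTYPE_TUNNEL_MASK) {
 *		// tunneled (GRE/VXLAN) packet; inner layers are reported in
 *		// the RTE_PTYPE_INNER_* bits
 *	}
 *	rte_get_ptype_name(mb->packet_type, buf, sizeof(buf));
 */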
1395
1396 static inline uint64_t
1397 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1398 {
1399         static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1400                 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1401                 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1402                 PKT_RX_RSS_HASH, 0, 0, 0,
1403                 0, 0, 0,  PKT_RX_FDIR,
1404         };
1405 #ifdef RTE_LIBRTE_IEEE1588
1406         static uint64_t ip_pkt_etqf_map[8] = {
1407                 0, 0, 0, PKT_RX_IEEE1588_PTP,
1408                 0, 0, 0, 0,
1409         };
1410
1411         if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1412                 return ip_pkt_etqf_map[(pkt_info >> 4) & 0x07] |
1413                                 ip_rss_types_map[pkt_info & 0xF];
1414         else
1415                 return ip_rss_types_map[pkt_info & 0xF];
1416 #else
1417         return ip_rss_types_map[pkt_info & 0xF];
1418 #endif
1419 }
1420
1421 static inline uint64_t
1422 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1423 {
1424         uint64_t pkt_flags;
1425
1426         /*
1427          * Check only whether a VLAN tag is present.
1428          * Do not check whether the L3/L4 Rx checksum was computed by the
1429          * NIC; that can be found from the rte_eth_rxmode.offloads flags.
1430          */
1431         pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  vlan_flags : 0;
1432
1433 #ifdef RTE_LIBRTE_IEEE1588
1434         if (rx_status & IXGBE_RXD_STAT_TMST)
1435                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1436 #endif
1437         return pkt_flags;
1438 }
1439
1440 static inline uint64_t
1441 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1442 {
1443         uint64_t pkt_flags;
1444
1445         /*
1446          * Bit 31: IPE, IPv4 checksum error
1447          * Bit 30: L4I, L4 integrity error
1448          */
1449         static uint64_t error_to_pkt_flags_map[4] = {
1450                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1451                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1452                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1453                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1454         };
1455         pkt_flags = error_to_pkt_flags_map[(rx_status >>
1456                 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1457
1458         if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1459             (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1460                 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1461         }
1462
1463 #ifdef RTE_LIBRTE_SECURITY
1464         if (rx_status & IXGBE_RXD_STAT_SECP) {
1465                 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1466                 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1467                         pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1468         }
1469 #endif
1470
1471         return pkt_flags;
1472 }
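
/*
 * Illustrative note (not part of the driver): the checksum bits computed above
 * end up in mbuf->ol_flags; a hypothetical application-side check could be:
 *
 *	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
 *	    (m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
 *		rte_pktmbuf_free(m);	// drop packets with bad checksums
 *
 * PKT_RX_EIP_CKSUM_BAD can be tested the same way for tunneled packets.
 */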
1473
1474 /*
1475  * LOOK_AHEAD defines how many desc statuses to check beyond the
1476  * current descriptor.
1477  * It must be a compile-time constant (#define) for optimal performance.
1478  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1479  * function only works with LOOK_AHEAD=8.
1480  */
1481 #define LOOK_AHEAD 8
1482 #if (LOOK_AHEAD != 8)
1483 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1484 #endif
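
/*
 * Worked example (assuming the default RTE_PMD_IXGBE_RX_MAX_BURST of 32):
 * ixgbe_rx_scan_hw_ring() below checks descriptors in groups of LOOK_AHEAD=8,
 * so one scan covers at most 32/8 = 4 groups and stops at the first group
 * that is not completely done.
 */
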
1485 static inline int
1486 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1487 {
1488         volatile union ixgbe_adv_rx_desc *rxdp;
1489         struct ixgbe_rx_entry *rxep;
1490         struct rte_mbuf *mb;
1491         uint16_t pkt_len;
1492         uint64_t pkt_flags;
1493         int nb_dd;
1494         uint32_t s[LOOK_AHEAD];
1495         uint32_t pkt_info[LOOK_AHEAD];
1496         int i, j, nb_rx = 0;
1497         uint32_t status;
1498         uint64_t vlan_flags = rxq->vlan_flags;
1499
1500         /* get references to current descriptor and S/W ring entry */
1501         rxdp = &rxq->rx_ring[rxq->rx_tail];
1502         rxep = &rxq->sw_ring[rxq->rx_tail];
1503
1504         status = rxdp->wb.upper.status_error;
1505         /* check to make sure there is at least 1 packet to receive */
1506         if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1507                 return 0;
1508
1509         /*
1510          * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1511          * reference packets that are ready to be received.
1512          */
1513         for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1514              i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1515                 /* Read desc statuses backwards to avoid race condition */
1516                 for (j = 0; j < LOOK_AHEAD; j++)
1517                         s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1518
1519                 rte_smp_rmb();
1520
1521                 /* Compute how many status bits were set */
1522                 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1523                                 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1524                         ;
1525
1526                 for (j = 0; j < nb_dd; j++)
1527                         pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1528                                                        lo_dword.data);
1529
1530                 nb_rx += nb_dd;
1531
1532                 /* Translate descriptor info to mbuf format */
1533                 for (j = 0; j < nb_dd; ++j) {
1534                         mb = rxep[j].mbuf;
1535                         pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1536                                   rxq->crc_len;
1537                         mb->data_len = pkt_len;
1538                         mb->pkt_len = pkt_len;
1539                         mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1540
1541                         /* convert descriptor fields to rte mbuf flags */
1542                         pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1543                                 vlan_flags);
1544                         pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1545                         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1546                                         ((uint16_t)pkt_info[j]);
1547                         mb->ol_flags = pkt_flags;
1548                         mb->packet_type =
1549                                 ixgbe_rxd_pkt_info_to_pkt_type
1550                                         (pkt_info[j], rxq->pkt_type_mask);
1551
1552                         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1553                                 mb->hash.rss = rte_le_to_cpu_32(
1554                                     rxdp[j].wb.lower.hi_dword.rss);
1555                         else if (pkt_flags & PKT_RX_FDIR) {
1556                                 mb->hash.fdir.hash = rte_le_to_cpu_16(
1557                                     rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1558                                     IXGBE_ATR_HASH_MASK;
1559                                 mb->hash.fdir.id = rte_le_to_cpu_16(
1560                                     rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1561                         }
1562                 }
1563
1564                 /* Move mbuf pointers from the S/W ring to the stage */
1565                 for (j = 0; j < LOOK_AHEAD; ++j) {
1566                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1567                 }
1568
1569                 /* stop if not all descriptors in this group were done */
1570                 if (nb_dd != LOOK_AHEAD)
1571                         break;
1572         }
1573
1574         /* clear software ring entries so we can cleanup correctly */
1575         for (i = 0; i < nb_rx; ++i) {
1576                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1577         }
1578
1579
1580         return nb_rx;
1581 }
1582
1583 static inline int
1584 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1585 {
1586         volatile union ixgbe_adv_rx_desc *rxdp;
1587         struct ixgbe_rx_entry *rxep;
1588         struct rte_mbuf *mb;
1589         uint16_t alloc_idx;
1590         __le64 dma_addr;
1591         int diag, i;
1592
1593         /* allocate buffers in bulk directly into the S/W ring */
1594         alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1595         rxep = &rxq->sw_ring[alloc_idx];
1596         diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1597                                     rxq->rx_free_thresh);
1598         if (unlikely(diag != 0))
1599                 return -ENOMEM;
1600
1601         rxdp = &rxq->rx_ring[alloc_idx];
1602         for (i = 0; i < rxq->rx_free_thresh; ++i) {
1603                 /* populate the static rte mbuf fields */
1604                 mb = rxep[i].mbuf;
1605                 if (reset_mbuf) {
1606                         mb->port = rxq->port_id;
1607                 }
1608
1609                 rte_mbuf_refcnt_set(mb, 1);
1610                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1611
1612                 /* populate the descriptors */
1613                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1614                 rxdp[i].read.hdr_addr = 0;
1615                 rxdp[i].read.pkt_addr = dma_addr;
1616         }
1617
1618         /* update state of internal queue structure */
1619         rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1620         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1621                 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1622
1623         /* no errors */
1624         return 0;
1625 }
1626
1627 static inline uint16_t
1628 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1629                          uint16_t nb_pkts)
1630 {
1631         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1632         int i;
1633
1634         /* how many packets are ready to return? */
1635         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1636
1637         /* copy mbuf pointers to the application's packet list */
1638         for (i = 0; i < nb_pkts; ++i)
1639                 rx_pkts[i] = stage[i];
1640
1641         /* update internal queue state */
1642         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1643         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1644
1645         return nb_pkts;
1646 }
1647
1648 static inline uint16_t
1649 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1650              uint16_t nb_pkts)
1651 {
1652         struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1653         uint16_t nb_rx = 0;
1654
1655         /* Any previously recv'd pkts will be returned from the Rx stage */
1656         if (rxq->rx_nb_avail)
1657                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1658
1659         /* Scan the H/W ring for packets to receive */
1660         nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1661
1662         /* update internal queue state */
1663         rxq->rx_next_avail = 0;
1664         rxq->rx_nb_avail = nb_rx;
1665         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1666
1667         /* if required, allocate new buffers to replenish descriptors */
1668         if (rxq->rx_tail > rxq->rx_free_trigger) {
1669                 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1670
1671                 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1672                         int i, j;
1673
1674                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1675                                    "queue_id=%u", (unsigned) rxq->port_id,
1676                                    (unsigned) rxq->queue_id);
1677
1678                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1679                                 rxq->rx_free_thresh;
1680
1681                         /*
1682                          * Need to rewind any previous receives if we cannot
1683                          * allocate new buffers to replenish the old ones.
1684                          */
1685                         rxq->rx_nb_avail = 0;
1686                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1687                         for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1688                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1689
1690                         return 0;
1691                 }
1692
1693                 /* update tail pointer */
1694                 rte_wmb();
1695                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1696                                             cur_free_trigger);
1697         }
1698
1699         if (rxq->rx_tail >= rxq->nb_rx_desc)
1700                 rxq->rx_tail = 0;
1701
1702         /* received any packets this loop? */
1703         if (rxq->rx_nb_avail)
1704                 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1705
1706         return 0;
1707 }
1708
1709 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1710 uint16_t
1711 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1712                            uint16_t nb_pkts)
1713 {
1714         uint16_t nb_rx;
1715
1716         if (unlikely(nb_pkts == 0))
1717                 return 0;
1718
1719         if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1720                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1721
1722         /* request is relatively large, chunk it up */
1723         nb_rx = 0;
1724         while (nb_pkts) {
1725                 uint16_t ret, n;
1726
1727                 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1728                 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1729                 nb_rx = (uint16_t)(nb_rx + ret);
1730                 nb_pkts = (uint16_t)(nb_pkts - ret);
1731                 if (ret < n)
1732                         break;
1733         }
1734
1735         return nb_rx;
1736 }
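
/*
 * Worked example (assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32): a request for
 * nb_pkts = 100 is split above into chunks of 32, 32, 32 and 4, stopping
 * early if any chunk returns fewer packets than requested.
 */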
1737
1738 uint16_t
1739 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1740                 uint16_t nb_pkts)
1741 {
1742         struct ixgbe_rx_queue *rxq;
1743         volatile union ixgbe_adv_rx_desc *rx_ring;
1744         volatile union ixgbe_adv_rx_desc *rxdp;
1745         struct ixgbe_rx_entry *sw_ring;
1746         struct ixgbe_rx_entry *rxe;
1747         struct rte_mbuf *rxm;
1748         struct rte_mbuf *nmb;
1749         union ixgbe_adv_rx_desc rxd;
1750         uint64_t dma_addr;
1751         uint32_t staterr;
1752         uint32_t pkt_info;
1753         uint16_t pkt_len;
1754         uint16_t rx_id;
1755         uint16_t nb_rx;
1756         uint16_t nb_hold;
1757         uint64_t pkt_flags;
1758         uint64_t vlan_flags;
1759
1760         nb_rx = 0;
1761         nb_hold = 0;
1762         rxq = rx_queue;
1763         rx_id = rxq->rx_tail;
1764         rx_ring = rxq->rx_ring;
1765         sw_ring = rxq->sw_ring;
1766         vlan_flags = rxq->vlan_flags;
1767         while (nb_rx < nb_pkts) {
1768                 /*
1769                  * The order of operations here is important as the DD status
1770                  * bit must not be read after any other descriptor fields.
1771                  * rx_ring and rxdp are pointing to volatile data so the order
1772                  * of accesses cannot be reordered by the compiler. If they were
1773                  * not volatile, they could be reordered which could lead to
1774                  * using invalid descriptor fields when read from rxd.
1775                  */
1776                 rxdp = &rx_ring[rx_id];
1777                 staterr = rxdp->wb.upper.status_error;
1778                 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1779                         break;
1780                 rxd = *rxdp;
1781
1782                 /*
1783                  * End of packet.
1784                  *
1785                  * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1786                  * is likely to be invalid and to be dropped by the various
1787                  * validation checks performed by the network stack.
1788                  *
1789                  * Allocate a new mbuf to replenish the RX ring descriptor.
1790                  * If the allocation fails:
1791                  *    - arrange for that RX descriptor to be the first one
1792                  *      being parsed the next time the receive function is
1793                  *      invoked [on the same queue].
1794                  *
1795                  *    - Stop parsing the RX ring and return immediately.
1796                  *
1797                  * This policy does not drop the packet received in the RX
1798                  * descriptor for which the allocation of a new mbuf failed.
1799                  * Thus, it allows that packet to be retrieved later once
1800                  * mbufs have been freed in the meantime.
1801                  * As a side effect, holding RX descriptors instead of
1802                  * systematically giving them back to the NIC may lead to
1803                  * RX ring exhaustion situations.
1804                  * However, the NIC can gracefully prevent such situations
1805                  * to happen by sending specific "back-pressure" flow control
1806                  * frames to its peer(s).
1807                  */
1808                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1809                            "ext_err_stat=0x%08x pkt_len=%u",
1810                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1811                            (unsigned) rx_id, (unsigned) staterr,
1812                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1813
1814                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1815                 if (nmb == NULL) {
1816                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1817                                    "queue_id=%u", (unsigned) rxq->port_id,
1818                                    (unsigned) rxq->queue_id);
1819                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1820                         break;
1821                 }
1822
1823                 nb_hold++;
1824                 rxe = &sw_ring[rx_id];
1825                 rx_id++;
1826                 if (rx_id == rxq->nb_rx_desc)
1827                         rx_id = 0;
1828
1829                 /* Prefetch next mbuf while processing current one. */
1830                 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1831
1832                 /*
1833                  * When next RX descriptor is on a cache-line boundary,
1834                  * prefetch the next 4 RX descriptors and the next 8 pointers
1835                  * to mbufs.
1836                  */
1837                 if ((rx_id & 0x3) == 0) {
1838                         rte_ixgbe_prefetch(&rx_ring[rx_id]);
1839                         rte_ixgbe_prefetch(&sw_ring[rx_id]);
1840                 }
1841
1842                 rxm = rxe->mbuf;
1843                 rxe->mbuf = nmb;
1844                 dma_addr =
1845                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1846                 rxdp->read.hdr_addr = 0;
1847                 rxdp->read.pkt_addr = dma_addr;
1848
1849                 /*
1850                  * Initialize the returned mbuf.
1851                  * 1) setup generic mbuf fields:
1852                  *    - number of segments,
1853                  *    - next segment,
1854                  *    - packet length,
1855                  *    - RX port identifier.
1856                  * 2) integrate hardware offload data, if any:
1857                  *    - RSS flag & hash,
1858                  *    - IP checksum flag,
1859                  *    - VLAN TCI, if any,
1860                  *    - error flags.
1861                  */
1862                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1863                                       rxq->crc_len);
1864                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1865                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1866                 rxm->nb_segs = 1;
1867                 rxm->next = NULL;
1868                 rxm->pkt_len = pkt_len;
1869                 rxm->data_len = pkt_len;
1870                 rxm->port = rxq->port_id;
1871
1872                 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1873                 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1874                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1875
1876                 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1877                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1878                 pkt_flags = pkt_flags |
1879                         ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1880                 rxm->ol_flags = pkt_flags;
1881                 rxm->packet_type =
1882                         ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1883                                                        rxq->pkt_type_mask);
1884
1885                 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1886                         rxm->hash.rss = rte_le_to_cpu_32(
1887                                                 rxd.wb.lower.hi_dword.rss);
1888                 else if (pkt_flags & PKT_RX_FDIR) {
1889                         rxm->hash.fdir.hash = rte_le_to_cpu_16(
1890                                         rxd.wb.lower.hi_dword.csum_ip.csum) &
1891                                         IXGBE_ATR_HASH_MASK;
1892                         rxm->hash.fdir.id = rte_le_to_cpu_16(
1893                                         rxd.wb.lower.hi_dword.csum_ip.ip_id);
1894                 }
1895                 /*
1896                  * Store the mbuf address into the next entry of the array
1897                  * of returned packets.
1898                  */
1899                 rx_pkts[nb_rx++] = rxm;
1900         }
1901         rxq->rx_tail = rx_id;
1902
1903         /*
1904          * If the number of free RX descriptors is greater than the RX free
1905          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1906          * register.
1907          * Update the RDT with the value of the last processed RX descriptor
1908          * minus 1, to guarantee that the RDT register is never equal to the
1909          * RDH register, which creates a "full" ring situation from the
1910          * hardware point of view...
1911          */
1912         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1913         if (nb_hold > rxq->rx_free_thresh) {
1914                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1915                            "nb_hold=%u nb_rx=%u",
1916                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1917                            (unsigned) rx_id, (unsigned) nb_hold,
1918                            (unsigned) nb_rx);
1919                 rx_id = (uint16_t) ((rx_id == 0) ?
1920                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1921                 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1922                 nb_hold = 0;
1923         }
1924         rxq->nb_rx_hold = nb_hold;
1925         return nb_rx;
1926 }
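
/*
 * Illustrative note (not part of the driver): a minimal application-side
 * polling loop over this receive path, with hypothetical port/queue ids:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (i = 0; i < nb; i++) {
 *		// process pkts[i], then release it
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 */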
1927
1928 /**
1929  * Return the RSC count of a descriptor (non-zero means an RSC descriptor).
1930  */
1931 static inline uint32_t
1932 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1933 {
1934         return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1935                 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1936 }
1937
1938 /**
1939  * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1940  *
1941  * Fill the following info in the HEAD buffer of the Rx cluster:
1942  *    - RX port identifier
1943  *    - hardware offload data, if any:
1944  *      - RSS flag & hash
1945  *      - IP checksum flag
1946  *      - VLAN TCI, if any
1947  *      - error flags
1948  * @head HEAD of the packet cluster
1949  * @desc HW descriptor to get data from
1950  * @rxq Pointer to the Rx queue
1951  */
1952 static inline void
1953 ixgbe_fill_cluster_head_buf(
1954         struct rte_mbuf *head,
1955         union ixgbe_adv_rx_desc *desc,
1956         struct ixgbe_rx_queue *rxq,
1957         uint32_t staterr)
1958 {
1959         uint32_t pkt_info;
1960         uint64_t pkt_flags;
1961
1962         head->port = rxq->port_id;
1963
1964         /* The vlan_tci field is only valid when PKT_RX_VLAN is
1965          * set in the pkt_flags field.
1966          */
1967         head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1968         pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1969         pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1970         pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1971         pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1972         head->ol_flags = pkt_flags;
1973         head->packet_type =
1974                 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1975
1976         if (likely(pkt_flags & PKT_RX_RSS_HASH))
1977                 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1978         else if (pkt_flags & PKT_RX_FDIR) {
1979                 head->hash.fdir.hash =
1980                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1981                                                           & IXGBE_ATR_HASH_MASK;
1982                 head->hash.fdir.id =
1983                         rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1984         }
1985 }
1986
1987 /**
1988  * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1989  *
1990  * @rx_queue Rx queue handle
1991  * @rx_pkts table of received packets
1992  * @nb_pkts size of rx_pkts table
1993  * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1994  *
1995  * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1996  * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1997  *
1998  * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1999  * 1) When non-EOP RSC completion arrives:
2000  *    a) Update the HEAD of the current RSC aggregation cluster with the new
2001  *       segment's data length.
2002  *    b) Set the "next" pointer of the current segment to point to the segment
2003  *       at the NEXTP index.
2004  *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2005  *       in the sw_rsc_ring.
2006  * 2) When EOP arrives we just update the cluster's total length and offload
2007  *    flags and deliver the cluster up to the upper layers. In our case - put it
2008  *    in the rx_pkts table.
2009  *
2010  * Returns the number of received packets/clusters (according to the "bulk
2011  * receive" interface).
2012  */
2013 static inline uint16_t
2014 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2015                     bool bulk_alloc)
2016 {
2017         struct ixgbe_rx_queue *rxq = rx_queue;
2018         volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2019         struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2020         struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2021         uint16_t rx_id = rxq->rx_tail;
2022         uint16_t nb_rx = 0;
2023         uint16_t nb_hold = rxq->nb_rx_hold;
2024         uint16_t prev_id = rxq->rx_tail;
2025
2026         while (nb_rx < nb_pkts) {
2027                 bool eop;
2028                 struct ixgbe_rx_entry *rxe;
2029                 struct ixgbe_scattered_rx_entry *sc_entry;
2030                 struct ixgbe_scattered_rx_entry *next_sc_entry;
2031                 struct ixgbe_rx_entry *next_rxe = NULL;
2032                 struct rte_mbuf *first_seg;
2033                 struct rte_mbuf *rxm;
2034                 struct rte_mbuf *nmb = NULL;
2035                 union ixgbe_adv_rx_desc rxd;
2036                 uint16_t data_len;
2037                 uint16_t next_id;
2038                 volatile union ixgbe_adv_rx_desc *rxdp;
2039                 uint32_t staterr;
2040
2041 next_desc:
2042                 /*
2043                  * The code in this whole file uses the volatile pointer to
2044                  * ensure the read ordering of the status and the rest of the
2045                  * descriptor fields (on the compiler level only!!!). This is so
2046                  * UGLY - why not just use the compiler barrier instead? DPDK
2047                  * even has the rte_compiler_barrier() for that.
2048                  *
2049                  * But most importantly this is just wrong because this doesn't
2050                  * ensure memory ordering in a general case at all. For
2051                  * instance, DPDK is supposed to work on Power CPUs where
2052                  * compiler barrier may just not be enough!
2053                  *
2054                  * I tried to write only this function properly to have a
2055                  * starting point (as a part of an LRO/RSC series) but the
2056                  * compiler cursed at me when I tried to cast away the
2057                  * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2058                  * keeping it the way it is for now.
2059                  *
2060                  * The code in this file is broken in so many other places and
2061                  * will just not work on a big endian CPU anyway therefore the
2062                  * lines below will have to be revisited together with the rest
2063                  * of the ixgbe PMD.
2064                  *
2065                  * TODO:
2066                  *    - Get rid of "volatile" and let the compiler do its job.
2067                  *    - Use the proper memory barrier (rte_rmb()) to ensure the
2068                  *      memory ordering below.
2069                  */
2070                 rxdp = &rx_ring[rx_id];
2071                 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2072
2073                 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2074                         break;
2075
2076                 rxd = *rxdp;
2077
2078                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2079                                   "staterr=0x%x data_len=%u",
2080                            rxq->port_id, rxq->queue_id, rx_id, staterr,
2081                            rte_le_to_cpu_16(rxd.wb.upper.length));
2082
2083                 if (!bulk_alloc) {
2084                         nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2085                         if (nmb == NULL) {
2086                                 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2087                                                   "port_id=%u queue_id=%u",
2088                                            rxq->port_id, rxq->queue_id);
2089
2090                                 rte_eth_devices[rxq->port_id].data->
2091                                                         rx_mbuf_alloc_failed++;
2092                                 break;
2093                         }
2094                 } else if (nb_hold > rxq->rx_free_thresh) {
2095                         uint16_t next_rdt = rxq->rx_free_trigger;
2096
2097                         if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2098                                 rte_wmb();
2099                                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2100                                                             next_rdt);
2101                                 nb_hold -= rxq->rx_free_thresh;
2102                         } else {
2103                                 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2104                                                   "port_id=%u queue_id=%u",
2105                                            rxq->port_id, rxq->queue_id);
2106
2107                                 rte_eth_devices[rxq->port_id].data->
2108                                                         rx_mbuf_alloc_failed++;
2109                                 break;
2110                         }
2111                 }
2112
2113                 nb_hold++;
2114                 rxe = &sw_ring[rx_id];
2115                 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2116
2117                 next_id = rx_id + 1;
2118                 if (next_id == rxq->nb_rx_desc)
2119                         next_id = 0;
2120
2121                 /* Prefetch next mbuf while processing current one. */
2122                 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2123
2124                 /*
2125                  * When next RX descriptor is on a cache-line boundary,
2126                  * prefetch the next 4 RX descriptors and the next 4 pointers
2127                  * to mbufs.
2128                  */
2129                 if ((next_id & 0x3) == 0) {
2130                         rte_ixgbe_prefetch(&rx_ring[next_id]);
2131                         rte_ixgbe_prefetch(&sw_ring[next_id]);
2132                 }
2133
2134                 rxm = rxe->mbuf;
2135
2136                 if (!bulk_alloc) {
2137                         __le64 dma =
2138                           rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2139                         /*
2140                          * Update RX descriptor with the physical address of the
2141                          * new data buffer of the newly allocated mbuf.
2142                          */
2143                         rxe->mbuf = nmb;
2144
2145                         rxm->data_off = RTE_PKTMBUF_HEADROOM;
2146                         rxdp->read.hdr_addr = 0;
2147                         rxdp->read.pkt_addr = dma;
2148                 } else
2149                         rxe->mbuf = NULL;
2150
2151                 /*
2152                  * Set data length & data buffer address of mbuf.
2153                  */
2154                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2155                 rxm->data_len = data_len;
2156
2157                 if (!eop) {
2158                         uint16_t nextp_id;
2159                         /*
2160                          * Get next descriptor index:
2161                          *  - For RSC it's in the NEXTP field.
2162                          *  - For a scattered packet - it's just a following
2163                          *    descriptor.
2164                          */
2165                         if (ixgbe_rsc_count(&rxd))
2166                                 nextp_id =
2167                                         (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2168                                                        IXGBE_RXDADV_NEXTP_SHIFT;
2169                         else
2170                                 nextp_id = next_id;
2171
2172                         next_sc_entry = &sw_sc_ring[nextp_id];
2173                         next_rxe = &sw_ring[nextp_id];
2174                         rte_ixgbe_prefetch(next_rxe);
2175                 }
2176
2177                 sc_entry = &sw_sc_ring[rx_id];
2178                 first_seg = sc_entry->fbuf;
2179                 sc_entry->fbuf = NULL;
2180
2181                 /*
2182                  * If this is the first buffer of the received packet,
2183                  * set the pointer to the first mbuf of the packet and
2184                  * initialize its context.
2185                  * Otherwise, update the total length and the number of segments
2186                  * of the current scattered packet, and update the pointer to
2187                  * the last mbuf of the current packet.
2188                  */
2189                 if (first_seg == NULL) {
2190                         first_seg = rxm;
2191                         first_seg->pkt_len = data_len;
2192                         first_seg->nb_segs = 1;
2193                 } else {
2194                         first_seg->pkt_len += data_len;
2195                         first_seg->nb_segs++;
2196                 }
2197
2198                 prev_id = rx_id;
2199                 rx_id = next_id;
2200
2201                 /*
2202                  * If this is not the last buffer of the received packet, update
2203                  * the pointer to the first mbuf at the NEXTP entry in the
2204                  * sw_sc_ring and continue to parse the RX ring.
2205                  */
2206                 if (!eop && next_rxe) {
2207                         rxm->next = next_rxe->mbuf;
2208                         next_sc_entry->fbuf = first_seg;
2209                         goto next_desc;
2210                 }
2211
2212                 /* Initialize the first mbuf of the returned packet */
2213                 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2214
2215                 /*
2216                  * Deal with the case when HW CRC strip is disabled.
2217                  * That can't happen when LRO is enabled, but still could
2218                  * happen for scattered RX mode.
2219                  */
2220                 first_seg->pkt_len -= rxq->crc_len;
2221                 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2222                         struct rte_mbuf *lp;
2223
2224                         for (lp = first_seg; lp->next != rxm; lp = lp->next)
2225                                 ;
2226
2227                         first_seg->nb_segs--;
2228                         lp->data_len -= rxq->crc_len - rxm->data_len;
2229                         lp->next = NULL;
2230                         rte_pktmbuf_free_seg(rxm);
2231                 } else
2232                         rxm->data_len -= rxq->crc_len;
2233
2234                 /* Prefetch data of first segment, if configured to do so. */
2235                 rte_packet_prefetch((char *)first_seg->buf_addr +
2236                         first_seg->data_off);
2237
2238                 /*
2239                  * Store the mbuf address into the next entry of the array
2240                  * of returned packets.
2241                  */
2242                 rx_pkts[nb_rx++] = first_seg;
2243         }
2244
2245         /*
2246          * Record index of the next RX descriptor to probe.
2247          */
2248         rxq->rx_tail = rx_id;
2249
2250         /*
2251          * If the number of free RX descriptors is greater than the RX free
2252          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2253          * register.
2254          * Update the RDT with the value of the last processed RX descriptor
2255          * minus 1, to guarantee that the RDT register is never equal to the
2256          * RDH register, which creates a "full" ring situation from the
2257          * hardware point of view...
2258          */
2259         if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2260                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2261                            "nb_hold=%u nb_rx=%u",
2262                            rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2263
2264                 rte_wmb();
2265                 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2266                 nb_hold = 0;
2267         }
2268
2269         rxq->nb_rx_hold = nb_hold;
2270         return nb_rx;
2271 }
2272
2273 uint16_t
2274 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2275                                  uint16_t nb_pkts)
2276 {
2277         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2278 }
2279
2280 uint16_t
2281 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2282                                uint16_t nb_pkts)
2283 {
2284         return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2285 }
2286
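/*
 * Illustrative note (not part of the driver): the LRO receive handlers above
 * are only selected when the application enables LRO at configure time, e.g.
 * (hypothetical sketch, other rte_eth_conf fields omitted):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.offloads = DEV_RX_OFFLOAD_TCP_LRO,
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */
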
2287 /*********************************************************************
2288  *
2289  *  Queue management functions
2290  *
2291  **********************************************************************/
2292
2293 static void __attribute__((cold))
2294 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2295 {
2296         unsigned i;
2297
2298         if (txq->sw_ring != NULL) {
2299                 for (i = 0; i < txq->nb_tx_desc; i++) {
2300                         if (txq->sw_ring[i].mbuf != NULL) {
2301                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2302                                 txq->sw_ring[i].mbuf = NULL;
2303                         }
2304                 }
2305         }
2306 }
2307
2308 static void __attribute__((cold))
2309 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2310 {
2311         if (txq != NULL &&
2312             txq->sw_ring != NULL)
2313                 rte_free(txq->sw_ring);
2314 }
2315
2316 static void __attribute__((cold))
2317 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2318 {
2319         if (txq != NULL && txq->ops != NULL) {
2320                 txq->ops->release_mbufs(txq);
2321                 txq->ops->free_swring(txq);
2322                 rte_free(txq);
2323         }
2324 }
2325
2326 void __attribute__((cold))
2327 ixgbe_dev_tx_queue_release(void *txq)
2328 {
2329         ixgbe_tx_queue_release(txq);
2330 }
2331
2332 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2333 static void __attribute__((cold))
2334 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2335 {
2336         static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2337         struct ixgbe_tx_entry *txe = txq->sw_ring;
2338         uint16_t prev, i;
2339
2340         /* Zero out HW ring memory */
2341         for (i = 0; i < txq->nb_tx_desc; i++) {
2342                 txq->tx_ring[i] = zeroed_desc;
2343         }
2344
2345         /* Initialize SW ring entries */
2346         prev = (uint16_t) (txq->nb_tx_desc - 1);
2347         for (i = 0; i < txq->nb_tx_desc; i++) {
2348                 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2349
2350                 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2351                 txe[i].mbuf = NULL;
2352                 txe[i].last_id = i;
2353                 txe[prev].next_id = i;
2354                 prev = i;
2355         }
2356
2357         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2358         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2359
2360         txq->tx_tail = 0;
2361         txq->nb_tx_used = 0;
2362         /*
2363          * Always allow 1 descriptor to be un-allocated to avoid
2364          * a H/W race condition
2365          */
2366         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2367         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2368         txq->ctx_curr = 0;
2369         memset((void *)&txq->ctx_cache, 0,
2370                 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2371 }
2372
2373 static const struct ixgbe_txq_ops def_txq_ops = {
2374         .release_mbufs = ixgbe_tx_queue_release_mbufs,
2375         .free_swring = ixgbe_tx_free_swring,
2376         .reset = ixgbe_reset_tx_queue,
2377 };
2378
2379 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2380  * the queue parameters. Used in tx_queue_setup by primary process and then
2381  * in dev_init by secondary process when attaching to an existing ethdev.
2382  */
2383 void __attribute__((cold))
2384 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2385 {
2386         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2387         if ((txq->offloads == 0) &&
2388 #ifdef RTE_LIBRTE_SECURITY
2389                         !(txq->using_ipsec) &&
2390 #endif
2391                         (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2392                 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2393                 dev->tx_pkt_prepare = NULL;
2394 #ifdef RTE_IXGBE_INC_VECTOR
2395                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2396                                 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2397                                         ixgbe_txq_vec_setup(txq) == 0)) {
2398                         PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2399                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2400                 } else
2401 #endif
2402                 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2403         } else {
2404                 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2405                 PMD_INIT_LOG(DEBUG,
2406                                 " - offloads = 0x%" PRIx64,
2407                                 txq->offloads);
2408                 PMD_INIT_LOG(DEBUG,
2409                                 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2410                                 (unsigned long)txq->tx_rs_thresh,
2411                                 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2412                 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2413                 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2414         }
2415 }
2416
2417 uint64_t
2418 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2419 {
2420         RTE_SET_USED(dev);
2421
2422         return 0;
2423 }
2424
2425 uint64_t
2426 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2427 {
2428         uint64_t tx_offload_capa;
2429         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2430
2431         tx_offload_capa =
2432                 DEV_TX_OFFLOAD_VLAN_INSERT |
2433                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2434                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2435                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2436                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2437                 DEV_TX_OFFLOAD_TCP_TSO     |
2438                 DEV_TX_OFFLOAD_MULTI_SEGS;
2439
2440         if (hw->mac.type == ixgbe_mac_82599EB ||
2441             hw->mac.type == ixgbe_mac_X540)
2442                 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2443
2444         if (hw->mac.type == ixgbe_mac_X550 ||
2445             hw->mac.type == ixgbe_mac_X550EM_x ||
2446             hw->mac.type == ixgbe_mac_X550EM_a)
2447                 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2448
2449 #ifdef RTE_LIBRTE_SECURITY
2450         if (dev->security_ctx)
2451                 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2452 #endif
2453         return tx_offload_capa;
2454 }
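
/*
 * Illustrative note (not part of the driver): applications normally discover
 * these capabilities through rte_eth_dev_info_get() before requesting them,
 * e.g. (hypothetical sketch):
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
 *		// TSO may be requested in rte_eth_conf.txmode.offloads
 *	}
 */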
2455
2456 int __attribute__((cold))
2457 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2458                          uint16_t queue_idx,
2459                          uint16_t nb_desc,
2460                          unsigned int socket_id,
2461                          const struct rte_eth_txconf *tx_conf)
2462 {
2463         const struct rte_memzone *tz;
2464         struct ixgbe_tx_queue *txq;
2465         struct ixgbe_hw     *hw;
2466         uint16_t tx_rs_thresh, tx_free_thresh;
2467         uint64_t offloads;
2468
2469         PMD_INIT_FUNC_TRACE();
2470         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2471
2472         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2473
2474         /*
2475          * Validate number of transmit descriptors.
2476          * It must not exceed the hardware maximum, and must be a multiple
2477          * of IXGBE_ALIGN.
2478          */
2479         if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2480                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2481                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2482                 return -EINVAL;
2483         }
2484
2485         /*
2486          * The following two parameters control the setting of the RS bit on
2487          * transmit descriptors.
2488          * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2489          * descriptors have been used.
2490          * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2491          * descriptors are used or if the number of descriptors required
2492          * to transmit a packet is greater than the number of free TX
2493          * descriptors.
2494          * The following constraints must be satisfied:
2495          *  tx_rs_thresh must be greater than 0.
2496          *  tx_rs_thresh must be less than the size of the ring minus 2.
2497          *  tx_rs_thresh must be less than or equal to tx_free_thresh.
2498          *  tx_rs_thresh must be a divisor of the ring size.
2499          *  tx_free_thresh must be greater than 0.
2500          *  tx_free_thresh must be less than the size of the ring minus 3.
2501          *  tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2502          * One descriptor in the TX ring is used as a sentinel to avoid a
2503          * H/W race condition, hence the maximum threshold constraints.
2504          * When set to zero use default values.
2505          */
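        /*
         * Worked example (hypothetical values): nb_desc = 512 with
         * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies every
         * constraint above: 32 > 0, 32 < 510, 32 <= 32, 512 % 32 == 0,
         * 32 < 509 and 32 + 32 <= 512.
         */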
2506         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2507                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2508         /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2509         tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2510                         nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2511         if (tx_conf->tx_rs_thresh > 0)
2512                 tx_rs_thresh = tx_conf->tx_rs_thresh;
2513         if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2514                 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2515                              "exceed nb_desc. (tx_rs_thresh=%u "
2516                              "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2517                              (unsigned int)tx_rs_thresh,
2518                              (unsigned int)tx_free_thresh,
2519                              (unsigned int)nb_desc,
2520                              (int)dev->data->port_id,
2521                              (int)queue_idx);
2522                 return -(EINVAL);
2523         }
2524         if (tx_rs_thresh >= (nb_desc - 2)) {
2525                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2526                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
2527                         "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2528                         (int)dev->data->port_id, (int)queue_idx);
2529                 return -(EINVAL);
2530         }
2531         if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2532                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2533                         "(tx_rs_thresh=%u port=%d queue=%d)",
2534                         DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2535                         (int)dev->data->port_id, (int)queue_idx);
2536                 return -(EINVAL);
2537         }
2538         if (tx_free_thresh >= (nb_desc - 3)) {
2539                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2540                              "number of TX descriptors minus 3. "
2541                              "(tx_free_thresh=%u "
2542                              "port=%d queue=%d)",
2543                              (unsigned int)tx_free_thresh,
2544                              (int)dev->data->port_id, (int)queue_idx);
2545                 return -(EINVAL);
2546         }
2547         if (tx_rs_thresh > tx_free_thresh) {
2548                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2549                              "tx_free_thresh. (tx_free_thresh=%u "
2550                              "tx_rs_thresh=%u port=%d queue=%d)",
2551                              (unsigned int)tx_free_thresh,
2552                              (unsigned int)tx_rs_thresh,
2553                              (int)dev->data->port_id,
2554                              (int)queue_idx);
2555                 return -(EINVAL);
2556         }
2557         if ((nb_desc % tx_rs_thresh) != 0) {
2558                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2559                              "number of TX descriptors. (tx_rs_thresh=%u "
2560                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2561                              (int)dev->data->port_id, (int)queue_idx);
2562                 return -(EINVAL);
2563         }
2564
2565         /*
2566          * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2567          * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2568          * by the NIC and all descriptors are written back after the NIC
2569          * accumulates WTHRESH descriptors.
2570          */
2571         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2572                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2573                              "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2574                              "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2575                              (int)dev->data->port_id, (int)queue_idx);
2576                 return -(EINVAL);
2577         }
2578
2579         /* Free memory prior to re-allocation if needed... */
2580         if (dev->data->tx_queues[queue_idx] != NULL) {
2581                 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2582                 dev->data->tx_queues[queue_idx] = NULL;
2583         }
2584
2585         /* First allocate the tx queue data structure */
2586         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2587                                  RTE_CACHE_LINE_SIZE, socket_id);
2588         if (txq == NULL)
2589                 return -ENOMEM;
2590
2591         /*
2592          * Allocate TX ring hardware descriptors. A memzone large enough to
2593          * handle the maximum ring size is allocated in order to allow for
2594          * resizing in later calls to the queue setup function.
2595          */
2596         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2597                         sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2598                         IXGBE_ALIGN, socket_id);
2599         if (tz == NULL) {
2600                 ixgbe_tx_queue_release(txq);
2601                 return -ENOMEM;
2602         }
2603
2604         txq->nb_tx_desc = nb_desc;
2605         txq->tx_rs_thresh = tx_rs_thresh;
2606         txq->tx_free_thresh = tx_free_thresh;
2607         txq->pthresh = tx_conf->tx_thresh.pthresh;
2608         txq->hthresh = tx_conf->tx_thresh.hthresh;
2609         txq->wthresh = tx_conf->tx_thresh.wthresh;
2610         txq->queue_id = queue_idx;
2611         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2612                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2613         txq->port_id = dev->data->port_id;
2614         txq->offloads = offloads;
2615         txq->ops = &def_txq_ops;
2616         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2617 #ifdef RTE_LIBRTE_SECURITY
2618         txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2619                         DEV_TX_OFFLOAD_SECURITY);
2620 #endif
2621
2622         /*
2623          * Use the VF tail register (VFTDT) if running on a virtual function.
2624          */
2625         if (hw->mac.type == ixgbe_mac_82599_vf ||
2626             hw->mac.type == ixgbe_mac_X540_vf ||
2627             hw->mac.type == ixgbe_mac_X550_vf ||
2628             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2629             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2630                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2631         else
2632                 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2633
2634         txq->tx_ring_phys_addr = tz->iova;
2635         txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2636
2637         /* Allocate software ring */
2638         txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2639                                 sizeof(struct ixgbe_tx_entry) * nb_desc,
2640                                 RTE_CACHE_LINE_SIZE, socket_id);
2641         if (txq->sw_ring == NULL) {
2642                 ixgbe_tx_queue_release(txq);
2643                 return -ENOMEM;
2644         }
2645         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2646                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2647
2648         /* set up vector or scalar TX function as appropriate */
2649         ixgbe_set_tx_function(dev, txq);
2650
2651         txq->ops->reset(txq);
2652
2653         dev->data->tx_queues[queue_idx] = txq;
2654
2655
2656         return 0;
2657 }
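
/*
 * Illustrative application-side sketch (not part of this driver): a typical
 * way to reach the setup function above through the ethdev API.  The port id,
 * descriptor count and thresholds are hypothetical and simply satisfy the
 * constraints checked above.
 *
 *   struct rte_eth_txconf txconf = {
 *           .tx_rs_thresh = 32,
 *           .tx_free_thresh = 32,
 *   };
 *   int ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *                                    rte_eth_dev_socket_id(port_id),
 *                                    &txconf);
 *   if (ret != 0)
 *           rte_exit(EXIT_FAILURE, "Tx queue setup failed: %d\n", ret);
 */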
2658
2659 /**
2660  * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2661  *
2662  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2663  * in the sw_rsc_ring is not set to NULL but rather points to the next
2664  * mbuf of this RSC aggregation (that has not been completed yet and still
2665  * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
2666  * will just free the first "nb_segs" segments of the cluster explicitly by
2667  * calling rte_pktmbuf_free_seg().
2668  *
2669  * @m scattered cluster head
2670  */
2671 static void __attribute__((cold))
2672 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2673 {
2674         uint16_t i, nb_segs = m->nb_segs;
2675         struct rte_mbuf *next_seg;
2676
2677         for (i = 0; i < nb_segs; i++) {
2678                 next_seg = m->next;
2679                 rte_pktmbuf_free_seg(m);
2680                 m = next_seg;
2681         }
2682 }
2683
2684 static void __attribute__((cold))
2685 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2686 {
2687         unsigned i;
2688
2689 #ifdef RTE_IXGBE_INC_VECTOR
2690         /* SSE Vector driver has a different way of releasing mbufs. */
2691         if (rxq->rx_using_sse) {
2692                 ixgbe_rx_queue_release_mbufs_vec(rxq);
2693                 return;
2694         }
2695 #endif
2696
2697         if (rxq->sw_ring != NULL) {
2698                 for (i = 0; i < rxq->nb_rx_desc; i++) {
2699                         if (rxq->sw_ring[i].mbuf != NULL) {
2700                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2701                                 rxq->sw_ring[i].mbuf = NULL;
2702                         }
2703                 }
2704                 if (rxq->rx_nb_avail) {
2705                         for (i = 0; i < rxq->rx_nb_avail; ++i) {
2706                                 struct rte_mbuf *mb;
2707
2708                                 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2709                                 rte_pktmbuf_free_seg(mb);
2710                         }
2711                         rxq->rx_nb_avail = 0;
2712                 }
2713         }
2714
2715         if (rxq->sw_sc_ring)
2716                 for (i = 0; i < rxq->nb_rx_desc; i++)
2717                         if (rxq->sw_sc_ring[i].fbuf) {
2718                                 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2719                                 rxq->sw_sc_ring[i].fbuf = NULL;
2720                         }
2721 }
2722
2723 static void __attribute__((cold))
2724 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2725 {
2726         if (rxq != NULL) {
2727                 ixgbe_rx_queue_release_mbufs(rxq);
2728                 rte_free(rxq->sw_ring);
2729                 rte_free(rxq->sw_sc_ring);
2730                 rte_free(rxq);
2731         }
2732 }
2733
2734 void __attribute__((cold))
2735 ixgbe_dev_rx_queue_release(void *rxq)
2736 {
2737         ixgbe_rx_queue_release(rxq);
2738 }
2739
2740 /*
2741  * Check if Rx Burst Bulk Alloc function can be used.
2742  * Return
2743  *        0: the preconditions are satisfied and the bulk allocation function
2744  *           can be used.
2745  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2746  *           function must be used.
2747  */
2748 static inline int __attribute__((cold))
2749 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2750 {
2751         int ret = 0;
2752
2753         /*
2754          * Make sure the following pre-conditions are satisfied:
2755          *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2756          *   rxq->rx_free_thresh < rxq->nb_rx_desc
2757          *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2758          * Scattered packets are not supported.  This should be checked
2759          * outside of this function.
2760          */
2761         if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2762                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2763                              "rxq->rx_free_thresh=%d, "
2764                              "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2765                              rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2766                 ret = -EINVAL;
2767         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2768                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2769                              "rxq->rx_free_thresh=%d, "
2770                              "rxq->nb_rx_desc=%d",
2771                              rxq->rx_free_thresh, rxq->nb_rx_desc);
2772                 ret = -EINVAL;
2773         } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2774                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2775                              "rxq->nb_rx_desc=%d, "
2776                              "rxq->rx_free_thresh=%d",
2777                              rxq->nb_rx_desc, rxq->rx_free_thresh);
2778                 ret = -EINVAL;
2779         }
2780
2781         return ret;
2782 }
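
/*
 * For example (hypothetical values, assuming RTE_PMD_IXGBE_RX_MAX_BURST is
 * 32): a queue with nb_rx_desc = 512 and rx_free_thresh = 32 passes all
 * three checks above (32 >= 32, 32 < 512, 512 % 32 == 0), whereas
 * rx_free_thresh = 16 would fail the first one and disable bulk allocation
 * for the whole port.
 */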
2783
2784 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2785 static void __attribute__((cold))
2786 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2787 {
2788         static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2789         unsigned i;
2790         uint16_t len = rxq->nb_rx_desc;
2791
2792         /*
2793          * By default, the Rx queue setup function allocates enough memory for
2794          * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
2795          * extra memory at the end of the descriptor ring to be zeroed out.
2796          */
2797         if (adapter->rx_bulk_alloc_allowed)
2798                 /* zero out extra memory */
2799                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2800
2801         /*
2802          * Zero out HW ring memory. Zero out extra memory at the end of
2803          * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2804          * reads extra memory as zeros.
2805          */
2806         for (i = 0; i < len; i++) {
2807                 rxq->rx_ring[i] = zeroed_desc;
2808         }
2809
2810         /*
2811          * initialize extra software ring entries. Space for these extra
2812          * entries is always allocated
2813          */
2814         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2815         for (i = rxq->nb_rx_desc; i < len; ++i) {
2816                 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2817         }
2818
2819         rxq->rx_nb_avail = 0;
2820         rxq->rx_next_avail = 0;
2821         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2822         rxq->rx_tail = 0;
2823         rxq->nb_rx_hold = 0;
2824         rxq->pkt_first_seg = NULL;
2825         rxq->pkt_last_seg = NULL;
2826
2827 #ifdef RTE_IXGBE_INC_VECTOR
2828         rxq->rxrearm_start = 0;
2829         rxq->rxrearm_nb = 0;
2830 #endif
2831 }
2832
2833 static int
2834 ixgbe_is_vf(struct rte_eth_dev *dev)
2835 {
2836         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2837
2838         switch (hw->mac.type) {
2839         case ixgbe_mac_82599_vf:
2840         case ixgbe_mac_X540_vf:
2841         case ixgbe_mac_X550_vf:
2842         case ixgbe_mac_X550EM_x_vf:
2843         case ixgbe_mac_X550EM_a_vf:
2844                 return 1;
2845         default:
2846                 return 0;
2847         }
2848 }
2849
2850 uint64_t
2851 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2852 {
2853         uint64_t offloads = 0;
2854         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2855
2856         if (hw->mac.type != ixgbe_mac_82598EB)
2857                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2858
2859         return offloads;
2860 }
2861
2862 uint64_t
2863 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2864 {
2865         uint64_t offloads;
2866         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2867
2868         offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
2869                    DEV_RX_OFFLOAD_UDP_CKSUM   |
2870                    DEV_RX_OFFLOAD_TCP_CKSUM   |
2871                    DEV_RX_OFFLOAD_KEEP_CRC    |
2872                    DEV_RX_OFFLOAD_JUMBO_FRAME |
2873                    DEV_RX_OFFLOAD_VLAN_FILTER |
2874                    DEV_RX_OFFLOAD_SCATTER;
2875
2876         if (hw->mac.type == ixgbe_mac_82598EB)
2877                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2878
2879         if (ixgbe_is_vf(dev) == 0)
2880                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2881
2882         /*
2883          * RSC is only supported by 82599, x540 and x550 PF devices in a non-SR-IOV
2884          * mode.
2885          */
2886         if ((hw->mac.type == ixgbe_mac_82599EB ||
2887              hw->mac.type == ixgbe_mac_X540 ||
2888              hw->mac.type == ixgbe_mac_X550) &&
2889             !RTE_ETH_DEV_SRIOV(dev).active)
2890                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2891
2892         if (hw->mac.type == ixgbe_mac_82599EB ||
2893             hw->mac.type == ixgbe_mac_X540)
2894                 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2895
2896         if (hw->mac.type == ixgbe_mac_X550 ||
2897             hw->mac.type == ixgbe_mac_X550EM_x ||
2898             hw->mac.type == ixgbe_mac_X550EM_a)
2899                 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2900
2901 #ifdef RTE_LIBRTE_SECURITY
2902         if (dev->security_ctx)
2903                 offloads |= DEV_RX_OFFLOAD_SECURITY;
2904 #endif
2905
2906         return offloads;
2907 }
2908
2909 int __attribute__((cold))
2910 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2911                          uint16_t queue_idx,
2912                          uint16_t nb_desc,
2913                          unsigned int socket_id,
2914                          const struct rte_eth_rxconf *rx_conf,
2915                          struct rte_mempool *mp)
2916 {
2917         const struct rte_memzone *rz;
2918         struct ixgbe_rx_queue *rxq;
2919         struct ixgbe_hw     *hw;
2920         uint16_t len;
2921         struct ixgbe_adapter *adapter =
2922                 (struct ixgbe_adapter *)dev->data->dev_private;
2923         uint64_t offloads;
2924
2925         PMD_INIT_FUNC_TRACE();
2926         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2927
2928         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2929
2930         /*
2931          * Validate number of receive descriptors.
2932          * It must not exceed the hardware maximum, and must be a multiple
2933          * of IXGBE_ALIGN.
2934          */
2935         if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2936                         (nb_desc > IXGBE_MAX_RING_DESC) ||
2937                         (nb_desc < IXGBE_MIN_RING_DESC)) {
2938                 return -EINVAL;
2939         }
2940
2941         /* Free memory prior to re-allocation if needed... */
2942         if (dev->data->rx_queues[queue_idx] != NULL) {
2943                 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2944                 dev->data->rx_queues[queue_idx] = NULL;
2945         }
2946
2947         /* First allocate the rx queue data structure */
2948         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2949                                  RTE_CACHE_LINE_SIZE, socket_id);
2950         if (rxq == NULL)
2951                 return -ENOMEM;
2952         rxq->mb_pool = mp;
2953         rxq->nb_rx_desc = nb_desc;
2954         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2955         rxq->queue_id = queue_idx;
2956         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2957                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2958         rxq->port_id = dev->data->port_id;
2959         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2960                 rxq->crc_len = RTE_ETHER_CRC_LEN;
2961         else
2962                 rxq->crc_len = 0;
2963         rxq->drop_en = rx_conf->rx_drop_en;
2964         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2965         rxq->offloads = offloads;
2966
2967         /*
2968          * The packet type in RX descriptor is different for different NICs.
2969          * Some bits are used for x550 but reserved for other NICs.
2970          * So set different masks for different NICs.
2971          */
2972         if (hw->mac.type == ixgbe_mac_X550 ||
2973             hw->mac.type == ixgbe_mac_X550EM_x ||
2974             hw->mac.type == ixgbe_mac_X550EM_a ||
2975             hw->mac.type == ixgbe_mac_X550_vf ||
2976             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2977             hw->mac.type == ixgbe_mac_X550EM_a_vf)
2978                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2979         else
2980                 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2981
2982         /*
2983          * Allocate RX ring hardware descriptors. A memzone large enough to
2984          * handle the maximum ring size is allocated in order to allow for
2985          * resizing in later calls to the queue setup function.
2986          */
2987         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2988                                       RX_RING_SZ, IXGBE_ALIGN, socket_id);
2989         if (rz == NULL) {
2990                 ixgbe_rx_queue_release(rxq);
2991                 return -ENOMEM;
2992         }
2993
2994         /*
2995          * Zero init all the descriptors in the ring.
2996          */
2997         memset(rz->addr, 0, RX_RING_SZ);
2998
2999         /*
3000          * Use the VF registers (VFRDT/VFRDH) if running on a virtual function.
3001          */
3002         if (hw->mac.type == ixgbe_mac_82599_vf ||
3003             hw->mac.type == ixgbe_mac_X540_vf ||
3004             hw->mac.type == ixgbe_mac_X550_vf ||
3005             hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3006             hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3007                 rxq->rdt_reg_addr =
3008                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3009                 rxq->rdh_reg_addr =
3010                         IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3011         } else {
3012                 rxq->rdt_reg_addr =
3013                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3014                 rxq->rdh_reg_addr =
3015                         IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3016         }
3017
3018         rxq->rx_ring_phys_addr = rz->iova;
3019         rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3020
3021         /*
3022          * Certain constraints must be met in order to use the bulk buffer
3023          * allocation Rx burst function. If any of the Rx queues doesn't meet them,
3024          * the feature should be disabled for the whole port.
3025          */
3026         if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3027                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3028                                     "preconditions - canceling the feature for "
3029                                     "the whole port[%d]",
3030                              rxq->queue_id, rxq->port_id);
3031                 adapter->rx_bulk_alloc_allowed = false;
3032         }
3033
3034         /*
3035          * Allocate software ring. Allow for space at the end of the
3036          * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3037          * function does not access an invalid memory region.
3038          */
3039         len = nb_desc;
3040         if (adapter->rx_bulk_alloc_allowed)
3041                 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3042
3043         rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3044                                           sizeof(struct ixgbe_rx_entry) * len,
3045                                           RTE_CACHE_LINE_SIZE, socket_id);
3046         if (!rxq->sw_ring) {
3047                 ixgbe_rx_queue_release(rxq);
3048                 return -ENOMEM;
3049         }
3050
3051         /*
3052          * Always allocate even if it's not going to be needed in order to
3053          * simplify the code.
3054          *
3055          * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3056          * be requested in ixgbe_dev_rx_init(), which is called later from
3057          * dev_start() flow.
3058          */
3059         rxq->sw_sc_ring =
3060                 rte_zmalloc_socket("rxq->sw_sc_ring",
3061                                    sizeof(struct ixgbe_scattered_rx_entry) * len,
3062                                    RTE_CACHE_LINE_SIZE, socket_id);
3063         if (!rxq->sw_sc_ring) {
3064                 ixgbe_rx_queue_release(rxq);
3065                 return -ENOMEM;
3066         }
3067
3068         PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3069                             "dma_addr=0x%"PRIx64,
3070                      rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3071                      rxq->rx_ring_phys_addr);
3072
3073         if (!rte_is_power_of_2(nb_desc)) {
3074                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3075                                     "preconditions - canceling the feature for "
3076                                     "the whole port[%d]",
3077                              rxq->queue_id, rxq->port_id);
3078                 adapter->rx_vec_allowed = false;
3079         } else
3080                 ixgbe_rxq_vec_setup(rxq);
3081
3082         dev->data->rx_queues[queue_idx] = rxq;
3083
3084         ixgbe_reset_rx_queue(adapter, rxq);
3085
3086         return 0;
3087 }
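
/*
 * Illustrative application-side sketch (not part of this driver): a typical
 * way to reach the Rx setup above through the ethdev API.  Pool sizing,
 * descriptor count and rx_free_thresh are hypothetical; note that 512 is a
 * power of two, so the vector Rx precondition checked above also holds.
 *
 *   struct rte_mempool *mp;
 *   struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
 *
 *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   if (mp == NULL)
 *           rte_exit(EXIT_FAILURE, "mbuf pool creation failed\n");
 *   if (rte_eth_rx_queue_setup(port_id, 0, 512,
 *                              rte_eth_dev_socket_id(port_id),
 *                              &rxconf, mp) != 0)
 *           rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */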
3088
3089 uint32_t
3090 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3091 {
3092 #define IXGBE_RXQ_SCAN_INTERVAL 4
3093         volatile union ixgbe_adv_rx_desc *rxdp;
3094         struct ixgbe_rx_queue *rxq;
3095         uint32_t desc = 0;
3096
3097         rxq = dev->data->rx_queues[rx_queue_id];
3098         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3099
3100         while ((desc < rxq->nb_rx_desc) &&
3101                 (rxdp->wb.upper.status_error &
3102                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3103                 desc += IXGBE_RXQ_SCAN_INTERVAL;
3104                 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3105                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3106                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
3107                                 desc - rxq->nb_rx_desc]);
3108         }
3109
3110         return desc;
3111 }
3112
3113 int
3114 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3115 {
3116         volatile union ixgbe_adv_rx_desc *rxdp;
3117         struct ixgbe_rx_queue *rxq = rx_queue;
3118         uint32_t desc;
3119
3120         if (unlikely(offset >= rxq->nb_rx_desc))
3121                 return 0;
3122         desc = rxq->rx_tail + offset;
3123         if (desc >= rxq->nb_rx_desc)
3124                 desc -= rxq->nb_rx_desc;
3125
3126         rxdp = &rxq->rx_ring[desc];
3127         return !!(rxdp->wb.upper.status_error &
3128                         rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3129 }
3130
3131 int
3132 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3133 {
3134         struct ixgbe_rx_queue *rxq = rx_queue;
3135         volatile uint32_t *status;
3136         uint32_t nb_hold, desc;
3137
3138         if (unlikely(offset >= rxq->nb_rx_desc))
3139                 return -EINVAL;
3140
3141 #ifdef RTE_IXGBE_INC_VECTOR
3142         if (rxq->rx_using_sse)
3143                 nb_hold = rxq->rxrearm_nb;
3144         else
3145 #endif
3146                 nb_hold = rxq->nb_rx_hold;
3147         if (offset >= rxq->nb_rx_desc - nb_hold)
3148                 return RTE_ETH_RX_DESC_UNAVAIL;
3149
3150         desc = rxq->rx_tail + offset;
3151         if (desc >= rxq->nb_rx_desc)
3152                 desc -= rxq->nb_rx_desc;
3153
3154         status = &rxq->rx_ring[desc].wb.upper.status_error;
3155         if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3156                 return RTE_ETH_RX_DESC_DONE;
3157
3158         return RTE_ETH_RX_DESC_AVAIL;
3159 }
3160
3161 int
3162 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3163 {
3164         struct ixgbe_tx_queue *txq = tx_queue;
3165         volatile uint32_t *status;
3166         uint32_t desc;
3167
3168         if (unlikely(offset >= txq->nb_tx_desc))
3169                 return -EINVAL;
3170
3171         desc = txq->tx_tail + offset;
3172         /* go to next desc that has the RS bit */
3173         desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3174                 txq->tx_rs_thresh;
3175         if (desc >= txq->nb_tx_desc) {
3176                 desc -= txq->nb_tx_desc;
3177                 if (desc >= txq->nb_tx_desc)
3178                         desc -= txq->nb_tx_desc;
3179         }
3180
3181         status = &txq->tx_ring[desc].wb.status;
3182         if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3183                 return RTE_ETH_TX_DESC_DONE;
3184
3185         return RTE_ETH_TX_DESC_FULL;
3186 }
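
/*
 * Illustrative sketch (not part of this driver): applications normally call
 * the two status helpers above through the generic ethdev wrappers.  The
 * queue ids and offsets below are hypothetical.
 *
 *   int rs = rte_eth_rx_descriptor_status(port_id, 0, 16);
 *   int ts = rte_eth_tx_descriptor_status(port_id, 0, 256);
 *
 * rs == RTE_ETH_RX_DESC_DONE means the descriptor 16 slots past the Rx tail
 * has already been written back; ts == RTE_ETH_TX_DESC_FULL means that Tx
 * slot is still owned by the hardware.
 */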
3187
3188 /*
3189  * Set up link loopback for X540/X550 mode Tx->Rx.
3190  */
3191 static inline void __attribute__((cold))
3192 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3193 {
3194         uint32_t macc;
3195         u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3196
3197         PMD_INIT_FUNC_TRACE();
3198
3199         hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3200                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3201         macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3202
3203         if (enable) {
3204                 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3205                 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3206                 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3207                 macc |= IXGBE_MACC_FLU;
3208         } else {
3209                 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3210                 macc &= ~IXGBE_MACC_FLU;
3211         }
3212
3213         hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3214                               IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3215
3216         IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3217 }
3218
3219 void __attribute__((cold))
3220 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3221 {
3222         unsigned i;
3223         struct ixgbe_adapter *adapter =
3224                 (struct ixgbe_adapter *)dev->data->dev_private;
3225         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3226
3227         PMD_INIT_FUNC_TRACE();
3228
3229         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3230                 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3231
3232                 if (txq != NULL) {
3233                         txq->ops->release_mbufs(txq);
3234                         txq->ops->reset(txq);
3235                 }
3236         }
3237
3238         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3239                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3240
3241                 if (rxq != NULL) {
3242                         ixgbe_rx_queue_release_mbufs(rxq);
3243                         ixgbe_reset_rx_queue(adapter, rxq);
3244                 }
3245         }
3246         /* If loopback mode was enabled, reconfigure the link accordingly */
3247         if (dev->data->dev_conf.lpbk_mode != 0) {
3248                 if (hw->mac.type == ixgbe_mac_X540 ||
3249                      hw->mac.type == ixgbe_mac_X550 ||
3250                      hw->mac.type == ixgbe_mac_X550EM_x ||
3251                      hw->mac.type == ixgbe_mac_X550EM_a)
3252                         ixgbe_setup_loopback_link_x540_x550(hw, false);
3253         }
3254 }
3255
3256 void
3257 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3258 {
3259         unsigned i;
3260
3261         PMD_INIT_FUNC_TRACE();
3262
3263         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3264                 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3265                 dev->data->rx_queues[i] = NULL;
3266         }
3267         dev->data->nb_rx_queues = 0;
3268
3269         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3270                 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3271                 dev->data->tx_queues[i] = NULL;
3272         }
3273         dev->data->nb_tx_queues = 0;
3274 }
3275
3276 /*********************************************************************
3277  *
3278  *  Device RX/TX init functions
3279  *
3280  **********************************************************************/
3281
3282 /**
3283  * Receive Side Scaling (RSS)
3284  * See section 7.1.2.8 in the following document:
3285  *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3286  *
3287  * Principles:
3288  * The source and destination IP addresses of the IP header and the source
3289  * and destination ports of TCP/UDP headers, if any, of received packets are
3290  * hashed against a configurable random key to compute a 32-bit RSS hash result.
3291  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3292  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 4-bit
3293  * RSS output index which is used as the RX queue index where to store the
3294  * received packets.
3295  * The following output is supplied in the RX write-back descriptor:
3296  *     - 32-bit result of the Microsoft RSS hash function,
3297  *     - 4-bit RSS type field.
3298  */
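
/*
 * Worked example (hypothetical hash value): for a computed RSS hash of
 * 0x343e54c1 the seven LSBs are 0x343e54c1 & 0x7F = 0x41 = 65, so RETA
 * entry 65 selects the destination Rx queue, and the full 32-bit hash is
 * reported back to the application in the Rx write-back descriptor.
 */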
3299
3300 /*
3301  * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3302  * Used as the default key.
3303  */
3304 static uint8_t rss_intel_key[40] = {
3305         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3306         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3307         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3308         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3309         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3310 };
3311
3312 static void
3313 ixgbe_rss_disable(struct rte_eth_dev *dev)
3314 {
3315         struct ixgbe_hw *hw;
3316         uint32_t mrqc;
3317         uint32_t mrqc_reg;
3318
3319         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3320         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3321         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3322         mrqc &= ~IXGBE_MRQC_RSSEN;
3323         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3324 }
3325
3326 static void
3327 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3328 {
3329         uint8_t  *hash_key;
3330         uint32_t mrqc;
3331         uint32_t rss_key;
3332         uint64_t rss_hf;
3333         uint16_t i;
3334         uint32_t mrqc_reg;
3335         uint32_t rssrk_reg;
3336
3337         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3338         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3339
3340         hash_key = rss_conf->rss_key;
3341         if (hash_key != NULL) {
3342                 /* Fill in RSS hash key */
3343                 for (i = 0; i < 10; i++) {
3344                         rss_key  = hash_key[(i * 4)];
3345                         rss_key |= hash_key[(i * 4) + 1] << 8;
3346                         rss_key |= hash_key[(i * 4) + 2] << 16;
3347                         rss_key |= hash_key[(i * 4) + 3] << 24;
3348                         IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3349                 }
3350         }
3351
3352         /* Set configured hashing protocols in MRQC register */
3353         rss_hf = rss_conf->rss_hf;
3354         mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3355         if (rss_hf & ETH_RSS_IPV4)
3356                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3357         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3358                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3359         if (rss_hf & ETH_RSS_IPV6)
3360                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3361         if (rss_hf & ETH_RSS_IPV6_EX)
3362                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3363         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3364                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3365         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3366                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3367         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3368                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3369         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3370                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3371         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3372                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3373         IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3374 }
3375
3376 int
3377 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3378                           struct rte_eth_rss_conf *rss_conf)
3379 {
3380         struct ixgbe_hw *hw;
3381         uint32_t mrqc;
3382         uint64_t rss_hf;
3383         uint32_t mrqc_reg;
3384
3385         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3386
3387         if (!ixgbe_rss_update_sp(hw->mac.type)) {
3388                 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3389                         "NIC.");
3390                 return -ENOTSUP;
3391         }
3392         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3393
3394         /*
3395          * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3396          *     "RSS enabling cannot be done dynamically while it must be
3397          *      preceded by a software reset"
3398          * Before changing anything, first check that the update RSS operation
3399          * does not attempt to disable RSS, if RSS was enabled at
3400          * initialization time, or does not attempt to enable RSS, if RSS was
3401          * disabled at initialization time.
3402          */
3403         rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3404         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3405         if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3406                 if (rss_hf != 0) /* Enable RSS */
3407                         return -(EINVAL);
3408                 return 0; /* Nothing to do */
3409         }
3410         /* RSS enabled */
3411         if (rss_hf == 0) /* Disable RSS */
3412                 return -(EINVAL);
3413         ixgbe_hw_rss_hash_set(hw, rss_conf);
3414         return 0;
3415 }
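
/*
 * Illustrative application-side sketch (not part of this driver): updating
 * the hashed protocols at runtime, assuming RSS was already enabled with a
 * non-zero rss_hf at configuration time (otherwise the function above
 * returns -EINVAL).  Passing rss_key = NULL keeps the current key.
 *
 *   struct rte_eth_rss_conf rss_conf = {
 *           .rss_key = NULL,
 *           .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *   };
 *   if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
 *           rte_exit(EXIT_FAILURE, "RSS hash update failed\n");
 */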
3416
3417 int
3418 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3419                             struct rte_eth_rss_conf *rss_conf)
3420 {
3421         struct ixgbe_hw *hw;
3422         uint8_t *hash_key;
3423         uint32_t mrqc;
3424         uint32_t rss_key;
3425         uint64_t rss_hf;
3426         uint16_t i;
3427         uint32_t mrqc_reg;
3428         uint32_t rssrk_reg;
3429
3430         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3431         mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3432         rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3433         hash_key = rss_conf->rss_key;
3434         if (hash_key != NULL) {
3435                 /* Return RSS hash key */
3436                 for (i = 0; i < 10; i++) {
3437                         rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3438                         hash_key[(i * 4)] = rss_key & 0x000000FF;
3439                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3440                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3441                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3442                 }
3443         }
3444
3445         /* Get RSS functions configured in MRQC register */
3446         mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3447         if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3448                 rss_conf->rss_hf = 0;
3449                 return 0;
3450         }
3451         rss_hf = 0;
3452         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3453                 rss_hf |= ETH_RSS_IPV4;
3454         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3455                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3456         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3457                 rss_hf |= ETH_RSS_IPV6;
3458         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3459                 rss_hf |= ETH_RSS_IPV6_EX;
3460         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3461                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3462         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3463                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3464         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3465                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3466         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3467                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3468         if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3469                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3470         rss_conf->rss_hf = rss_hf;
3471         return 0;
3472 }
3473
3474 static void
3475 ixgbe_rss_configure(struct rte_eth_dev *dev)
3476 {
3477         struct rte_eth_rss_conf rss_conf;
3478         struct ixgbe_adapter *adapter;
3479         struct ixgbe_hw *hw;
3480         uint32_t reta;
3481         uint16_t i;
3482         uint16_t j;
3483         uint16_t sp_reta_size;
3484         uint32_t reta_reg;
3485
3486         PMD_INIT_FUNC_TRACE();
3487         adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3488         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3489
3490         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3491
3492         /*
3493          * Fill in redirection table
3494          * The byte-swap is needed because NIC registers are in
3495          * little-endian order.
3496          */
3497         if (adapter->rss_reta_updated == 0) {
3498                 reta = 0;
3499                 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3500                         reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3501
3502                         if (j == dev->data->nb_rx_queues)
3503                                 j = 0;
3504                         reta = (reta << 8) | j;
3505                         if ((i & 3) == 3)
3506                                 IXGBE_WRITE_REG(hw, reta_reg,
3507                                                 rte_bswap32(reta));
3508                 }
3509         }
3510
3511         /*
3512          * Configure the RSS key and the RSS protocols used to compute
3513          * the RSS hash of input packets.
3514          */
3515         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3516         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3517                 ixgbe_rss_disable(dev);
3518                 return;
3519         }
3520         if (rss_conf.rss_key == NULL)
3521                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3522         ixgbe_hw_rss_hash_set(hw, &rss_conf);
3523 }
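
/*
 * Worked example for the default RETA fill above (hypothetical queue count):
 * with nb_rx_queues = 4 the 128 entries cycle 0, 1, 2, 3, 0, 1, ... and every
 * fourth iteration packs four entries into one 32-bit register.  The first
 * write is rte_bswap32(0x00010203) = 0x03020100, i.e. entry 0 lands in the
 * least significant byte of the little-endian register.
 */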
3524
3525 #define NUM_VFTA_REGISTERS 128
3526 #define NIC_RX_BUFFER_SIZE 0x200
3527 #define X550_RX_BUFFER_SIZE 0x180
3528
3529 static void
3530 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3531 {
3532         struct rte_eth_vmdq_dcb_conf *cfg;
3533         struct ixgbe_hw *hw;
3534         enum rte_eth_nb_pools num_pools;
3535         uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3536         uint16_t pbsize;
3537         uint8_t nb_tcs; /* number of traffic classes */
3538         int i;
3539
3540         PMD_INIT_FUNC_TRACE();
3541         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3542         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3543         num_pools = cfg->nb_queue_pools;
3544         /* Check we have a valid number of pools */
3545         if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3546                 ixgbe_rss_disable(dev);
3547                 return;
3548         }
3549         /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3550         nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3551
3552         /*
3553          * RXPBSIZE
3554          * split rx buffer up into sections, each for 1 traffic class
3555          */
3556         switch (hw->mac.type) {
3557         case ixgbe_mac_X550:
3558         case ixgbe_mac_X550EM_x:
3559         case ixgbe_mac_X550EM_a:
3560                 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3561                 break;
3562         default:
3563                 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3564                 break;
3565         }
3566         for (i = 0; i < nb_tcs; i++) {
3567                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3568
3569                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3570                 /* clear 10 bits. */
3571                 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3572                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3573         }
3574         /* zero alloc all unused TCs */
3575         for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3576                 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3577
3578                 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3579                 /* clear 10 bits. */
3580                 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3581         }
3582
3583         /* MRQC: enable vmdq and dcb */
3584         mrqc = (num_pools == ETH_16_POOLS) ?
3585                 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3586         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3587
3588         /* PFVTCTL: turn on virtualisation and set the default pool */
3589         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3590         if (cfg->enable_default_pool) {
3591                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3592         } else {
3593                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3594         }
3595
3596         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3597
3598         /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3599         queue_mapping = 0;
3600         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3601                 /*
3602                  * mapping is done with 3 bits per priority,
3603                  * so shift by i*3 each time
3604                  */
3605                 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3606
3607         IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3608
3609         /* RTRPCS: DCB related */
3610         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3611
3612         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3613         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3614         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3615         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3616
3617         /* VFTA - enable all vlan filters */
3618         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3619                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3620         }
3621
3622         /* VFRE: pool enabling for receive - 16 or 32 */
3623         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3624                         num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3625
3626         /*
3627          * MPSAR - allow pools to read specific mac addresses
3628          * In this case, all pools should be able to read from mac addr 0
3629          */
3630         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3631         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3632
3633         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3634         for (i = 0; i < cfg->nb_pool_maps; i++) {
3635                 /* set vlan id in VF register and set the valid bit */
3636                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3637                                 (cfg->pool_map[i].vlan_id & 0xFFF)));
3638                 /*
3639                  * Put the allowed pools in VFB reg. As we only have 16 or 32
3640                  * pools, we only need to use the first half of the register
3641                  * i.e. bits 0-31
3642                  */
3643                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3644         }
3645 }
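
/*
 * Worked example (hypothetical configuration): with ETH_16_POOLS the code
 * above derives nb_tcs = 128 / 16 = 8 traffic classes, so a non-X550 MAC
 * splits its Rx packet buffer as pbsize = 0x200 / 8 = 0x40 per TC, VFRE(0)
 * is written with 0xFFFF to enable the 16 pools, and a dcb_tc[] map of
 * {0, 1, 2, 3, 4, 5, 6, 7} packs into RTRUP2TC as 0x00FAC688 (3 bits per
 * user priority).
 */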
3646
3647 /**
3648  * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
3649  * @dev: pointer to eth_dev structure
3650  * @dcb_config: pointer to ixgbe_dcb_config structure
3651  */
3652 static void
3653 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3654                        struct ixgbe_dcb_config *dcb_config)
3655 {
3656         uint32_t reg;
3657         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3658
3659         PMD_INIT_FUNC_TRACE();
3660         if (hw->mac.type != ixgbe_mac_82598EB) {
3661                 /* Disable the Tx desc arbiter so that MTQC can be changed */
3662                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3663                 reg |= IXGBE_RTTDCS_ARBDIS;
3664                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3665
3666                 /* Enable DCB for Tx with 8 TCs */
3667                 if (dcb_config->num_tcs.pg_tcs == 8) {
3668                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3669                 } else {
3670                         reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3671                 }
3672                 if (dcb_config->vt_mode)
3673                         reg |= IXGBE_MTQC_VT_ENA;
3674                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3675
3676                 /* Enable the Tx desc arbiter */
3677                 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3678                 reg &= ~IXGBE_RTTDCS_ARBDIS;
3679                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3680
3681                 /* Enable Security TX Buffer IFG for DCB */
3682                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3683                 reg |= IXGBE_SECTX_DCB;
3684                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3685         }
3686 }
3687
3688 /**
3689  * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3690  * @dev: pointer to rte_eth_dev structure
3691  * @dcb_config: pointer to ixgbe_dcb_config structure
3692  */
3693 static void
3694 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3695                         struct ixgbe_dcb_config *dcb_config)
3696 {
3697         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3698                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3699         struct ixgbe_hw *hw =
3700                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3701
3702         PMD_INIT_FUNC_TRACE();
3703         if (hw->mac.type != ixgbe_mac_82598EB)
3704                 /*PF VF Transmit Enable*/
3705                 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3706                         vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3707
3708         /*Configure general DCB TX parameters*/
3709         ixgbe_dcb_tx_hw_config(dev, dcb_config);
3710 }
3711
3712 static void
3713 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3714                         struct ixgbe_dcb_config *dcb_config)
3715 {
3716         struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3717                         &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3718         struct ixgbe_dcb_tc_config *tc;
3719         uint8_t i, j;
3720
3721         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3722         if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3723                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3724                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3725         } else {
3726                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3727                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3728         }
3729
3730         /* Initialize User Priority to Traffic Class mapping */
3731         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3732                 tc = &dcb_config->tc_config[j];
3733                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3734         }
3735
3736         /* User Priority to Traffic Class mapping */
3737         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3738                 j = vmdq_rx_conf->dcb_tc[i];
3739                 tc = &dcb_config->tc_config[j];
3740                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3741                                                 (uint8_t)(1 << i);
3742         }
3743 }
3744
3745 static void
3746 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3747                         struct ixgbe_dcb_config *dcb_config)
3748 {
3749         struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3750                         &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3751         struct ixgbe_dcb_tc_config *tc;
3752         uint8_t i, j;
3753
3754         /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3755         if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3756                 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3757                 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3758         } else {
3759                 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3760                 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3761         }
3762
3763         /* Initialize User Priority to Traffic Class mapping */
3764         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3765                 tc = &dcb_config->tc_config[j];
3766                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3767         }
3768
3769         /* User Priority to Traffic Class mapping */
3770         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3771                 j = vmdq_tx_conf->dcb_tc[i];
3772                 tc = &dcb_config->tc_config[j];
3773                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3774                                                 (uint8_t)(1 << i);
3775         }
3776 }
3777
3778 static void
3779 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3780                 struct ixgbe_dcb_config *dcb_config)
3781 {
3782         struct rte_eth_dcb_rx_conf *rx_conf =
3783                         &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3784         struct ixgbe_dcb_tc_config *tc;
3785         uint8_t i, j;
3786
3787         dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3788         dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3789
3790         /* Initialize User Priority to Traffic Class mapping */
3791         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3792                 tc = &dcb_config->tc_config[j];
3793                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3794         }
3795
3796         /* User Priority to Traffic Class mapping */
3797         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3798                 j = rx_conf->dcb_tc[i];
3799                 tc = &dcb_config->tc_config[j];
3800                 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3801                                                 (uint8_t)(1 << i);
3802         }
3803 }
3804
3805 static void
3806 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3807                 struct ixgbe_dcb_config *dcb_config)
3808 {
3809         struct rte_eth_dcb_tx_conf *tx_conf =
3810                         &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3811         struct ixgbe_dcb_tc_config *tc;
3812         uint8_t i, j;
3813
3814         dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3815         dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3816
3817         /* Initialize User Priority to Traffic Class mapping */
3818         for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3819                 tc = &dcb_config->tc_config[j];
3820                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3821         }
3822
3823         /* User Priority to Traffic Class mapping */
3824         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3825                 j = tx_conf->dcb_tc[i];
3826                 tc = &dcb_config->tc_config[j];
3827                 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3828                                                 (uint8_t)(1 << i);
3829         }
3830 }
3831
3832 /**
3833  * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3834  * @dev: pointer to eth_dev structure
3835  * @dcb_config: pointer to ixgbe_dcb_config structure
3836  */
3837 static void
3838 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3839                        struct ixgbe_dcb_config *dcb_config)
3840 {
3841         uint32_t reg;
3842         uint32_t vlanctrl;
3843         uint8_t i;
3844         uint32_t q;
3845         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3846
3847         PMD_INIT_FUNC_TRACE();
3848         /*
3849          * Disable the arbiter before changing parameters
3850          * (always enable recycle mode; WSP)
3851          */
3852         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3853         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3854
3855         if (hw->mac.type != ixgbe_mac_82598EB) {
3856                 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3857                 if (dcb_config->num_tcs.pg_tcs == 4) {
3858                         if (dcb_config->vt_mode)
3859                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3860                                         IXGBE_MRQC_VMDQRT4TCEN;
3861                         else {
3862                                 /* Whether the mode is DCB or DCB_RSS, just
3863                                  * set MRQE to RSSXTCEN; RSS itself is
3864                                  * controlled by the RSS_FIELD bits
3865                                  */
3866                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3867                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3868                                         IXGBE_MRQC_RTRSS4TCEN;
3869                         }
3870                 }
3871                 if (dcb_config->num_tcs.pg_tcs == 8) {
3872                         if (dcb_config->vt_mode)
3873                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3874                                         IXGBE_MRQC_VMDQRT8TCEN;
3875                         else {
3876                                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3877                                 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3878                                         IXGBE_MRQC_RTRSS8TCEN;
3879                         }
3880                 }
3881
3882                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3883
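                     /*
                      * QDE is written one queue at a time: each write carries the
                      * queue index in the IDX field plus the WRITE strobe, and the
                      * ENABLE bit (SR-IOV case below) additionally turns on
                      * descriptor drop for that queue.
                      */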
3884                 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3885                         /* Disable drop for all queues in VMDQ mode*/
3886                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3887                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3888                                                 (IXGBE_QDE_WRITE |
3889                                                  (q << IXGBE_QDE_IDX_SHIFT)));
3890                 } else {
3891                         /* Enable drop for all queues in SRIOV mode */
3892                         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3893                                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3894                                                 (IXGBE_QDE_WRITE |
3895                                                  (q << IXGBE_QDE_IDX_SHIFT) |
3896                                                  IXGBE_QDE_ENABLE));
3897                 }
3898         }
3899
3900         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3901         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3902         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3903         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3904
3905         /* VFTA - enable all vlan filters */
3906         for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3907                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3908         }
3909
3910         /*
3911          * Configure Rx packet plane (recycle mode; WSP) and
3912          * enable arbiter
3913          */
3914         reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3915         IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3916 }
3917
3918 static void
3919 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3920                         uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3921 {
3922         switch (hw->mac.type) {
3923         case ixgbe_mac_82598EB:
3924                 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3925                 break;
3926         case ixgbe_mac_82599EB:
3927         case ixgbe_mac_X540:
3928         case ixgbe_mac_X550:
3929         case ixgbe_mac_X550EM_x:
3930         case ixgbe_mac_X550EM_a:
3931                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3932                                                   tsa, map);
3933                 break;
3934         default:
3935                 break;
3936         }
3937 }
3938
3939 static void
3940 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3941                             uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3942 {
3943         switch (hw->mac.type) {
3944         case ixgbe_mac_82598EB:
3945                 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3946                 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3947                 break;
3948         case ixgbe_mac_82599EB:
3949         case ixgbe_mac_X540:
3950         case ixgbe_mac_X550:
3951         case ixgbe_mac_X550EM_x:
3952         case ixgbe_mac_X550EM_a:
3953                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3954                 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3955                 break;
3956         default:
3957                 break;
3958         }
3959 }
3960
3961 #define DCB_RX_CONFIG  1
3962 #define DCB_TX_CONFIG  1
3963 #define DCB_TX_PB      1024
3964 /**
3965  * ixgbe_dcb_hw_configure - Enable DCB and configure
3966  * general DCB in VT mode and non-VT mode parameters
3967  * @dev: pointer to rte_eth_dev structure
3968  * @dcb_config: pointer to ixgbe_dcb_config structure
3969  */
3970 static int
3971 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3972                         struct ixgbe_dcb_config *dcb_config)
3973 {
3974         int     ret = 0;
3975         uint8_t i, pfc_en, nb_tcs;
3976         uint16_t pbsize, rx_buffer_size;
3977         uint8_t config_dcb_rx = 0;
3978         uint8_t config_dcb_tx = 0;
3979         uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3980         uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981         uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3983         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3984         struct ixgbe_dcb_tc_config *tc;
3985         uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3986                 RTE_ETHER_CRC_LEN;
3987         struct ixgbe_hw *hw =
3988                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3989         struct ixgbe_bw_conf *bw_conf =
3990                 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3991
3992         switch (dev->data->dev_conf.rxmode.mq_mode) {
3993         case ETH_MQ_RX_VMDQ_DCB:
3994                 dcb_config->vt_mode = true;
3995                 if (hw->mac.type != ixgbe_mac_82598EB) {
3996                         config_dcb_rx = DCB_RX_CONFIG;
3997                         /*
3998                          * Get DCB and VT RX configuration parameters
3999                          * from rte_eth_conf
4000                          */
4001                         ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4002                         /* Configure general VMDQ and DCB RX parameters */
4003                         ixgbe_vmdq_dcb_configure(dev);
4004                 }
4005                 break;
4006         case ETH_MQ_RX_DCB:
4007         case ETH_MQ_RX_DCB_RSS:
4008                 dcb_config->vt_mode = false;
4009                 config_dcb_rx = DCB_RX_CONFIG;
4010                 /* Get DCB RX configuration parameters from rte_eth_conf */
4011                 ixgbe_dcb_rx_config(dev, dcb_config);
4012                 /* Configure general DCB RX parameters */
4013                 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4014                 break;
4015         default:
4016                 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4017                 break;
4018         }
4019         switch (dev->data->dev_conf.txmode.mq_mode) {
4020         case ETH_MQ_TX_VMDQ_DCB:
4021                 dcb_config->vt_mode = true;
4022                 config_dcb_tx = DCB_TX_CONFIG;
4023                 /* get DCB and VT TX configuration parameters
4024                  * from rte_eth_conf
4025                  */
4026                 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4027                 /* Configure general VMDQ and DCB TX parameters */
4028                 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4029                 break;
4030
4031         case ETH_MQ_TX_DCB:
4032                 dcb_config->vt_mode = false;
4033                 config_dcb_tx = DCB_TX_CONFIG;
4034                 /* Get DCB TX configuration parameters from rte_eth_conf */
4035                 ixgbe_dcb_tx_config(dev, dcb_config);
4036                 /* Configure general DCB TX parameters */
4037                 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4038                 break;
4039         default:
4040                 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4041                 break;
4042         }
4043
4044         nb_tcs = dcb_config->num_tcs.pfc_tcs;
4045         /* Unpack map */
4046         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4047         if (nb_tcs == ETH_4_TCS) {
4048                 /* Avoid un-configured priority mapping to TC0 */
4049                 uint8_t j = 4;
4050                 uint8_t mask = 0xFF;
4051
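                     /*
                      * Illustrative walk-through (assuming priorities 0-3 are
                      * mapped to TCs 0-3): the first loop clears bits 0-3, leaving
                      * mask = 0xF0, so the second loop assigns the unconfigured
                      * priorities 4-7 to TCs 4-7 instead of letting them fall
                      * back to TC0.
                      */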
4052                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4053                         mask = (uint8_t)(mask & (~(1 << map[i])));
4054                 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4055                         if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4056                                 map[j++] = i;
4057                         mask >>= 1;
4058                 }
4059                 /* Re-configure 4 TCs BW */
4060                 for (i = 0; i < nb_tcs; i++) {
4061                         tc = &dcb_config->tc_config[i];
4062                         if (bw_conf->tc_num != nb_tcs)
4063                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4064                                         (uint8_t)(100 / nb_tcs);
4065                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4066                                                 (uint8_t)(100 / nb_tcs);
4067                 }
4068                 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4069                         tc = &dcb_config->tc_config[i];
4070                         tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4071                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4072                 }
4073         } else {
4074                 /* Re-configure 8 TCs BW */
4075                 for (i = 0; i < nb_tcs; i++) {
4076                         tc = &dcb_config->tc_config[i];
4077                         if (bw_conf->tc_num != nb_tcs)
4078                                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4079                                         (uint8_t)(100 / nb_tcs + (i & 1));
4080                         tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4081                                 (uint8_t)(100 / nb_tcs + (i & 1));
4082                 }
4083         }
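             /*
              * Illustrative arithmetic for the loops above: with 8 TCs,
              * 100 / 8 = 12 and the (i & 1) term adds 1 on odd indexes, giving
              * 12/13/12/13/... which sums to exactly 100; with 4 TCs each class
              * simply gets 25 percent.
              */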
4084
4085         switch (hw->mac.type) {
4086         case ixgbe_mac_X550:
4087         case ixgbe_mac_X550EM_x:
4088         case ixgbe_mac_X550EM_a:
4089                 rx_buffer_size = X550_RX_BUFFER_SIZE;
4090                 break;
4091         default:
4092                 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4093                 break;
4094         }
4095
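             /*
              * Illustrative Rx sizing (assuming the usual 512 KB packet buffer,
              * 384 KB on X550): with 4 TCs each RXPBSIZE entry below gets
              * 128 KB (96 KB on X550), with 8 TCs half of that; unused entries
              * are zeroed.
              */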
4096         if (config_dcb_rx) {
4097                 /* Set RX buffer size */
4098                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4099                 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4100
4101                 for (i = 0; i < nb_tcs; i++) {
4102                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4103                 }
4104                 /* Zero the buffer size of all unused TCs */
4105                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4106                         IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4107                 }
4108         }
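             /*
              * Tx sizing sketch: each TC gets an equal share of the total Tx
              * packet buffer (IXGBE_TXPBSIZE_MAX / nb_tcs), and TXPBTHRESH is
              * derived from that share in KB units (DCB_TX_PB = 1024) minus
              * the maximum packet size allowance; see the datasheet for the
              * exact TXPBTHRESH semantics.
              */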
4109         if (config_dcb_tx) {
4110                 /* Only an equally distributed Tx packet
4111                  * buffer strategy is supported.
4112                  */
4113                 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4114                 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4115
4116                 for (i = 0; i < nb_tcs; i++) {
4117                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4118                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4119                 }
4120                 /* Clear unused TCs, if any, to zero buffer size */
4121                 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4122                         IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4123                         IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4124                 }
4125         }
4126
4127         /* Calculate traffic class credits */
4128         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4129                                 IXGBE_DCB_TX_CONFIG);
4130         ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4131                                 IXGBE_DCB_RX_CONFIG);
4132
4133         if (config_dcb_rx) {
4134                 /* Unpack CEE standard containers */
4135                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4136                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4137                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4138                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4139                 /* Configure PG(ETS) RX */
4140                 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4141         }
4142
4143         if (config_dcb_tx) {
4144                 /* Unpack CEE standard containers */
4145                 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4146                 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4147                 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4148                 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4149                 /* Configure PG(ETS) TX */
4150                 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4151         }
4152
4153         /* Configure queue statistics registers */
4154         ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4155
4156         /* Check if the PFC is supported */
4157         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4158                 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4159                 for (i = 0; i < nb_tcs; i++) {
4160                         /*
4161                          * E.g. with 8 TCs the default high_water is 48 and the
4162                          * low_water is 16 (3/4 and 1/4 of the per-TC buffer).
4163                          */
4164                         hw->fc.high_water[i] = (pbsize * 3) / 4;
4165                         hw->fc.low_water[i] = pbsize / 4;
4166                         /* Enable pfc for this TC */
4167                         tc = &dcb_config->tc_config[i];
4168                         tc->pfc = ixgbe_dcb_pfc_enabled;
4169                 }
4170                 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4171                 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4172                         pfc_en &= 0x0F;
4173                 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4174         }
4175
4176         return ret;
4177 }
4178
4179 /**
4180  * ixgbe_configure_dcb - Configure DCB  Hardware
4181  * @dev: pointer to rte_eth_dev
4182  */
4183 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4184 {
4185         struct ixgbe_dcb_config *dcb_cfg =
4186                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4187         struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4188
4189         PMD_INIT_FUNC_TRACE();
4190
4191         /* check support mq_mode for DCB */
4192         if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4193             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4194             (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4195                 return;
4196
4197         if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4198                 return;
4199
4200         /* Configure DCB hardware */
4201         ixgbe_dcb_hw_configure(dev, dcb_cfg);
4202 }
4203
4204 /*
4205  * VMDq is only supported on 10 GbE NICs.
4206  */
4207 static void
4208 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4209 {
4210         struct rte_eth_vmdq_rx_conf *cfg;
4211         struct ixgbe_hw *hw;
4212         enum rte_eth_nb_pools num_pools;
4213         uint32_t mrqc, vt_ctl, vlanctrl;
4214         uint32_t vmolr = 0;
4215         int i;
4216
4217         PMD_INIT_FUNC_TRACE();
4218         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4219         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4220         num_pools = cfg->nb_queue_pools;
4221
4222         ixgbe_rss_disable(dev);
4223
4224         /* MRQC: enable vmdq */
4225         mrqc = IXGBE_MRQC_VMDQEN;
4226         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4227
4228         /* PFVTCTL: turn on virtualisation and set the default pool */
4229         vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4230         if (cfg->enable_default_pool)
4231                 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4232         else
4233                 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4234
4235         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4236
4237         for (i = 0; i < (int)num_pools; i++) {
4238                 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4239                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4240         }
4241
4242         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4243         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4244         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4245         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4246
4247         /* VFTA - enable all vlan filters */
4248         for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4249                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4250
4251         /* VFRE: pool enabling for receive - 64 */
4252         IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4253         if (num_pools == ETH_64_POOLS)
4254                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4255
4256         /*
4257          * MPSAR - allow pools to read specific mac addresses
4258          * In this case, all pools should be able to read from mac addr 0
4259          */
4260         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4261         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4262
4263         /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4264         for (i = 0; i < cfg->nb_pool_maps; i++) {
4265                 /* set vlan id in VF register and set the valid bit */
4266                 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4267                                 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4268                 /*
4269                  * Put the allowed pools in the VLVFB register pair: the 64-bit
4270                  * pool mask is split across VLVFB(2 * i) (pools 0-31) and
4271                  * VLVFB(2 * i + 1) (pools 32-63).
4272                  */
4273                 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4274                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4275                                         (cfg->pool_map[i].pools & UINT32_MAX));
4276                 else
4277                         IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4278                                         ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4279
4280         }
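             /*
              * Illustrative example for the loop above: a pool_map entry with
              * vlan_id 100 and a pool bitmask of 0x3 (pools 0 and 1) is written
              * as VLVF(i) = VIEN | 100 and VLVFB(2 * i) = 0x3.
              */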
4281
4282         /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4283         if (cfg->enable_loop_back) {
4284                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4285                 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4286                         IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4287         }
4288
4289         IXGBE_WRITE_FLUSH(hw);
4290 }
4291
4292 /*
4293  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4294  * @hw: pointer to hardware structure
4295  */
4296 static void
4297 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4298 {
4299         uint32_t reg;
4300         uint32_t q;
4301
4302         PMD_INIT_FUNC_TRACE();
4303         /* PF VF Transmit Enable */
4304         IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4305         IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4306
4307         /* Disable the Tx desc arbiter so that MTQC can be changed */
4308         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4309         reg |= IXGBE_RTTDCS_ARBDIS;
4310         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4311
4312         reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4313         IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4314
4315         /* Disable drop for all queues */
4316         for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4317                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4318                   (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4319
4320         /* Enable the Tx desc arbiter */
4321         reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4322         reg &= ~IXGBE_RTTDCS_ARBDIS;
4323         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4324
4325         IXGBE_WRITE_FLUSH(hw);
4326 }
4327
4328 static int __attribute__((cold))
4329 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4330 {
4331         struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4332         uint64_t dma_addr;
4333         unsigned int i;
4334
4335         /* Initialize software ring entries */
4336         for (i = 0; i < rxq->nb_rx_desc; i++) {
4337                 volatile union ixgbe_adv_rx_desc *rxd;
4338                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4339
4340                 if (mbuf == NULL) {
4341                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4342                                      (unsigned) rxq->queue_id);
4343                         return -ENOMEM;
4344                 }
4345
4346                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4347                 mbuf->port = rxq->port_id;
4348
4349                 dma_addr =
4350                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4351                 rxd = &rxq->rx_ring[i];
4352                 rxd->read.hdr_addr = 0;
4353                 rxd->read.pkt_addr = dma_addr;
4354                 rxe[i].mbuf = mbuf;
4355         }
4356
4357         return 0;
4358 }
4359
4360 static int
4361 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4362 {
4363         struct ixgbe_hw *hw;
4364         uint32_t mrqc;
4365
4366         ixgbe_rss_configure(dev);
4367
4368         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4369
4370         /* MRQC: enable VF RSS */
4371         mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4372         mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4373         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4374         case ETH_64_POOLS:
4375                 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4376                 break;
4377
4378         case ETH_32_POOLS:
4379                 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4380                 break;
4381
4382         default:
4383                 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4384                 return -EINVAL;
4385         }
4386
4387         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4388
4389         return 0;
4390 }
4391
4392 static int
4393 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4394 {
4395         struct ixgbe_hw *hw =
4396                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4397
4398         switch (RTE_ETH_DEV_SRIOV(dev).active) {
4399         case ETH_64_POOLS:
4400                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4401                         IXGBE_MRQC_VMDQEN);
4402                 break;
4403
4404         case ETH_32_POOLS:
4405                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4406                         IXGBE_MRQC_VMDQRT4TCEN);
4407                 break;
4408
4409         case ETH_16_POOLS:
4410                 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4411                         IXGBE_MRQC_VMDQRT8TCEN);
4412                 break;
4413         default:
4414                 PMD_INIT_LOG(ERR,
4415                         "invalid pool number in IOV mode");
4416                 break;
4417         }
4418         return 0;
4419 }
4420
4421 static int
4422 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4423 {
4424         struct ixgbe_hw *hw =
4425                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4426
4427         if (hw->mac.type == ixgbe_mac_82598EB)
4428                 return 0;
4429
4430         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4431                 /*
4432                  * SRIOV inactive scheme
4433                  * any DCB/RSS w/o VMDq multi-queue setting
4434                  */
4435                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4436                 case ETH_MQ_RX_RSS:
4437                 case ETH_MQ_RX_DCB_RSS:
4438                 case ETH_MQ_RX_VMDQ_RSS:
4439                         ixgbe_rss_configure(dev);
4440                         break;
4441
4442                 case ETH_MQ_RX_VMDQ_DCB:
4443                         ixgbe_vmdq_dcb_configure(dev);
4444                         break;
4445
4446                 case ETH_MQ_RX_VMDQ_ONLY:
4447                         ixgbe_vmdq_rx_hw_configure(dev);
4448                         break;
4449
4450                 case ETH_MQ_RX_NONE:
4451                 default:
4452                         /* if mq_mode is none, disable rss mode.*/
4453                         ixgbe_rss_disable(dev);
4454                         break;
4455                 }
4456         } else {
4457                 /* SRIOV active scheme
4458                  * Support RSS together with SRIOV.
4459                  */
4460                 switch (dev->data->dev_conf.rxmode.mq_mode) {
4461                 case ETH_MQ_RX_RSS:
4462                 case ETH_MQ_RX_VMDQ_RSS:
4463                         ixgbe_config_vf_rss(dev);
4464                         break;
4465                 case ETH_MQ_RX_VMDQ_DCB:
4466                 case ETH_MQ_RX_DCB:
4467                 /* In SRIOV, the configuration is the same as the VMDq case */
4468                         ixgbe_vmdq_dcb_configure(dev);
4469                         break;
4470                 /* DCB/RSS together with SRIOV is not supported */
4471                 case ETH_MQ_RX_VMDQ_DCB_RSS:
4472                 case ETH_MQ_RX_DCB_RSS:
4473                         PMD_INIT_LOG(ERR,
4474                                 "Could not support DCB/RSS with VMDq & SRIOV");
4475                         return -1;
4476                 default:
4477                         ixgbe_config_vf_default(dev);
4478                         break;
4479                 }
4480         }
4481
4482         return 0;
4483 }
4484
4485 static int
4486 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4487 {
4488         struct ixgbe_hw *hw =
4489                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4490         uint32_t mtqc;
4491         uint32_t rttdcs;
4492
4493         if (hw->mac.type == ixgbe_mac_82598EB)
4494                 return 0;
4495
4496         /* disable arbiter before setting MTQC */
4497         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4498         rttdcs |= IXGBE_RTTDCS_ARBDIS;
4499         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4500
4501         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4502                 /*
4503                  * SRIOV inactive scheme
4504                  * any DCB w/o VMDq multi-queue setting
4505                  */
4506                 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4507                         ixgbe_vmdq_tx_hw_configure(hw);
4508                 else {
4509                         mtqc = IXGBE_MTQC_64Q_1PB;
4510                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4511                 }
4512         } else {
4513                 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4514
4515                 /*
4516                  * SRIOV active scheme
4517                  * FIXME: revisit if DCB together with VMDq & SRIOV gets supported
4518                  */
4519                 case ETH_64_POOLS:
4520                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4521                         break;
4522                 case ETH_32_POOLS:
4523                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4524                         break;
4525                 case ETH_16_POOLS:
4526                         mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4527                                 IXGBE_MTQC_8TC_8TQ;
4528                         break;
4529                 default:
4530                         mtqc = IXGBE_MTQC_64Q_1PB;
4531                         PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4532                 }
4533                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4534         }
4535
4536         /* re-enable arbiter */
4537         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4538         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4539
4540         return 0;
4541 }
4542
4543 /**
4544  * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4545  *
4546  * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4547  * spec rev. 3.0 chapter 8.2.3.8.13.
4548  *
4549  * @pool Memory pool of the Rx queue
4550  */
4551 static inline uint32_t
4552 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4553 {
4554         struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4555
4556         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4557         uint16_t maxdesc =
4558                 RTE_IPV4_MAX_PKT_LEN /
4559                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4560
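             /*
              * Illustrative example (assuming the common 2 KB-per-mbuf data room
              * after headroom): 65535 / 2048 gives maxdesc = 31, so MAXDESC_16
              * is selected and 16 * 2 KB stays well below the 64 KB - 1 limit.
              */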
4561         if (maxdesc >= 16)
4562                 return IXGBE_RSCCTL_MAXDESC_16;
4563         else if (maxdesc >= 8)
4564                 return IXGBE_RSCCTL_MAXDESC_8;
4565         else if (maxdesc >= 4)
4566                 return IXGBE_RSCCTL_MAXDESC_4;
4567         else
4568                 return IXGBE_RSCCTL_MAXDESC_1;
4569 }
4570
4571 /**
4572  * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4573  * interrupt
4574  *
4575  * (Taken from FreeBSD tree)
4576  * (yes this is all very magic and confusing :)
4577  *
4578  * @dev port handle
4579  * @entry the register array entry
4580  * @vector the MSIX vector for this queue
4581  * @type RX/TX/MISC
4582  */
4583 static void
4584 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4585 {
4586         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4587         u32 ivar, index;
4588
4589         vector |= IXGBE_IVAR_ALLOC_VAL;
4590
4591         switch (hw->mac.type) {
4592
4593         case ixgbe_mac_82598EB:
4594                 if (type == -1)
4595                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4596                 else
4597                         entry += (type * 64);
4598                 index = (entry >> 2) & 0x1F;
4599                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4600                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4601                 ivar |= (vector << (8 * (entry & 0x3)));
4602                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4603                 break;
4604
4605         case ixgbe_mac_82599EB:
4606         case ixgbe_mac_X540:
4607                 if (type == -1) { /* MISC IVAR */
4608                         index = (entry & 1) * 8;
4609                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4610                         ivar &= ~(0xFF << index);
4611                         ivar |= (vector << index);
4612                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4613                 } else {        /* RX/TX IVARS */
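                             /*
                              * Illustrative mapping: Rx queue 5 (entry = 5,
                              * type = 0) lands in IVAR(2) at bit offset
                              * 16 * (5 & 1) + 8 * 0 = 16, i.e. bits 23:16.
                              */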
4614                         index = (16 * (entry & 1)) + (8 * type);
4615                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4616                         ivar &= ~(0xFF << index);
4617                         ivar |= (vector << index);
4618                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4619                 }
4620
4621                 break;
4622
4623         default:
4624                 break;
4625         }
4626 }
4627
4628 void __attribute__((cold))
4629 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4630 {
4631         uint16_t i, rx_using_sse;
4632         struct ixgbe_adapter *adapter =
4633                 (struct ixgbe_adapter *)dev->data->dev_private;
4634
4635         /*
4636          * In order to allow Vector Rx there are a few configuration
4637          * conditions to be met and Rx Bulk Allocation should be allowed.
4638          */
4639         if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4640             !adapter->rx_bulk_alloc_allowed) {
4641                 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4642                                     "preconditions or RTE_IXGBE_INC_VECTOR is "
4643                                     "not enabled",
4644                              dev->data->port_id);
4645
4646                 adapter->rx_vec_allowed = false;
4647         }
4648
4649         /*
4650          * Initialize the appropriate LRO callback.
4651          *
4652          * If all queues satisfy the bulk allocation preconditions
4653          * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4654          * Otherwise use a single allocation version.
4655          */
4656         if (dev->data->lro) {
4657                 if (adapter->rx_bulk_alloc_allowed) {
4658                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4659                                            "allocation version");
4660                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4661                 } else {
4662                         PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4663                                            "allocation version");
4664                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4665                 }
4666         } else if (dev->data->scattered_rx) {
4667                 /*
4668                  * Set the non-LRO scattered callback: there are Vector and
4669                  * single allocation versions.
4670                  */
4671                 if (adapter->rx_vec_allowed) {
4672                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4673                                             "callback (port=%d).",
4674                                      dev->data->port_id);
4675
4676                         dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4677                 } else if (adapter->rx_bulk_alloc_allowed) {
4678                         PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4679                                            "allocation callback (port=%d).",
4680                                      dev->data->port_id);
4681                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4682                 } else {
4683                         PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
4684                                             "single allocation) "
4685                                             "Scattered Rx callback "
4686                                             "(port=%d).",
4687                                      dev->data->port_id);
4688
4689                         dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4690                 }
4691         /*
4692          * Below we set "simple" callbacks according to port/queues parameters.
4693          * If parameters allow we are going to choose between the following
4694          * callbacks:
4695          *    - Vector
4696          *    - Bulk Allocation
4697          *    - Single buffer allocation (the simplest one)
4698          */
4699         } else if (adapter->rx_vec_allowed) {
4700                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4701                                     "burst size is no less than %d (port=%d).",
4702                              RTE_IXGBE_DESCS_PER_LOOP,
4703                              dev->data->port_id);
4704
4705                 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4706         } else if (adapter->rx_bulk_alloc_allowed) {
4707                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4708                                     "satisfied. Rx Burst Bulk Alloc function "
4709                                     "will be used on port=%d.",
4710                              dev->data->port_id);
4711
4712                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4713         } else {
4714                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4715                                     "satisfied, or Scattered Rx is requested "
4716                                     "(port=%d).",
4717                              dev->data->port_id);
4718
4719                 dev->rx_pkt_burst = ixgbe_recv_pkts;
4720         }
4721
4722         /* Propagate information about RX function choice through all queues. */
4723
4724         rx_using_sse =
4725                 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4726                 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4727
4728         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4729                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4730
4731                 rxq->rx_using_sse = rx_using_sse;
4732 #ifdef RTE_LIBRTE_SECURITY
4733                 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4734                                 DEV_RX_OFFLOAD_SECURITY);
4735 #endif
4736         }
4737 }
4738
4739 /**
4740  * ixgbe_set_rsc - configure RSC related port HW registers
4741  *
4742  * Configures the port's RSC related registers according to chapter 4.6.7.2
4743  * of the 82599 Spec (x540 configuration is virtually the same).
4744  *
4745  * @dev port handle
4746  *
4747  * Returns 0 in case of success or a non-zero error code
4748  */
4749 static int
4750 ixgbe_set_rsc(struct rte_eth_dev *dev)
4751 {
4752         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4753         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4754         struct rte_eth_dev_info dev_info = { 0 };
4755         bool rsc_capable = false;
4756         uint16_t i;
4757         uint32_t rdrxctl;
4758         uint32_t rfctl;
4759
4760         /* Sanity check */
4761         dev->dev_ops->dev_infos_get(dev, &dev_info);
4762         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4763                 rsc_capable = true;
4764
4765         if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4766                 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4767                                    "support it");
4768                 return -EINVAL;
4769         }
4770
4771         /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4772
4773         if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4774              (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4775                 /*
4776                  * According to chapter 4.6.7.2.1 of the Spec Rev.
4777                  * 3.0 RSC configuration requires HW CRC stripping being
4778                  * enabled. If user requested both HW CRC stripping off
4779                  * and RSC on - return an error.
4780                  */
4781                 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4782                                     "is disabled");
4783                 return -EINVAL;
4784         }
4785
4786         /* RFCTL configuration  */
4787         rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4788         if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4789                 /*
4790                  * Since NFS packet coalescing is not supported, clear
4791                  * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4792                  * enabled.
4793                  */
4794                 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4795                            IXGBE_RFCTL_NFSR_DIS);
4796         else
4797                 rfctl |= IXGBE_RFCTL_RSC_DIS;
4798         IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4799
4800         /* If LRO hasn't been requested - we are done here. */
4801         if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4802                 return 0;
4803
4804         /* Set RDRXCTL.RSCACKC bit */
4805         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4806         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4807         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4808
4809         /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4810         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4811                 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4812                 uint32_t srrctl =
4813                         IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4814                 uint32_t rscctl =
4815                         IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4816                 uint32_t psrtype =
4817                         IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4818                 uint32_t eitr =
4819                         IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4820
4821                 /*
4822                  * ixgbe PMD doesn't support header-split at the moment.
4823                  *
4824                  * Following the 4.6.7.2.1 chapter of the 82599/x540
4825                  * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4826                  * should be configured even if header split is not
4827                  * enabled. We will configure it to 128 bytes following the
4828                  * recommendation in the spec.
4829                  */
4830                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4831                 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4832                                             IXGBE_SRRCTL_BSIZEHDR_MASK;
4833
4834                 /*
4835                  * TODO: Consider setting the Receive Descriptor Minimum
4836                  * Threshold Size for an RSC case. This is not an obviously
4837                  * beneficial option but one worth considering...
4838                  */
4839
4840                 rscctl |= IXGBE_RSCCTL_RSCEN;
4841                 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4842                 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4843
4844                 /*
4845                  * RSC: Set ITR interval corresponding to 2K ints/s.
4846                  *
4847                  * Full-sized RSC aggregations for a 10Gb/s link will
4848                  * arrive at about 20K aggregation/s rate.
4849                  *
4850                  * A 2K ints/s rate will cause only 10% of the
4851                  * aggregations to be closed due to the interrupt timer
4852                  * expiration for a streaming at wire-speed case.
4853                  *
4854                  * For a sparse streaming case this setting will yield
4855                  * at most 500us latency for a single RSC aggregation.
4856                  */
4857                 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4858                 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4859                 eitr |= IXGBE_EITR_CNT_WDIS;
4860
4861                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4862                 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4863                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4864                 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4865
4866                 /*
4867                  * RSC requires the mapping of the queue to the
4868                  * interrupt vector.
4869                  */
4870                 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4871         }
4872
4873         dev->data->lro = 1;
4874
4875         PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4876
4877         return 0;
4878 }
4879
4880 /*
4881  * Initializes Receive Unit.
4882  */
4883 int __attribute__((cold))
4884 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4885 {
4886         struct ixgbe_hw     *hw;
4887         struct ixgbe_rx_queue *rxq;
4888         uint64_t bus_addr;
4889         uint32_t rxctrl;
4890         uint32_t fctrl;
4891         uint32_t hlreg0;
4892         uint32_t maxfrs;
4893         uint32_t srrctl;
4894         uint32_t rdrxctl;
4895         uint32_t rxcsum;
4896         uint16_t buf_size;
4897         uint16_t i;
4898         struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4899         int rc;
4900
4901         PMD_INIT_FUNC_TRACE();
4902         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4903
4904         /*
4905          * Make sure receives are disabled while setting
4906          * up the RX context (registers, descriptor rings, etc.).
4907          */
4908         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4909         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4910
4911         /* Enable receipt of broadcast frames */
4912         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4913         fctrl |= IXGBE_FCTRL_BAM;
4914         fctrl |= IXGBE_FCTRL_DPF;
4915         fctrl |= IXGBE_FCTRL_PMCF;
4916         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4917
4918         /*
4919          * Configure CRC stripping, if any.
4920          */
4921         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4922         if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4923                 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4924         else
4925                 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4926
4927         /*
4928          * Configure jumbo frame support, if any.
4929          */
4930         if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4931                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4932                 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4933                 maxfrs &= 0x0000FFFF;
4934                 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4935                 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4936         } else
4937                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4938
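             /*
              * Note: MAXFRS keeps the maximum frame size in its upper 16 bits,
              * which is why max_rx_pkt_len is shifted left by 16 above.
              */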
4939         /*
4940          * If loopback mode is configured, set LPBK bit.
4941          */
4942         if (dev->data->dev_conf.lpbk_mode != 0) {
4943                 rc = ixgbe_check_supported_loopback_mode(dev);
4944                 if (rc < 0) {
4945                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4946                         return rc;
4947                 }
4948                 hlreg0 |= IXGBE_HLREG0_LPBK;
4949         } else {
4950                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4951         }
4952
4953         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4954
4955         /*
4956          * Assume no header split and no VLAN strip support
4957          * on any Rx queue first.
4958          */
4959         rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4960         /* Setup RX queues */
4961         for (i = 0; i < dev->data->nb_rx_queues; i++) {
4962                 rxq = dev->data->rx_queues[i];
4963
4964                 /*
4965                  * Reset crc_len in case it was changed after queue setup by a
4966                  * call to configure.
4967                  */
4968                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4969                         rxq->crc_len = RTE_ETHER_CRC_LEN;
4970                 else
4971                         rxq->crc_len = 0;
4972
4973                 /* Setup the Base and Length of the Rx Descriptor Rings */
4974                 bus_addr = rxq->rx_ring_phys_addr;
4975                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4976                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4977                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4978                                 (uint32_t)(bus_addr >> 32));
4979                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4980                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4981                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4982                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4983
4984                 /* Configure the SRRCTL register */
4985                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4986
4987                 /* Set if packets are dropped when no descriptors available */
4988                 if (rxq->drop_en)
4989                         srrctl |= IXGBE_SRRCTL_DROP_EN;
4990
4991                 /*
4992                  * Configure the RX buffer size in the BSIZEPACKET field of
4993                  * the SRRCTL register of the queue.
4994                  * The value is in 1 KB resolution. Valid values can be from
4995                  * 1 KB to 16 KB.
4996                  */
4997                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4998                         RTE_PKTMBUF_HEADROOM);
4999                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5000                            IXGBE_SRRCTL_BSIZEPKT_MASK);
5001
5002                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5003
5004                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5005                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5006
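                     /*
                      * Illustrative example (assuming a 2 KB data room after
                      * headroom): BSIZEPKT is programmed in 1 KB units, so the
                      * field holds 2 and buf_size reads back as 2048; frames that
                      * may exceed it (including a double VLAN tag) force
                      * scattered Rx below.
                      */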
5007                 /* Add dual VLAN tag length when checking the Rx buffer size */
5008                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5009                                             2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5010                         dev->data->scattered_rx = 1;
5011                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5012                         rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5013         }
5014
5015         if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5016                 dev->data->scattered_rx = 1;
5017
5018         /*
5019          * Device configured with multiple RX queues.
5020          */
5021         ixgbe_dev_mq_rx_configure(dev);
5022
5023         /*
5024          * Setup the Checksum Register.
5025          * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5026          * Enable IP/L4 checksum computation by hardware if requested to do so.
5027          */
5028         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5029         rxcsum |= IXGBE_RXCSUM_PCSD;
5030         if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5031                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5032         else
5033                 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5034
5035         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5036
5037         if (hw->mac.type == ixgbe_mac_82599EB ||
5038             hw->mac.type == ixgbe_mac_X540) {
5039                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5040                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5041                         rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5042                 else
5043                         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5044                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5045                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5046         }
5047
5048         rc = ixgbe_set_rsc(dev);
5049         if (rc)
5050                 return rc;
5051
5052         ixgbe_set_rx_function(dev);
5053
5054         return 0;
5055 }
5056
5057 /*
5058  * Initializes Transmit Unit.
5059  */
5060 void __attribute__((cold))
5061 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5062 {
5063         struct ixgbe_hw     *hw;
5064         struct ixgbe_tx_queue *txq;
5065         uint64_t bus_addr;
5066         uint32_t hlreg0;
5067         uint32_t txctrl;
5068         uint16_t i;
5069
5070         PMD_INIT_FUNC_TRACE();
5071         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5072
5073         /* Enable TX CRC (checksum offload requirement) and hw padding
5074          * (TSO requirement)
5075          */
5076         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5077         hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5078         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5079
5080         /* Setup the Base and Length of the Tx Descriptor Rings */
5081         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5082                 txq = dev->data->tx_queues[i];
5083
5084                 bus_addr = txq->tx_ring_phys_addr;
5085                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5086                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5087                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5088                                 (uint32_t)(bus_addr >> 32));
5089                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5090                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5091                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5092                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5093                 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5094
5095                 /*
5096                  * Disable Tx Head Writeback RO bit, since this hoses
5097                  * bookkeeping if things aren't delivered in order.
5098                  */
5099                 switch (hw->mac.type) {
5100                 case ixgbe_mac_82598EB:
5101                         txctrl = IXGBE_READ_REG(hw,
5102                                                 IXGBE_DCA_TXCTRL(txq->reg_idx));
5103                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5104                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5105                                         txctrl);
5106                         break;
5107
5108                 case ixgbe_mac_82599EB:
5109                 case ixgbe_mac_X540:
5110                 case ixgbe_mac_X550:
5111                 case ixgbe_mac_X550EM_x:
5112                 case ixgbe_mac_X550EM_a:
5113                 default:
5114                         txctrl = IXGBE_READ_REG(hw,
5115                                                 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5116                         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5117                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5118                                         txctrl);
5119                         break;
5120                 }
5121         }
5122
5123         /* Configure the device for multi-queue Tx operation. */
5124         ixgbe_dev_mq_tx_configure(dev);
5125 }
5126
5127 /*
5128  * Check if requested loopback mode is supported
5129  */
5130 int
5131 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5132 {
5133         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5134
5135         if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5136                 if (hw->mac.type == ixgbe_mac_82599EB ||
5137                      hw->mac.type == ixgbe_mac_X540 ||
5138                      hw->mac.type == ixgbe_mac_X550 ||
5139                      hw->mac.type == ixgbe_mac_X550EM_x ||
5140                      hw->mac.type == ixgbe_mac_X550EM_a)
5141                         return 0;
5142
5143         return -ENOTSUP;
5144 }
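/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): requesting Tx->Rx loopback from an
 * application before starting the port. ixgbe_check_supported_loopback_mode()
 * above rejects this on MAC types other than 82599/X540/X550. The port id
 * and queue counts are assumptions.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static int
example_request_tx_rx_loopback(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.lpbk_mode = IXGBE_LPBK_TX_RX;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */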
5145
5146 /*
5147  * Set up link for 82599 loopback mode Tx->Rx.
5148  */
5149 static inline void __attribute__((cold))
5150 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5151 {
5152         PMD_INIT_FUNC_TRACE();
5153
5154         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5155                 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5156                                 IXGBE_SUCCESS) {
5157                         PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5158                         /* ignore error */
5159                         return;
5160                 }
5161         }
5162
5163         /* Restart link */
5164         IXGBE_WRITE_REG(hw,
5165                         IXGBE_AUTOC,
5166                         IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5167         ixgbe_reset_pipeline_82599(hw);
5168
5169         hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5170         msec_delay(50);
5171 }
5172
5174 /*
5175  * Start Transmit and Receive Units.
5176  */
5177 int __attribute__((cold))
5178 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5179 {
5180         struct ixgbe_hw     *hw;
5181         struct ixgbe_tx_queue *txq;
5182         struct ixgbe_rx_queue *rxq;
5183         uint32_t txdctl;
5184         uint32_t dmatxctl;
5185         uint32_t rxctrl;
5186         uint16_t i;
5187         int ret = 0;
5188
5189         PMD_INIT_FUNC_TRACE();
5190         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5191
5192         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5193                 txq = dev->data->tx_queues[i];
5194                 /* Setup Transmit Threshold Registers */
5195                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5196                 txdctl |= txq->pthresh & 0x7F;
5197                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5198                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5199                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5200         }
5201
5202         if (hw->mac.type != ixgbe_mac_82598EB) {
5203                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5204                 dmatxctl |= IXGBE_DMATXCTL_TE;
5205                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5206         }
5207
5208         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5209                 txq = dev->data->tx_queues[i];
5210                 if (!txq->tx_deferred_start) {
5211                         ret = ixgbe_dev_tx_queue_start(dev, i);
5212                         if (ret < 0)
5213                                 return ret;
5214                 }
5215         }
5216
5217         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5218                 rxq = dev->data->rx_queues[i];
5219                 if (!rxq->rx_deferred_start) {
5220                         ret = ixgbe_dev_rx_queue_start(dev, i);
5221                         if (ret < 0)
5222                                 return ret;
5223                 }
5224         }
5225
5226         /* Enable Receive engine */
5227         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5228         if (hw->mac.type == ixgbe_mac_82598EB)
5229                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5230         rxctrl |= IXGBE_RXCTRL_RXEN;
5231         hw->mac.ops.enable_rx_dma(hw, rxctrl);
5232
5233         /* If loopback mode is enabled, set up the link accordingly */
5234         if (dev->data->dev_conf.lpbk_mode != 0) {
5235                 if (hw->mac.type == ixgbe_mac_82599EB)
5236                         ixgbe_setup_loopback_link_82599(hw);
5237                 else if (hw->mac.type == ixgbe_mac_X540 ||
5238                      hw->mac.type == ixgbe_mac_X550 ||
5239                      hw->mac.type == ixgbe_mac_X550EM_x ||
5240                      hw->mac.type == ixgbe_mac_X550EM_a)
5241                         ixgbe_setup_loopback_link_x540_x550(hw, true);
5242         }
5243
5244 #ifdef RTE_LIBRTE_SECURITY
5245         if ((dev->data->dev_conf.rxmode.offloads &
5246                         DEV_RX_OFFLOAD_SECURITY) ||
5247                 (dev->data->dev_conf.txmode.offloads &
5248                         DEV_TX_OFFLOAD_SECURITY)) {
5249                 ret = ixgbe_crypto_enable_ipsec(dev);
5250                 if (ret != 0) {
5251                         PMD_DRV_LOG(ERR,
5252                                     "ixgbe_crypto_enable_ipsec fails with %d.",
5253                                     ret);
5254                         return ret;
5255                 }
5256         }
5257 #endif
5258
5259         return 0;
5260 }
5261
5262 /*
5263  * Start Receive Units for specified queue.
5264  */
5265 int __attribute__((cold))
5266 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5267 {
5268         struct ixgbe_hw     *hw;
5269         struct ixgbe_rx_queue *rxq;
5270         uint32_t rxdctl;
5271         int poll_ms;
5272
5273         PMD_INIT_FUNC_TRACE();
5274         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5275
5276         rxq = dev->data->rx_queues[rx_queue_id];
5277
5278         /* Allocate buffers for descriptor rings */
5279         if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5280                 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5281                              rx_queue_id);
5282                 return -1;
5283         }
5284         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5285         rxdctl |= IXGBE_RXDCTL_ENABLE;
5286         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5287
5288         /* Wait until RX Enable ready */
5289         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5290         do {
5291                 rte_delay_ms(1);
5292                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5293         } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5294         if (!poll_ms)
5295                 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5296         rte_wmb();
5297         IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5298         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5299         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5300
5301         return 0;
5302 }
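/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): using deferred start from the
 * application side. The queue is set up with rx_deferred_start so that
 * rte_eth_dev_start() leaves it stopped, then rte_eth_dev_rx_queue_start()
 * reaches the PMD callback above. The descriptor count is an assumption.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static int
example_deferred_rx_start(uint16_t port_id, uint16_t queue_id,
			  struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rx_conf;
	int ret;

	memset(&rx_conf, 0, sizeof(rx_conf));
	rx_conf.rx_deferred_start = 1;

	ret = rte_eth_rx_queue_setup(port_id, queue_id, 512, SOCKET_ID_ANY,
				     &rx_conf, mb_pool);
	if (ret != 0)
		return ret;

	/* Later, once the application is ready to receive on this queue. */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */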
5303
5304 /*
5305  * Stop Receive Units for specified queue.
5306  */
5307 int __attribute__((cold))
5308 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5309 {
5310         struct ixgbe_hw     *hw;
5311         struct ixgbe_adapter *adapter =
5312                 (struct ixgbe_adapter *)dev->data->dev_private;
5313         struct ixgbe_rx_queue *rxq;
5314         uint32_t rxdctl;
5315         int poll_ms;
5316
5317         PMD_INIT_FUNC_TRACE();
5318         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5319
5320         rxq = dev->data->rx_queues[rx_queue_id];
5321
5322         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5323         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5324         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5325
5326         /* Wait until RX Enable bit clear */
5327         poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5328         do {
5329                 rte_delay_ms(1);
5330                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5331         } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5332         if (!poll_ms)
5333                 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5334
5335         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5336
5337         ixgbe_rx_queue_release_mbufs(rxq);
5338         ixgbe_reset_rx_queue(adapter, rxq);
5339         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5340
5341         return 0;
5342 }
5343
5345 /*
5346  * Start Transmit Units for specified queue.
5347  */
5348 int __attribute__((cold))
5349 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5350 {
5351         struct ixgbe_hw     *hw;
5352         struct ixgbe_tx_queue *txq;
5353         uint32_t txdctl;
5354         int poll_ms;
5355
5356         PMD_INIT_FUNC_TRACE();
5357         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5358
5359         txq = dev->data->tx_queues[tx_queue_id];
5360         IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5361         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5362         txdctl |= IXGBE_TXDCTL_ENABLE;
5363         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5364
5365         /* Wait until TX Enable ready */
5366         if (hw->mac.type == ixgbe_mac_82599EB) {
5367                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5368                 do {
5369                         rte_delay_ms(1);
5370                         txdctl = IXGBE_READ_REG(hw,
5371                                 IXGBE_TXDCTL(txq->reg_idx));
5372                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5373                 if (!poll_ms)
5374                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5375                                 tx_queue_id);
5376         }
5377         rte_wmb();
5378         IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5379         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5380
5381         return 0;
5382 }
5383
5384 /*
5385  * Stop Transmit Units for specified queue.
5386  */
5387 int __attribute__((cold))
5388 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5389 {
5390         struct ixgbe_hw     *hw;
5391         struct ixgbe_tx_queue *txq;
5392         uint32_t txdctl;
5393         uint32_t txtdh, txtdt;
5394         int poll_ms;
5395
5396         PMD_INIT_FUNC_TRACE();
5397         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5398
5399         txq = dev->data->tx_queues[tx_queue_id];
5400
5401         /* Wait until TX queue is empty */
5402         if (hw->mac.type == ixgbe_mac_82599EB) {
5403                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5404                 do {
5405                         rte_delay_us(RTE_IXGBE_WAIT_100_US);
5406                         txtdh = IXGBE_READ_REG(hw,
5407                                                IXGBE_TDH(txq->reg_idx));
5408                         txtdt = IXGBE_READ_REG(hw,
5409                                                IXGBE_TDT(txq->reg_idx));
5410                 } while (--poll_ms && (txtdh != txtdt));
5411                 if (!poll_ms)
5412                         PMD_INIT_LOG(ERR,
5413                                 "Tx Queue %d is not empty when stopping.",
5414                                 tx_queue_id);
5415         }
5416
5417         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5418         txdctl &= ~IXGBE_TXDCTL_ENABLE;
5419         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5420
5421         /* Wait until TX Enable bit clear */
5422         if (hw->mac.type == ixgbe_mac_82599EB) {
5423                 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5424                 do {
5425                         rte_delay_ms(1);
5426                         txdctl = IXGBE_READ_REG(hw,
5427                                                 IXGBE_TXDCTL(txq->reg_idx));
5428                 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5429                 if (!poll_ms)
5430                         PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5431                                 tx_queue_id);
5432         }
5433
5434         if (txq->ops != NULL) {
5435                 txq->ops->release_mbufs(txq);
5436                 txq->ops->reset(txq);
5437         }
5438         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5439
5440         return 0;
5441 }
5442
5443 void
5444 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5445         struct rte_eth_rxq_info *qinfo)
5446 {
5447         struct ixgbe_rx_queue *rxq;
5448
5449         rxq = dev->data->rx_queues[queue_id];
5450
5451         qinfo->mp = rxq->mb_pool;
5452         qinfo->scattered_rx = dev->data->scattered_rx;
5453         qinfo->nb_desc = rxq->nb_rx_desc;
5454
5455         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5456         qinfo->conf.rx_drop_en = rxq->drop_en;
5457         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5458         qinfo->conf.offloads = rxq->offloads;
5459 }
5460
5461 void
5462 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5463         struct rte_eth_txq_info *qinfo)
5464 {
5465         struct ixgbe_tx_queue *txq;
5466
5467         txq = dev->data->tx_queues[queue_id];
5468
5469         qinfo->nb_desc = txq->nb_tx_desc;
5470
5471         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5472         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5473         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5474
5475         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5476         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5477         qinfo->conf.offloads = txq->offloads;
5478         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5479 }
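/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): how the two query callbacks above are
 * reached through the generic ethdev API. The fields printed are a small,
 * arbitrary selection.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static void
example_dump_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_info;
	struct rte_eth_txq_info tx_info;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
		printf("rxq %u: %u descriptors, drop_en=%u\n",
		       (unsigned int)queue_id, (unsigned int)rx_info.nb_desc,
		       (unsigned int)rx_info.conf.rx_drop_en);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_info) == 0)
		printf("txq %u: %u descriptors, tx_rs_thresh=%u\n",
		       (unsigned int)queue_id, (unsigned int)tx_info.nb_desc,
		       (unsigned int)tx_info.conf.tx_rs_thresh);
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */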
5480
5481 /*
5482  * [VF] Initializes Receive Unit.
5483  */
5484 int __attribute__((cold))
5485 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5486 {
5487         struct ixgbe_hw     *hw;
5488         struct ixgbe_rx_queue *rxq;
5489         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5490         uint64_t bus_addr;
5491         uint32_t srrctl, psrtype = 0;
5492         uint16_t buf_size;
5493         uint16_t i;
5494         int ret;
5495
5496         PMD_INIT_FUNC_TRACE();
5497         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5498
5499         if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5500                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5501                         "it must be a power of 2");
5502                 return -1;
5503         }
5504
5505         if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5506                 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5507                         "it must be less than or equal to %d",
5508                         hw->mac.max_rx_queues);
5509                 return -1;
5510         }
5511
5512         /*
5513          * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5514          * disables the VF receipt of packets if the PF MTU is > 1500.
5515          * This is done to deal with an 82599 limitation that forces
5516          * the PF and all VFs to share the same MTU.
5517          * The PF driver then re-enables the VF receipt of packets when
5518          * the VF driver issues an IXGBE_VF_SET_LPE request.
5519          * In the meantime, the VF device cannot be used, even if the VF driver
5520          * and the Guest VM network stack are ready to accept packets with a
5521          * size up to the PF MTU.
5522          * As a work-around to this PF behaviour, force the call to
5523          * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5524          * VF packet reception works in all cases.
5525          */
5526         ixgbevf_rlpml_set_vf(hw,
5527                 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5528
5529         /*
5530          * Assume no header split and no VLAN strip support
5531          * Initially assume no header split and no VLAN strip support
5532          * on any Rx queue.
5533         rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5534         /* Setup RX queues */
5535         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5536                 rxq = dev->data->rx_queues[i];
5537
5538                 /* Allocate buffers for descriptor rings */
5539                 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5540                 if (ret)
5541                         return ret;
5542
5543                 /* Setup the Base and Length of the Rx Descriptor Rings */
5544                 bus_addr = rxq->rx_ring_phys_addr;
5545
5546                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5547                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5548                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5549                                 (uint32_t)(bus_addr >> 32));
5550                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5551                                 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5552                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5553                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5554
5556                 /* Configure the SRRCTL register */
5557                 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5558
5559                 /* Set if packets are dropped when no descriptors available */
5560                 if (rxq->drop_en)
5561                         srrctl |= IXGBE_SRRCTL_DROP_EN;
5562
5563                 /*
5564                  * Configure the RX buffer size in the BSIZEPACKET field of
5565                  * the SRRCTL register of the queue.
5566                  * The value is in 1 KB resolution. Valid values can be from
5567                  * 1 KB to 16 KB.
5568                  */
5569                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5570                         RTE_PKTMBUF_HEADROOM);
5571                 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5572                            IXGBE_SRRCTL_BSIZEPKT_MASK);
5573
5574                 /*
5575                  * VF modification to write virtual function SRRCTL register
5576                  */
5577                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5578
5579                 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5580                                        IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5581
5582                 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5583                     /* It adds dual VLAN length for supporting dual VLAN */
5584                     (rxmode->max_rx_pkt_len +
5585                                 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5586                         if (!dev->data->scattered_rx)
5587                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5588                         dev->data->scattered_rx = 1;
5589                 }
5590
5591                 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5592                         rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5593         }
5594
5595         /* Set RQPL for VF RSS according to the max number of Rx queues */
5596         psrtype |= (dev->data->nb_rx_queues >> 1) <<
5597                 IXGBE_PSRTYPE_RQPL_SHIFT;
5598         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5599
5600         ixgbe_set_rx_function(dev);
5601
5602         return 0;
5603 }
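/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): how the 1 KB-granular BSIZEPACKET value
 * written to SRRCTL above is derived from a mempool's data room size.
 * With the common 2176-byte data room, 2176 - 128 bytes of headroom gives
 * 2048 bytes, and 2048 >> 10 = 2, i.e. 2 KB receive buffers.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static uint32_t
example_srrctl_bsizepkt(struct rte_mempool *mb_pool)
{
	uint16_t buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mb_pool) -
				       RTE_PKTMBUF_HEADROOM);

	return (buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
	       IXGBE_SRRCTL_BSIZEPKT_MASK;
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */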
5604
5605 /*
5606  * [VF] Initializes Transmit Unit.
5607  */
5608 void __attribute__((cold))
5609 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5610 {
5611         struct ixgbe_hw     *hw;
5612         struct ixgbe_tx_queue *txq;
5613         uint64_t bus_addr;
5614         uint32_t txctrl;
5615         uint16_t i;
5616
5617         PMD_INIT_FUNC_TRACE();
5618         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5619
5620         /* Setup the Base and Length of the Tx Descriptor Rings */
5621         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5622                 txq = dev->data->tx_queues[i];
5623                 bus_addr = txq->tx_ring_phys_addr;
5624                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5625                                 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5626                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5627                                 (uint32_t)(bus_addr >> 32));
5628                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5629                                 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5630                 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5631                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5632                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5633
5634                 /*
5635                  * Disable Tx Head Writeback RO bit, since this hoses
5636                  * bookkeeping if things aren't delivered in order.
5637                  */
5638                 txctrl = IXGBE_READ_REG(hw,
5639                                 IXGBE_VFDCA_TXCTRL(i));
5640                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5641                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5642                                 txctrl);
5643         }
5644 }
5645
5646 /*
5647  * [VF] Start Transmit and Receive Units.
5648  */
5649 void __attribute__((cold))
5650 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5651 {
5652         struct ixgbe_hw     *hw;
5653         struct ixgbe_tx_queue *txq;
5654         struct ixgbe_rx_queue *rxq;
5655         uint32_t txdctl;
5656         uint32_t rxdctl;
5657         uint16_t i;
5658         int poll_ms;
5659
5660         PMD_INIT_FUNC_TRACE();
5661         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5662
5663         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5664                 txq = dev->data->tx_queues[i];
5665                 /* Setup Transmit Threshold Registers */
5666                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5667                 txdctl |= txq->pthresh & 0x7F;
5668                 txdctl |= ((txq->hthresh & 0x7F) << 8);
5669                 txdctl |= ((txq->wthresh & 0x7F) << 16);
5670                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5671         }
5672
5673         for (i = 0; i < dev->data->nb_tx_queues; i++) {
5674
5675                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5676                 txdctl |= IXGBE_TXDCTL_ENABLE;
5677                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5678
5679                 poll_ms = 10;
5680                 /* Wait until TX Enable ready */
5681                 do {
5682                         rte_delay_ms(1);
5683                         txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5684                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5685                 if (!poll_ms)
5686                         PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5687         }
5688         for (i = 0; i < dev->data->nb_rx_queues; i++) {
5689
5690                 rxq = dev->data->rx_queues[i];
5691
5692                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5693                 rxdctl |= IXGBE_RXDCTL_ENABLE;
5694                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5695
5696                 /* Wait until RX Enable ready */
5697                 poll_ms = 10;
5698                 do {
5699                         rte_delay_ms(1);
5700                         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5701                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5702                 if (!poll_ms)
5703                         PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5704                 rte_wmb();
5705                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5706
5707         }
5708 }
5709
5710 int
5711 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5712                     const struct rte_flow_action_rss *in)
5713 {
5714         if (in->key_len > RTE_DIM(out->key) ||
5715             in->queue_num > RTE_DIM(out->queue))
5716                 return -EINVAL;
5717         out->conf = (struct rte_flow_action_rss){
5718                 .func = in->func,
5719                 .level = in->level,
5720                 .types = in->types,
5721                 .key_len = in->key_len,
5722                 .queue_num = in->queue_num,
5723                 .key = memcpy(out->key, in->key, in->key_len),
5724                 .queue = memcpy(out->queue, in->queue,
5725                                 sizeof(*in->queue) * in->queue_num),
5726         };
5727         return 0;
5728 }
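/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): deep-copying a minimal RSS flow action
 * with ixgbe_rss_conf_init(). A NULL key with key_len 0 makes
 * ixgbe_config_rss_filter() fall back to the default hash key; the queue
 * list is an arbitrary assumption.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static int
example_copy_rss_action(struct ixgbe_rte_flow_rss_conf *out)
{
	static const uint16_t queues[2] = { 0, 1 };
	struct rte_flow_action_rss in = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IP,
		.key_len = 0,
		.key = NULL,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};

	/* Returns -EINVAL if the key or queue list would overflow 'out'. */
	return ixgbe_rss_conf_init(out, &in);
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */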
5729
5730 int
5731 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5732                       const struct rte_flow_action_rss *with)
5733 {
5734         return (comp->func == with->func &&
5735                 comp->level == with->level &&
5736                 comp->types == with->types &&
5737                 comp->key_len == with->key_len &&
5738                 comp->queue_num == with->queue_num &&
5739                 !memcmp(comp->key, with->key, with->key_len) &&
5740                 !memcmp(comp->queue, with->queue,
5741                         sizeof(*with->queue) * with->queue_num));
5742 }
5743
5744 int
5745 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5746                 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5747 {
5748         struct ixgbe_hw *hw;
5749         uint32_t reta;
5750         uint16_t i;
5751         uint16_t j;
5752         uint16_t sp_reta_size;
5753         uint32_t reta_reg;
5754         struct rte_eth_rss_conf rss_conf = {
5755                 .rss_key = conf->conf.key_len ?
5756                         (void *)(uintptr_t)conf->conf.key : NULL,
5757                 .rss_key_len = conf->conf.key_len,
5758                 .rss_hf = conf->conf.types,
5759         };
5760         struct ixgbe_filter_info *filter_info =
5761                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5762
5763         PMD_INIT_FUNC_TRACE();
5764         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5765
5766         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5767
5768         if (!add) {
5769                 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5770                                           &conf->conf)) {
5771                         ixgbe_rss_disable(dev);
5772                         memset(&filter_info->rss_info, 0,
5773                                 sizeof(struct ixgbe_rte_flow_rss_conf));
5774                         return 0;
5775                 }
5776                 return -EINVAL;
5777         }
5778
5779         if (filter_info->rss_info.conf.queue_num)
5780                 return -EINVAL;
5781         /* Fill in the redirection table.
5782          * The byte-swap is needed because NIC registers are in
5783          * little-endian order.
5784          */
5785         reta = 0;
5786         for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5787                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5788
5789                 if (j == conf->conf.queue_num)
5790                         j = 0;
5791                 reta = (reta << 8) | conf->conf.queue[j];
5792                 if ((i & 3) == 3)
5793                         IXGBE_WRITE_REG(hw, reta_reg,
5794                                         rte_bswap32(reta));
5795         }
5796
5797         /* Configure the RSS key and the RSS protocols used to compute
5798          * the RSS hash of input packets.
5799          */
5800         if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5801                 ixgbe_rss_disable(dev);
5802                 return 0;
5803         }
5804         if (rss_conf.rss_key == NULL)
5805                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5806         ixgbe_hw_rss_hash_set(hw, &rss_conf);
5807
5808         if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5809                 return -EINVAL;
5810
5811         return 0;
5812 }
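/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard): the RETA packing done in
 * ixgbe_config_rss_filter() above. Four one-byte queue indices are packed
 * into a 32-bit value MSB-first and then byte-swapped, so q[0] ends up in
 * the least-significant byte of the little-endian register.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
static uint32_t
example_pack_reta_entries(const uint16_t q[4])
{
	uint32_t reta = 0;
	int k;

	for (k = 0; k < 4; k++)
		reta = (reta << 8) | (uint8_t)q[k];

	return rte_bswap32(reta);
}
#endif /* IXGBE_RXTX_DOC_SKETCHES */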
5813
5814 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5815 __rte_weak int
5816 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5817 {
5818         return -1;
5819 }
5820
5821 __rte_weak uint16_t
5822 ixgbe_recv_pkts_vec(
5823         void __rte_unused *rx_queue,
5824         struct rte_mbuf __rte_unused **rx_pkts,
5825         uint16_t __rte_unused nb_pkts)
5826 {
5827         return 0;
5828 }
5829
5830 __rte_weak uint16_t
5831 ixgbe_recv_scattered_pkts_vec(
5832         void __rte_unused *rx_queue,
5833         struct rte_mbuf __rte_unused **rx_pkts,
5834         uint16_t __rte_unused nb_pkts)
5835 {
5836         return 0;
5837 }
5838
5839 __rte_weak int
5840 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5841 {
5842         return -1;
5843 }
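/*
 * Illustrative sketch only (compiled out by the hypothetical
 * IXGBE_RXTX_DOC_SKETCHES guard, names hypothetical): the weak/strong
 * linkage pattern used by the stubs above. The weak default is used only
 * when no other object file supplies a strong definition with the same
 * name, which is how the vector Rx/Tx sources replace these stubs when
 * they are compiled in.
 */
#ifdef IXGBE_RXTX_DOC_SKETCHES
__rte_weak int
example_vec_hook(void)
{
	return -1; /* scalar fallback */
}

/*
 * In the vector compilation unit:
 * int example_vec_hook(void) { return 0; }
 */
#endif /* IXGBE_RXTX_DOC_SKETCHES */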