remove unused ring includes
drivers/net/e1000/igb_rxtx.c (dpdk.git)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
61 #include <rte_mbuf.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
65 #include <rte_udp.h>
66 #include <rte_tcp.h>
67 #include <rte_sctp.h>
68 #include <rte_string_fns.h>
69
70 #include "e1000_logs.h"
71 #include "base/e1000_api.h"
72 #include "e1000_ethdev.h"
73
74 /* Bit mask to indicate which bits are required for building the TX context */
75 #define IGB_TX_OFFLOAD_MASK (                    \
76                 PKT_TX_VLAN_PKT |                \
77                 PKT_TX_IP_CKSUM |                \
78                 PKT_TX_L4_MASK |                 \
79                 PKT_TX_TCP_SEG)
80
81 /**
82  * Structure associated with each descriptor of the RX ring of a RX queue.
83  */
84 struct igb_rx_entry {
85         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
86 };
87
88 /**
89  * Structure associated with each descriptor of the TX ring of a TX queue.
90  */
91 struct igb_tx_entry {
92         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
93         uint16_t next_id; /**< Index of next descriptor in ring. */
94         uint16_t last_id; /**< Index of last scattered descriptor. */
95 };
96
97 /**
98  * Structure associated with each RX queue.
99  */
100 struct igb_rx_queue {
101         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
102         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
103         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
104         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
105         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
106         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
107         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
108         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
109         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
110         uint16_t            rx_tail;    /**< current value of RDT register. */
111         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
112         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
113         uint16_t            queue_id;   /**< RX queue index. */
114         uint16_t            reg_idx;    /**< RX queue register index. */
115         uint8_t             port_id;    /**< Device port identifier. */
116         uint8_t             pthresh;    /**< Prefetch threshold register. */
117         uint8_t             hthresh;    /**< Host threshold register. */
118         uint8_t             wthresh;    /**< Write-back threshold register. */
119         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
120         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
121 };
122
123 /**
124  * Hardware context number
125  */
126 enum igb_advctx_num {
127         IGB_CTX_0    = 0, /**< CTX0    */
128         IGB_CTX_1    = 1, /**< CTX1    */
129         IGB_CTX_NUM  = 2, /**< CTX_NUM */
130 };
131
132 /** Offload features */
133 union igb_tx_offload {
134         uint64_t data;
135         struct {
136                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
137                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
138                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
139                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
140                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
141
142                 /* uint64_t unused:8; */
143         };
144 };
145
146 /*
147  * Compare mask for igb_tx_offload.data,
148  * should be in sync with igb_tx_offload layout.
149  */
150 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
151 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
152 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
153 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
154 /** MAC + IP + TCP + MSS mask. */
155 #define TX_TSO_CMP_MASK \
156         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
157
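/*
 * Illustrative sketch (not part of the driver): the masks above are applied
 * when deciding whether a cached context descriptor can be reused, mirroring
 * the comparison done in what_advctx_update() below:
 *
 *     union igb_tx_offload cached, cached_mask, req;
 *     ...
 *     int match = (cached.data == (cached_mask.data & req.data));
 *
 * Only the fields selected by the mask are compared, so a packet that differs
 * only in fields the cached context does not cover still matches.
 */
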
158 /**
159  * Structure used to check whether a new context descriptor needs to be built
160  */
161 struct igb_advctx_info {
162         uint64_t flags;           /**< ol_flags related to context build. */
163         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
164         union igb_tx_offload tx_offload;
165         /** compare mask for tx offload. */
166         union igb_tx_offload tx_offload_mask;
167 };
168
169 /**
170  * Structure associated with each TX queue.
171  */
172 struct igb_tx_queue {
173         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
174         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
175         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
176         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
177         uint32_t               txd_type;      /**< Device-specific TXD type */
178         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
179         uint16_t               tx_tail; /**< Current value of TDT register. */
180         uint16_t               tx_head;
181         /**< Index of first used TX descriptor. */
182         uint16_t               queue_id; /**< TX queue index. */
183         uint16_t               reg_idx;  /**< TX queue register index. */
184         uint8_t                port_id;  /**< Device port identifier. */
185         uint8_t                pthresh;  /**< Prefetch threshold register. */
186         uint8_t                hthresh;  /**< Host threshold register. */
187         uint8_t                wthresh;  /**< Write-back threshold register. */
188         uint32_t               ctx_curr;
189         /**< Index of the currently used hardware context. */
190         uint32_t               ctx_start;
191         /**< Start context position for transmit queue. */
192         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
193         /**< Hardware context history.*/
194 };
195
196 #if 1
197 #define RTE_PMD_USE_PREFETCH
198 #endif
199
200 #ifdef RTE_PMD_USE_PREFETCH
201 #define rte_igb_prefetch(p)     rte_prefetch0(p)
202 #else
203 #define rte_igb_prefetch(p)     do {} while(0)
204 #endif
205
206 #ifdef RTE_PMD_PACKET_PREFETCH
207 #define rte_packet_prefetch(p) rte_prefetch1(p)
208 #else
209 #define rte_packet_prefetch(p)  do {} while(0)
210 #endif
211
212 /*
213  * Macros for the VMDq feature and TSO limits of the 1 GbE NIC.
214  */
215 #define E1000_VMOLR_SIZE                        (8)
216 #define IGB_TSO_MAX_HDRLEN                      (512)
217 #define IGB_TSO_MAX_MSS                         (9216)
218
219 /*********************************************************************
220  *
221  *  TX function
222  *
223  **********************************************************************/
224
225 /*
226  * The hardware has some limitations for TCP segmentation offload (TSO),
227  * so check whether the requested parameters are valid.
228  */
229 static inline uint64_t
230 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
231 {
232         if (!(ol_req & PKT_TX_TCP_SEG))
233                 return ol_req;
234         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
235                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
236                 ol_req &= ~PKT_TX_TCP_SEG;
237                 ol_req |= PKT_TX_TCP_CKSUM;
238         }
239         return ol_req;
240 }
241
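/*
 * Illustrative sketch (not part of the driver), assuming the standard rte_mbuf
 * TX offload fields of this DPDK version: how an application would request TSO
 * so that check_tso_para() leaves PKT_TX_TCP_SEG set.
 *
 *     m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_IPV4;
 *     m->l2_len = sizeof(struct ether_hdr);
 *     m->l3_len = sizeof(struct ipv4_hdr);
 *     m->l4_len = sizeof(struct tcp_hdr);
 *     m->tso_segsz = 1448;
 *
 * If tso_segsz exceeded IGB_TSO_MAX_MSS, or the summed header lengths exceeded
 * IGB_TSO_MAX_HDRLEN, the request would be downgraded to a plain TCP checksum
 * offload by the function above.
 */
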
242 /*
243  * Advanced context descriptors are almost the same between igb and ixgbe.
244  * This is kept as a separate function to leave room for optimization here;
245  * rework is required to go with the pre-defined values.
246  */
247
248 static inline void
249 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
250                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
251                 uint64_t ol_flags, union igb_tx_offload tx_offload)
252 {
253         uint32_t type_tucmd_mlhl;
254         uint32_t mss_l4len_idx;
255         uint32_t ctx_idx, ctx_curr;
256         uint32_t vlan_macip_lens;
257         union igb_tx_offload tx_offload_mask;
258
259         ctx_curr = txq->ctx_curr;
260         ctx_idx = ctx_curr + txq->ctx_start;
261
262         tx_offload_mask.data = 0;
263         type_tucmd_mlhl = 0;
264
265         /* Specify which HW CTX to upload. */
266         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
267
268         if (ol_flags & PKT_TX_VLAN_PKT)
269                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
270
271         /* check if TCP segmentation required for this packet */
272         if (ol_flags & PKT_TX_TCP_SEG) {
273                 /* implies IP cksum in IPv4 */
274                 if (ol_flags & PKT_TX_IP_CKSUM)
275                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
276                                 E1000_ADVTXD_TUCMD_L4T_TCP |
277                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
278                 else
279                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
280                                 E1000_ADVTXD_TUCMD_L4T_TCP |
281                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
282
283                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
284                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
285                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
286         } else { /* no TSO, check if hardware checksum is needed */
287                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
288                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
289
290                 if (ol_flags & PKT_TX_IP_CKSUM)
291                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
292
293                 switch (ol_flags & PKT_TX_L4_MASK) {
294                 case PKT_TX_UDP_CKSUM:
295                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
296                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
297                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
298                         break;
299                 case PKT_TX_TCP_CKSUM:
300                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
301                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
302                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
303                         break;
304                 case PKT_TX_SCTP_CKSUM:
305                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
306                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
308                         break;
309                 default:
310                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
311                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
312                         break;
313                 }
314         }
315
316         txq->ctx_cache[ctx_curr].flags = ol_flags;
317         txq->ctx_cache[ctx_curr].tx_offload.data =
318                 tx_offload_mask.data & tx_offload.data;
319         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
320
321         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
322         vlan_macip_lens = (uint32_t)tx_offload.data;
323         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
324         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
325         ctx_txd->seqnum_seed = 0;
326 }
327
328 /*
329  * Check which hardware context can be used. Use the existing match
330  * or create a new context descriptor.
331  */
332 static inline uint32_t
333 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
334                 union igb_tx_offload tx_offload)
335 {
336         /* If match with the current context */
337         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
338                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
339                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
340                         return txq->ctx_curr;
341         }
342
343         /* If match with the second context */
344         txq->ctx_curr ^= 1;
345         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
346                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
347                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
348                         return txq->ctx_curr;
349         }
350
351         /* Mismatch: a new context descriptor must be built in the current slot */
352         return IGB_CTX_NUM;
353 }
354
355 static inline uint32_t
356 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
357 {
358         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
359         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
360         uint32_t tmp;
361
362         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
363         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
364         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
365         return tmp;
366 }
367
368 static inline uint32_t
369 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
370 {
371         uint32_t cmdtype;
372         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
373         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
374         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
375         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
376         return cmdtype;
377 }
378
379 uint16_t
380 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
381                uint16_t nb_pkts)
382 {
383         struct igb_tx_queue *txq;
384         struct igb_tx_entry *sw_ring;
385         struct igb_tx_entry *txe, *txn;
386         volatile union e1000_adv_tx_desc *txr;
387         volatile union e1000_adv_tx_desc *txd;
388         struct rte_mbuf     *tx_pkt;
389         struct rte_mbuf     *m_seg;
390         uint64_t buf_dma_addr;
391         uint32_t olinfo_status;
392         uint32_t cmd_type_len;
393         uint32_t pkt_len;
394         uint16_t slen;
395         uint64_t ol_flags;
396         uint16_t tx_end;
397         uint16_t tx_id;
398         uint16_t tx_last;
399         uint16_t nb_tx;
400         uint64_t tx_ol_req;
401         uint32_t new_ctx = 0;
402         uint32_t ctx = 0;
403         union igb_tx_offload tx_offload = {0};
404
405         txq = tx_queue;
406         sw_ring = txq->sw_ring;
407         txr     = txq->tx_ring;
408         tx_id   = txq->tx_tail;
409         txe = &sw_ring[tx_id];
410
411         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
412                 tx_pkt = *tx_pkts++;
413                 pkt_len = tx_pkt->pkt_len;
414
415                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
416
417                 /*
418                  * The number of descriptors that must be allocated for a
419                  * packet is the number of segments of that packet, plus 1
420                  * context descriptor when any TX offload requires one.
421                  * Determine the last TX descriptor to allocate in the TX ring
422                  * for the packet, starting from the current position (tx_id)
423                  * in the ring.
424                  */
425                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
426
427                 ol_flags = tx_pkt->ol_flags;
428                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
429
430                 /* Check whether a context descriptor needs to be built. */
431                 if (tx_ol_req) {
432                         tx_offload.l2_len = tx_pkt->l2_len;
433                         tx_offload.l3_len = tx_pkt->l3_len;
434                         tx_offload.l4_len = tx_pkt->l4_len;
435                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
436                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
437                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
438
439                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
440                         /* Only allocate a context descriptor if required. */
441                         new_ctx = (ctx == IGB_CTX_NUM);
442                         ctx = txq->ctx_curr + txq->ctx_start;
443                         tx_last = (uint16_t) (tx_last + new_ctx);
444                 }
445                 if (tx_last >= txq->nb_tx_desc)
446                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
447
448                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
449                            " tx_first=%u tx_last=%u",
450                            (unsigned) txq->port_id,
451                            (unsigned) txq->queue_id,
452                            (unsigned) pkt_len,
453                            (unsigned) tx_id,
454                            (unsigned) tx_last);
455
456                 /*
457                  * Check if there are enough free descriptors in the TX ring
458                  * to transmit the next packet.
459                  * This operation is based on the two following rules:
460                  *
461                  *   1- Only check that the last needed TX descriptor can be
462                  *      allocated (by construction, if that descriptor is free,
463                  *      all intermediate ones are also free).
464                  *
465                  *      For this purpose, the index of the last TX descriptor
466                  *      used for a packet (the "last descriptor" of a packet)
467                  *      is recorded in the TX entries (the last one included)
468                  *      that are associated with all TX descriptors allocated
469                  *      for that packet.
470                  *
471                  *   2- Avoid to allocate the last free TX descriptor of the
472                  *      ring, in order to never set the TDT register with the
473                  *      same value stored in parallel by the NIC in the TDH
474                  *      register, which makes the TX engine of the NIC enter
475                  *      in a deadlock situation.
476                  *
477                  *      By extension, avoid to allocate a free descriptor that
478                  *      belongs to the last set of free descriptors allocated
479                  *      to the same packet previously transmitted.
480                  */
481
482                 /*
483                  * The "last descriptor" of the packet, if any, that previously
484                  * used the descriptor we now want to allocate last (tx_last).
485                  */
486                 tx_end = sw_ring[tx_last].last_id;
487
488                 /*
489                  * The next descriptor following that "last descriptor" in the
490                  * ring.
491                  */
492                 tx_end = sw_ring[tx_end].next_id;
493
494                 /*
495                  * The "last descriptor" associated with that next descriptor.
496                  */
497                 tx_end = sw_ring[tx_end].last_id;
498
499                 /*
500                  * Check that this descriptor is free.
501                  */
502                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
503                         if (nb_tx == 0)
504                                 return 0;
505                         goto end_of_tx;
506                 }
507
508                 /*
509                  * Set common flags of all TX Data Descriptors.
510                  *
511                  * The following bits must be set in all Data Descriptors:
512                  *   - E1000_ADVTXD_DTYP_DATA
513                  *   - E1000_ADVTXD_DCMD_DEXT
514                  *
515                  * The following bits must be set in the first Data Descriptor
516                  * and are ignored in the other ones:
517                  *   - E1000_ADVTXD_DCMD_IFCS
518                  *   - E1000_ADVTXD_MAC_1588
519                  *   - E1000_ADVTXD_DCMD_VLE
520                  *
521                  * The following bits must only be set in the last Data
522                  * Descriptor:
523                  *   - E1000_TXD_CMD_EOP
524                  *
525                  * The following bits can be set in any Data Descriptor, but
526                  * are only set in the last Data Descriptor:
527                  *   - E1000_TXD_CMD_RS
528                  */
529                 cmd_type_len = txq->txd_type |
530                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
531                 if (tx_ol_req & PKT_TX_TCP_SEG)
532                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
533                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
534 #if defined(RTE_LIBRTE_IEEE1588)
535                 if (ol_flags & PKT_TX_IEEE1588_TMST)
536                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
537 #endif
538                 if (tx_ol_req) {
539                         /* Setup TX Advanced context descriptor if required */
540                         if (new_ctx) {
541                                 volatile struct e1000_adv_tx_context_desc *
542                                     ctx_txd;
543
544                                 ctx_txd = (volatile struct
545                                     e1000_adv_tx_context_desc *)
546                                     &txr[tx_id];
547
548                                 txn = &sw_ring[txe->next_id];
549                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
550
551                                 if (txe->mbuf != NULL) {
552                                         rte_pktmbuf_free_seg(txe->mbuf);
553                                         txe->mbuf = NULL;
554                                 }
555
556                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
557
558                                 txe->last_id = tx_last;
559                                 tx_id = txe->next_id;
560                                 txe = txn;
561                         }
562
563                         /* Setup the TX Advanced Data Descriptor */
564                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
565                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
566                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
567                 }
568
569                 m_seg = tx_pkt;
570                 do {
571                         txn = &sw_ring[txe->next_id];
572                         txd = &txr[tx_id];
573
574                         if (txe->mbuf != NULL)
575                                 rte_pktmbuf_free_seg(txe->mbuf);
576                         txe->mbuf = m_seg;
577
578                         /*
579                          * Set up transmit descriptor.
580                          */
581                         slen = (uint16_t) m_seg->data_len;
582                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
583                         txd->read.buffer_addr =
584                                 rte_cpu_to_le_64(buf_dma_addr);
585                         txd->read.cmd_type_len =
586                                 rte_cpu_to_le_32(cmd_type_len | slen);
587                         txd->read.olinfo_status =
588                                 rte_cpu_to_le_32(olinfo_status);
589                         txe->last_id = tx_last;
590                         tx_id = txe->next_id;
591                         txe = txn;
592                         m_seg = m_seg->next;
593                 } while (m_seg != NULL);
594
595                 /*
596                  * The last packet data descriptor needs End Of Packet (EOP)
597                  * and Report Status (RS).
598                  */
599                 txd->read.cmd_type_len |=
600                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
601         }
602  end_of_tx:
603         rte_wmb();
604
605         /*
606          * Set the Transmit Descriptor Tail (TDT).
607          */
608         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
609         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
610                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
611                    (unsigned) tx_id, (unsigned) nb_tx);
612         txq->tx_tail = tx_id;
613
614         return nb_tx;
615 }
616
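/*
 * Illustrative sketch (not part of the driver): eth_igb_xmit_pkts() is
 * normally reached through the generic burst API. A minimal caller, assuming
 * an already configured and started port/queue, might look like:
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *     while (sent < nb_pkts)
 *             rte_pktmbuf_free(pkts[sent++]);    (drop what the ring refused)
 *
 * Any per-packet offloads (VLAN insertion, checksums, TSO) must be described
 * in pkts[i]->ol_flags and the l2/l3/l4 length fields before the call, since
 * that is all this PMD inspects when building context descriptors.
 */
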
617 /*********************************************************************
618  *
619  *  RX functions
620  *
621  **********************************************************************/
622 #define IGB_PACKET_TYPE_IPV4              0X01
623 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
624 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
625 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
626 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
627 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
628 #define IGB_PACKET_TYPE_IPV6              0X04
629 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
630 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
631 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
632 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
633 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
634 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
635 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
636 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
637 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
638 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
639 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
640 #define IGB_PACKET_TYPE_MAX               0X80
641 #define IGB_PACKET_TYPE_MASK              0X7F
642 #define IGB_PACKET_TYPE_SHIFT             0X04
643 static inline uint32_t
644 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
645 {
646         static const uint32_t
647                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
648                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
649                         RTE_PTYPE_L3_IPV4,
650                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
651                         RTE_PTYPE_L3_IPV4_EXT,
652                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
653                         RTE_PTYPE_L3_IPV6,
654                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
655                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
656                         RTE_PTYPE_INNER_L3_IPV6,
657                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
658                         RTE_PTYPE_L3_IPV6_EXT,
659                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
660                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
661                         RTE_PTYPE_INNER_L3_IPV6_EXT,
662                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
663                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
664                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
665                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
666                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
667                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
668                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
669                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
670                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
671                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
672                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
674                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
675                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
676                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
677                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
678                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
679                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
680                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
681                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
682                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
683                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
684                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
686                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
687                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
688                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
689                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
690         };
691         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
692                 return RTE_PTYPE_UNKNOWN;
693
694         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
695
696         return ptype_table[pkt_info];
697 }
698
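/*
 * Illustrative sketch (not part of the driver): the packet_type value produced
 * above can be tested against the RTE_PTYPE_* masks from rte_mbuf.h, e.g.:
 *
 *     if ((m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
 *         (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
 *             handle_ipv4_tcp(m);
 *
 * handle_ipv4_tcp() is a hypothetical application callback, shown only to
 * demonstrate the mask-and-compare pattern.
 */
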
699 static inline uint64_t
700 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
701 {
702         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
703
704 #if defined(RTE_LIBRTE_IEEE1588)
705         static uint32_t ip_pkt_etqf_map[8] = {
706                 0, 0, 0, PKT_RX_IEEE1588_PTP,
707                 0, 0, 0, 0,
708         };
709
710         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
711         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
712
713         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
714         if (hw->mac.type == e1000_i210)
715                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
716         else
717                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
718 #else
719         RTE_SET_USED(rxq);
720 #endif
721
722         return pkt_flags;
723 }
724
725 static inline uint64_t
726 rx_desc_status_to_pkt_flags(uint32_t rx_status)
727 {
728         uint64_t pkt_flags;
729
730         /* Check if VLAN present */
731         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
732                 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
733
734 #if defined(RTE_LIBRTE_IEEE1588)
735         if (rx_status & E1000_RXD_STAT_TMST)
736                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
737 #endif
738         return pkt_flags;
739 }
740
741 static inline uint64_t
742 rx_desc_error_to_pkt_flags(uint32_t rx_status)
743 {
744         /*
745          * Bit 30: IPE, IPv4 checksum error
746          * Bit 29: L4I, L4 integrity error
747          */
748
749         static uint64_t error_to_pkt_flags_map[4] = {
750                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
751                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
752         };
753         return error_to_pkt_flags_map[(rx_status >>
754                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
755 }
756
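/*
 * Illustrative sketch (not part of the driver): an application reads the
 * resulting flags from the mbuf after rte_eth_rx_burst(), e.g.:
 *
 *     if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
 *             rte_pktmbuf_free(m);    (checksum failed, drop the packet)
 *
 * The flags are only meaningful when RX checksum offload is enabled on the
 * port.
 */
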
757 uint16_t
758 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
759                uint16_t nb_pkts)
760 {
761         struct igb_rx_queue *rxq;
762         volatile union e1000_adv_rx_desc *rx_ring;
763         volatile union e1000_adv_rx_desc *rxdp;
764         struct igb_rx_entry *sw_ring;
765         struct igb_rx_entry *rxe;
766         struct rte_mbuf *rxm;
767         struct rte_mbuf *nmb;
768         union e1000_adv_rx_desc rxd;
769         uint64_t dma_addr;
770         uint32_t staterr;
771         uint32_t hlen_type_rss;
772         uint16_t pkt_len;
773         uint16_t rx_id;
774         uint16_t nb_rx;
775         uint16_t nb_hold;
776         uint64_t pkt_flags;
777
778         nb_rx = 0;
779         nb_hold = 0;
780         rxq = rx_queue;
781         rx_id = rxq->rx_tail;
782         rx_ring = rxq->rx_ring;
783         sw_ring = rxq->sw_ring;
784         while (nb_rx < nb_pkts) {
785                 /*
786                  * The order of operations here is important as the DD status
787                  * bit must not be read after any other descriptor fields.
788                  * rx_ring and rxdp are pointing to volatile data so the order
789                  * of accesses cannot be reordered by the compiler. If they were
790                  * not volatile, they could be reordered which could lead to
791                  * using invalid descriptor fields when read from rxd.
792                  */
793                 rxdp = &rx_ring[rx_id];
794                 staterr = rxdp->wb.upper.status_error;
795                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
796                         break;
797                 rxd = *rxdp;
798
799                 /*
800                  * End of packet.
801                  *
802                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
803                  * likely to be invalid and to be dropped by the various
804                  * validation checks performed by the network stack.
805                  *
806                  * Allocate a new mbuf to replenish the RX ring descriptor.
807                  * If the allocation fails:
808                  *    - arrange for that RX descriptor to be the first one
809                  *      being parsed the next time the receive function is
810                  *      invoked [on the same queue].
811                  *
812                  *    - Stop parsing the RX ring and return immediately.
813                  *
814                  * This policy does not drop the packet received in the RX
815                  * descriptor for which the allocation of a new mbuf failed.
816                  * Thus, that packet can still be retrieved later once
817                  * mbufs have been freed in the meantime.
818                  * As a side effect, holding RX descriptors instead of
819                  * systematically giving them back to the NIC may lead to
820                  * RX ring exhaustion situations.
821                  * However, the NIC can gracefully prevent such situations
822                  * from happening by sending specific "back-pressure" flow
823                  * control frames to its peer(s).
824                  */
825                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
826                            "staterr=0x%x pkt_len=%u",
827                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
828                            (unsigned) rx_id, (unsigned) staterr,
829                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
830
831                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
832                 if (nmb == NULL) {
833                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
834                                    "queue_id=%u", (unsigned) rxq->port_id,
835                                    (unsigned) rxq->queue_id);
836                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
837                         break;
838                 }
839
840                 nb_hold++;
841                 rxe = &sw_ring[rx_id];
842                 rx_id++;
843                 if (rx_id == rxq->nb_rx_desc)
844                         rx_id = 0;
845
846                 /* Prefetch next mbuf while processing current one. */
847                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
848
849                 /*
850                  * When next RX descriptor is on a cache-line boundary,
851                  * prefetch the next 4 RX descriptors and the next 8 pointers
852                  * to mbufs.
853                  */
854                 if ((rx_id & 0x3) == 0) {
855                         rte_igb_prefetch(&rx_ring[rx_id]);
856                         rte_igb_prefetch(&sw_ring[rx_id]);
857                 }
858
859                 rxm = rxe->mbuf;
860                 rxe->mbuf = nmb;
861                 dma_addr =
862                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
863                 rxdp->read.hdr_addr = 0;
864                 rxdp->read.pkt_addr = dma_addr;
865
866                 /*
867                  * Initialize the returned mbuf.
868                  * 1) setup generic mbuf fields:
869                  *    - number of segments,
870                  *    - next segment,
871                  *    - packet length,
872                  *    - RX port identifier.
873                  * 2) integrate hardware offload data, if any:
874                  *    - RSS flag & hash,
875                  *    - IP checksum flag,
876                  *    - VLAN TCI, if any,
877                  *    - error flags.
878                  */
879                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
880                                       rxq->crc_len);
881                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
882                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
883                 rxm->nb_segs = 1;
884                 rxm->next = NULL;
885                 rxm->pkt_len = pkt_len;
886                 rxm->data_len = pkt_len;
887                 rxm->port = rxq->port_id;
888
889                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
890                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
891                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
892                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
893
894                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
895                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
896                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
897                 rxm->ol_flags = pkt_flags;
898                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
899                                                 lo_dword.hs_rss.pkt_info);
900
901                 /*
902                  * Store the mbuf address into the next entry of the array
903                  * of returned packets.
904                  */
905                 rx_pkts[nb_rx++] = rxm;
906         }
907         rxq->rx_tail = rx_id;
908
909         /*
910          * If the number of free RX descriptors is greater than the RX free
911          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
912          * register.
913          * Update the RDT with the value of the last processed RX descriptor
914          * minus 1, to guarantee that the RDT register is never equal to the
915                  * RDH register, which creates a "full" ring situation from the
916          * hardware point of view...
917          */
918         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
919         if (nb_hold > rxq->rx_free_thresh) {
920                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
921                            "nb_hold=%u nb_rx=%u",
922                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
923                            (unsigned) rx_id, (unsigned) nb_hold,
924                            (unsigned) nb_rx);
925                 rx_id = (uint16_t) ((rx_id == 0) ?
926                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
927                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
928                 nb_hold = 0;
929         }
930         rxq->nb_rx_hold = nb_hold;
931         return nb_rx;
932 }
933
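/*
 * Illustrative sketch (not part of the driver): eth_igb_recv_pkts() is
 * normally invoked through the generic burst API, e.g.:
 *
 *     struct rte_mbuf *burst[32];
 *     uint16_t n = rte_eth_rx_burst(port_id, queue_id, burst, 32);
 *     for (uint16_t i = 0; i < n; i++)
 *             process_packet(burst[i]);    (application-defined handler)
 *
 * The function returns at most nb_pkts mbufs, each holding one complete
 * (non-scattered) frame; the caller owns the returned mbufs and must free them
 * once processed.
 */
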
934 uint16_t
935 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
936                          uint16_t nb_pkts)
937 {
938         struct igb_rx_queue *rxq;
939         volatile union e1000_adv_rx_desc *rx_ring;
940         volatile union e1000_adv_rx_desc *rxdp;
941         struct igb_rx_entry *sw_ring;
942         struct igb_rx_entry *rxe;
943         struct rte_mbuf *first_seg;
944         struct rte_mbuf *last_seg;
945         struct rte_mbuf *rxm;
946         struct rte_mbuf *nmb;
947         union e1000_adv_rx_desc rxd;
948         uint64_t dma; /* Physical address of mbuf data buffer */
949         uint32_t staterr;
950         uint32_t hlen_type_rss;
951         uint16_t rx_id;
952         uint16_t nb_rx;
953         uint16_t nb_hold;
954         uint16_t data_len;
955         uint64_t pkt_flags;
956
957         nb_rx = 0;
958         nb_hold = 0;
959         rxq = rx_queue;
960         rx_id = rxq->rx_tail;
961         rx_ring = rxq->rx_ring;
962         sw_ring = rxq->sw_ring;
963
964         /*
965          * Retrieve RX context of current packet, if any.
966          */
967         first_seg = rxq->pkt_first_seg;
968         last_seg = rxq->pkt_last_seg;
969
970         while (nb_rx < nb_pkts) {
971         next_desc:
972                 /*
973                  * The order of operations here is important as the DD status
974                  * bit must not be read after any other descriptor fields.
975                  * rx_ring and rxdp are pointing to volatile data so the order
976                  * of accesses cannot be reordered by the compiler. If they were
977                  * not volatile, they could be reordered which could lead to
978                  * using invalid descriptor fields when read from rxd.
979                  */
980                 rxdp = &rx_ring[rx_id];
981                 staterr = rxdp->wb.upper.status_error;
982                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
983                         break;
984                 rxd = *rxdp;
985
986                 /*
987                  * Descriptor done.
988                  *
989                  * Allocate a new mbuf to replenish the RX ring descriptor.
990                  * If the allocation fails:
991                  *    - arrange for that RX descriptor to be the first one
992                  *      being parsed the next time the receive function is
993                  *      invoked [on the same queue].
994                  *
995                  *    - Stop parsing the RX ring and return immediately.
996                  *
997                  * This policy does not drop the packet received in the RX
998                  * descriptor for which the allocation of a new mbuf failed.
999                  * Thus, that packet can still be retrieved later once
1000                  * mbufs have been freed in the meantime.
1001                  * As a side effect, holding RX descriptors instead of
1002                  * systematically giving them back to the NIC may lead to
1003                  * RX ring exhaustion situations.
1004                  * However, the NIC can gracefully prevent such situations
1005                  * from happening by sending specific "back-pressure" flow
1006                  * control frames to its peer(s).
1007                  */
1008                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1009                            "staterr=0x%x data_len=%u",
1010                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1011                            (unsigned) rx_id, (unsigned) staterr,
1012                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1013
1014                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1015                 if (nmb == NULL) {
1016                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1017                                    "queue_id=%u", (unsigned) rxq->port_id,
1018                                    (unsigned) rxq->queue_id);
1019                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1020                         break;
1021                 }
1022
1023                 nb_hold++;
1024                 rxe = &sw_ring[rx_id];
1025                 rx_id++;
1026                 if (rx_id == rxq->nb_rx_desc)
1027                         rx_id = 0;
1028
1029                 /* Prefetch next mbuf while processing current one. */
1030                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1031
1032                 /*
1033                  * When next RX descriptor is on a cache-line boundary,
1034                  * prefetch the next 4 RX descriptors and the next 8 pointers
1035                  * to mbufs.
1036                  */
1037                 if ((rx_id & 0x3) == 0) {
1038                         rte_igb_prefetch(&rx_ring[rx_id]);
1039                         rte_igb_prefetch(&sw_ring[rx_id]);
1040                 }
1041
1042                 /*
1043                  * Update RX descriptor with the physical address of the new
1044                  * data buffer of the new allocated mbuf.
1045                  */
1046                 rxm = rxe->mbuf;
1047                 rxe->mbuf = nmb;
1048                 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1049                 rxdp->read.pkt_addr = dma;
1050                 rxdp->read.hdr_addr = 0;
1051
1052                 /*
1053                  * Set data length & data buffer address of mbuf.
1054                  */
1055                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1056                 rxm->data_len = data_len;
1057                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1058
1059                 /*
1060                  * If this is the first buffer of the received packet,
1061                  * set the pointer to the first mbuf of the packet and
1062                  * initialize its context.
1063                  * Otherwise, update the total length and the number of segments
1064                  * of the current scattered packet, and update the pointer to
1065                  * the last mbuf of the current packet.
1066                  */
1067                 if (first_seg == NULL) {
1068                         first_seg = rxm;
1069                         first_seg->pkt_len = data_len;
1070                         first_seg->nb_segs = 1;
1071                 } else {
1072                         first_seg->pkt_len += data_len;
1073                         first_seg->nb_segs++;
1074                         last_seg->next = rxm;
1075                 }
1076
1077                 /*
1078                  * If this is not the last buffer of the received packet,
1079                  * update the pointer to the last mbuf of the current scattered
1080                  * packet and continue to parse the RX ring.
1081                  */
1082                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1083                         last_seg = rxm;
1084                         goto next_desc;
1085                 }
1086
1087                 /*
1088                  * This is the last buffer of the received packet.
1089                  * If the CRC is not stripped by the hardware:
1090                  *   - Subtract the CRC length from the total packet length.
1091                  *   - If the last buffer only contains the whole CRC or a part
1092                  *     of it, free the mbuf associated to the last buffer.
1093                  *     If part of the CRC is also contained in the previous
1094                  *     mbuf, subtract the length of that CRC part from the
1095                  *     data length of the previous mbuf.
1096                  */
1097                 rxm->next = NULL;
1098                 if (unlikely(rxq->crc_len > 0)) {
1099                         first_seg->pkt_len -= ETHER_CRC_LEN;
1100                         if (data_len <= ETHER_CRC_LEN) {
1101                                 rte_pktmbuf_free_seg(rxm);
1102                                 first_seg->nb_segs--;
1103                                 last_seg->data_len = (uint16_t)
1104                                         (last_seg->data_len -
1105                                          (ETHER_CRC_LEN - data_len));
1106                                 last_seg->next = NULL;
1107                         } else
1108                                 rxm->data_len =
1109                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1110                 }
1111
1112                 /*
1113                  * Initialize the first mbuf of the returned packet:
1114                  *    - RX port identifier,
1115                  *    - hardware offload data, if any:
1116                  *      - RSS flag & hash,
1117                  *      - IP checksum flag,
1118                  *      - VLAN TCI, if any,
1119                  *      - error flags.
1120                  */
1121                 first_seg->port = rxq->port_id;
1122                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1123
1124                 /*
1125                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1126                  * set in the pkt_flags field.
1127                  */
1128                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1129                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1130                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1131                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1132                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1133                 first_seg->ol_flags = pkt_flags;
1134                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1135                                         lower.lo_dword.hs_rss.pkt_info);
1136
1137                 /* Prefetch data of first segment, if configured to do so. */
1138                 rte_packet_prefetch((char *)first_seg->buf_addr +
1139                         first_seg->data_off);
1140
1141                 /*
1142                  * Store the mbuf address into the next entry of the array
1143                  * of returned packets.
1144                  */
1145                 rx_pkts[nb_rx++] = first_seg;
1146
1147                 /*
1148                  * Set up the receive context for the next packet.
1149                  */
1150                 first_seg = NULL;
1151         }
1152
1153         /*
1154          * Record index of the next RX descriptor to probe.
1155          */
1156         rxq->rx_tail = rx_id;
1157
1158         /*
1159          * Save receive context.
1160          */
1161         rxq->pkt_first_seg = first_seg;
1162         rxq->pkt_last_seg = last_seg;
1163
1164         /*
1165          * If the number of free RX descriptors is greater than the RX free
1166          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1167          * register.
1168          * Update the RDT with the value of the last processed RX descriptor
1169          * minus 1, to guarantee that the RDT register is never equal to the
1170                  * RDH register, which creates a "full" ring situation from the
1171          * hardware point of view...
1172          */
1173         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1174         if (nb_hold > rxq->rx_free_thresh) {
1175                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1176                            "nb_hold=%u nb_rx=%u",
1177                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1178                            (unsigned) rx_id, (unsigned) nb_hold,
1179                            (unsigned) nb_rx);
1180                 rx_id = (uint16_t) ((rx_id == 0) ?
1181                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1182                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1183                 nb_hold = 0;
1184         }
1185         rxq->nb_rx_hold = nb_hold;
1186         return nb_rx;
1187 }
1188
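/*
 * Illustrative note (not part of the driver): this scattered variant is
 * selected by the driver when a single mbuf cannot hold a full frame, for
 * example when jumbo frames are enabled with a small mbuf data room, or when
 * scattered RX is requested in the port configuration. The rxmode field names
 * below are an assumption based on DPDK releases of this period:
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.rxmode.jumbo_frame = 1;
 *     conf.rxmode.max_rx_pkt_len = 9000;
 *     conf.rxmode.enable_scatter = 1;
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 * Received frames then arrive as mbuf chains; rte_pktmbuf_free() releases the
 * whole chain at once.
 */
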
1189 /*
1190  * Maximum number of Ring Descriptors.
1191  *
1192  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1193  * descriptors should meet the following condition:
1194  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1195  */
1196
1197 static void
1198 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1199 {
1200         unsigned i;
1201
1202         if (txq->sw_ring != NULL) {
1203                 for (i = 0; i < txq->nb_tx_desc; i++) {
1204                         if (txq->sw_ring[i].mbuf != NULL) {
1205                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1206                                 txq->sw_ring[i].mbuf = NULL;
1207                         }
1208                 }
1209         }
1210 }
1211
1212 static void
1213 igb_tx_queue_release(struct igb_tx_queue *txq)
1214 {
1215         if (txq != NULL) {
1216                 igb_tx_queue_release_mbufs(txq);
1217                 rte_free(txq->sw_ring);
1218                 rte_free(txq);
1219         }
1220 }
1221
1222 void
1223 eth_igb_tx_queue_release(void *txq)
1224 {
1225         igb_tx_queue_release(txq);
1226 }
1227
1228 static void
1229 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1230 {
1231         txq->tx_head = 0;
1232         txq->tx_tail = 0;
1233         txq->ctx_curr = 0;
1234         memset((void*)&txq->ctx_cache, 0,
1235                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1236 }
1237
1238 static void
1239 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1240 {
1241         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1242         struct igb_tx_entry *txe = txq->sw_ring;
1243         uint16_t i, prev;
1244         struct e1000_hw *hw;
1245
1246         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1247         /* Zero out HW ring memory */
1248         for (i = 0; i < txq->nb_tx_desc; i++) {
1249                 txq->tx_ring[i] = zeroed_desc;
1250         }
1251
1252         /* Initialize ring entries */
1253         prev = (uint16_t)(txq->nb_tx_desc - 1);
1254         for (i = 0; i < txq->nb_tx_desc; i++) {
1255                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1256
1257                 txd->wb.status = E1000_TXD_STAT_DD;
1258                 txe[i].mbuf = NULL;
1259                 txe[i].last_id = i;
1260                 txe[prev].next_id = i;
1261                 prev = i;
1262         }
1263
1264         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1265         /* 82575 specific, each tx queue will use 2 hw contexts */
1266         if (hw->mac.type == e1000_82575)
1267                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1268
1269         igb_reset_tx_queue_stat(txq);
1270 }
1271
1272 int
1273 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1274                          uint16_t queue_idx,
1275                          uint16_t nb_desc,
1276                          unsigned int socket_id,
1277                          const struct rte_eth_txconf *tx_conf)
1278 {
1279         const struct rte_memzone *tz;
1280         struct igb_tx_queue *txq;
1281         struct e1000_hw     *hw;
1282         uint32_t size;
1283
1284         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1285
1286         /*
1287          * Validate number of transmit descriptors.
1288          * It must not exceed the hardware maximum, and must be a multiple
1289          * of IGB_TXD_ALIGN so that the ring size in bytes is a multiple of E1000_ALIGN.
1290          */
1291         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1292                         (nb_desc > E1000_MAX_RING_DESC) ||
1293                         (nb_desc < E1000_MIN_RING_DESC)) {
1294                 return -EINVAL;
1295         }
1296
1297         /*
1298          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1299          * driver.
1300          */
1301         if (tx_conf->tx_free_thresh != 0)
1302                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1303                              "used for the 1G driver.");
1304         if (tx_conf->tx_rs_thresh != 0)
1305                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1306                              "used for the 1G driver.");
1307         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1308                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1309                              "consider setting the TX WTHRESH value to 4, 8, "
1310                              "or 16.");
1311
1312         /* Free memory prior to re-allocation if needed */
1313         if (dev->data->tx_queues[queue_idx] != NULL) {
1314                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1315                 dev->data->tx_queues[queue_idx] = NULL;
1316         }
1317
1318         /* First allocate the tx queue data structure */
1319         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1320                                                         RTE_CACHE_LINE_SIZE);
1321         if (txq == NULL)
1322                 return -ENOMEM;
1323
1324         /*
1325          * Allocate TX ring hardware descriptors. A memzone large enough to
1326          * handle the maximum ring size is allocated in order to allow for
1327          * resizing in later calls to the queue setup function.
1328          */
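        /*
         * Note: the reservation is sized for E1000_MAX_RING_DESC entries, not
         * for nb_desc, so a later setup call with a larger ring can reuse the
         * same memzone.
         */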
1329         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1330         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1331                                       E1000_ALIGN, socket_id);
1332         if (tz == NULL) {
1333                 igb_tx_queue_release(txq);
1334                 return -ENOMEM;
1335         }
1336
1337         txq->nb_tx_desc = nb_desc;
1338         txq->pthresh = tx_conf->tx_thresh.pthresh;
1339         txq->hthresh = tx_conf->tx_thresh.hthresh;
1340         txq->wthresh = tx_conf->tx_thresh.wthresh;
1341         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1342                 txq->wthresh = 1;
1343         txq->queue_id = queue_idx;
1344         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1345                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1346         txq->port_id = dev->data->port_id;
1347
1348         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1349         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1350
1351         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1352         /* Allocate software ring */
1353         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1354                                    sizeof(struct igb_tx_entry) * nb_desc,
1355                                    RTE_CACHE_LINE_SIZE);
1356         if (txq->sw_ring == NULL) {
1357                 igb_tx_queue_release(txq);
1358                 return -ENOMEM;
1359         }
1360         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1361                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1362
1363         igb_reset_tx_queue(txq, dev);
1364         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1365         dev->data->tx_queues[queue_idx] = txq;
1366
1367         return 0;
1368 }
1369
1370 static void
1371 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1372 {
1373         unsigned i;
1374
1375         if (rxq->sw_ring != NULL) {
1376                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1377                         if (rxq->sw_ring[i].mbuf != NULL) {
1378                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1379                                 rxq->sw_ring[i].mbuf = NULL;
1380                         }
1381                 }
1382         }
1383 }
1384
1385 static void
1386 igb_rx_queue_release(struct igb_rx_queue *rxq)
1387 {
1388         if (rxq != NULL) {
1389                 igb_rx_queue_release_mbufs(rxq);
1390                 rte_free(rxq->sw_ring);
1391                 rte_free(rxq);
1392         }
1393 }
1394
1395 void
1396 eth_igb_rx_queue_release(void *rxq)
1397 {
1398         igb_rx_queue_release(rxq);
1399 }
1400
1401 static void
1402 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1403 {
1404         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1405         unsigned i;
1406
1407         /* Zero out HW ring memory */
1408         for (i = 0; i < rxq->nb_rx_desc; i++) {
1409                 rxq->rx_ring[i] = zeroed_desc;
1410         }
1411
1412         rxq->rx_tail = 0;
1413         rxq->pkt_first_seg = NULL;
1414         rxq->pkt_last_seg = NULL;
1415 }
1416
1417 int
1418 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1419                          uint16_t queue_idx,
1420                          uint16_t nb_desc,
1421                          unsigned int socket_id,
1422                          const struct rte_eth_rxconf *rx_conf,
1423                          struct rte_mempool *mp)
1424 {
1425         const struct rte_memzone *rz;
1426         struct igb_rx_queue *rxq;
1427         struct e1000_hw     *hw;
1428         unsigned int size;
1429
1430         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431
1432         /*
1433          * Validate number of receive descriptors.
1434          * It must not exceed the hardware maximum, and must be a multiple
1435          * of IGB_RXD_ALIGN so that the ring size in bytes is a multiple of E1000_ALIGN.
1436          */
1437         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1438                         (nb_desc > E1000_MAX_RING_DESC) ||
1439                         (nb_desc < E1000_MIN_RING_DESC)) {
1440                 return -EINVAL;
1441         }
1442
1443         /* Free memory prior to re-allocation if needed */
1444         if (dev->data->rx_queues[queue_idx] != NULL) {
1445                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1446                 dev->data->rx_queues[queue_idx] = NULL;
1447         }
1448
1449         /* First allocate the RX queue data structure. */
1450         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1451                           RTE_CACHE_LINE_SIZE);
1452         if (rxq == NULL)
1453                 return -ENOMEM;
1454         rxq->mb_pool = mp;
1455         rxq->nb_rx_desc = nb_desc;
1456         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1457         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1458         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1459         if (rxq->wthresh > 0 &&
1460             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1461                 rxq->wthresh = 1;
1462         rxq->drop_en = rx_conf->rx_drop_en;
1463         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1464         rxq->queue_id = queue_idx;
1465         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1466                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1467         rxq->port_id = dev->data->port_id;
1468         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1469                                   ETHER_CRC_LEN);
1470
1471         /*
1472          *  Allocate RX ring hardware descriptors. A memzone large enough to
1473          *  handle the maximum ring size is allocated in order to allow for
1474          *  resizing in later calls to the queue setup function.
1475          */
1476         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1477         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1478                                       E1000_ALIGN, socket_id);
1479         if (rz == NULL) {
1480                 igb_rx_queue_release(rxq);
1481                 return -ENOMEM;
1482         }
1483         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1484         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1485         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1486         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1487
1488         /* Allocate software ring. */
1489         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1490                                    sizeof(struct igb_rx_entry) * nb_desc,
1491                                    RTE_CACHE_LINE_SIZE);
1492         if (rxq->sw_ring == NULL) {
1493                 igb_rx_queue_release(rxq);
1494                 return -ENOMEM;
1495         }
1496         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1497                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1498
1499         dev->data->rx_queues[queue_idx] = rxq;
1500         igb_reset_rx_queue(rxq);
1501
1502         return 0;
1503 }
1504
1505 uint32_t
1506 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1507 {
1508 #define IGB_RXQ_SCAN_INTERVAL 4
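        /*
         * The ring is scanned in steps of IGB_RXQ_SCAN_INTERVAL descriptors,
         * so the value accumulated below is an approximation rounded to a
         * multiple of 4, not an exact count of completed descriptors.
         */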
1509         volatile union e1000_adv_rx_desc *rxdp;
1510         struct igb_rx_queue *rxq;
1511         uint32_t desc = 0;
1512
1513         if (rx_queue_id >= dev->data->nb_rx_queues) {
1514                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1515                 return 0;
1516         }
1517
1518         rxq = dev->data->rx_queues[rx_queue_id];
1519         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1520
1521         while ((desc < rxq->nb_rx_desc) &&
1522                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1523                 desc += IGB_RXQ_SCAN_INTERVAL;
1524                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1525                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1526                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1527                                 desc - rxq->nb_rx_desc]);
1528         }
1529
1530         return desc;
1531 }
1532
1533 int
1534 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1535 {
1536         volatile union e1000_adv_rx_desc *rxdp;
1537         struct igb_rx_queue *rxq = rx_queue;
1538         uint32_t desc;
1539
1540         if (unlikely(offset >= rxq->nb_rx_desc))
1541                 return 0;
1542         desc = rxq->rx_tail + offset;
1543         if (desc >= rxq->nb_rx_desc)
1544                 desc -= rxq->nb_rx_desc;
1545
1546         rxdp = &rxq->rx_ring[desc];
1547         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1548 }
1549
1550 void
1551 igb_dev_clear_queues(struct rte_eth_dev *dev)
1552 {
1553         uint16_t i;
1554         struct igb_tx_queue *txq;
1555         struct igb_rx_queue *rxq;
1556
1557         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1558                 txq = dev->data->tx_queues[i];
1559                 if (txq != NULL) {
1560                         igb_tx_queue_release_mbufs(txq);
1561                         igb_reset_tx_queue(txq, dev);
1562                 }
1563         }
1564
1565         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1566                 rxq = dev->data->rx_queues[i];
1567                 if (rxq != NULL) {
1568                         igb_rx_queue_release_mbufs(rxq);
1569                         igb_reset_rx_queue(rxq);
1570                 }
1571         }
1572 }
1573
1574 void
1575 igb_dev_free_queues(struct rte_eth_dev *dev)
1576 {
1577         uint16_t i;
1578
1579         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1580                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1581                 dev->data->rx_queues[i] = NULL;
1582         }
1583         dev->data->nb_rx_queues = 0;
1584
1585         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1586                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1587                 dev->data->tx_queues[i] = NULL;
1588         }
1589         dev->data->nb_tx_queues = 0;
1590 }
1591
1592 /**
1593  * Receive Side Scaling (RSS).
1594  * See section 7.1.1.7 in the following document:
1595  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1596  *
1597  * Principles:
1598  * The source and destination IP addresses of the IP header and the source and
1599  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1600  * against a configurable random key to compute a 32-bit RSS hash result.
1601  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1602  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1603  * RSS output index which is used as the RX queue index where to store the
1604  * received packets.
1605  * The following output is supplied in the RX write-back descriptor:
1606  *     - 32-bit result of the Microsoft RSS hash function,
1607  *     - 4-bit RSS type field.
1608  */
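/*
 * Illustrative lookup (not driver code): for a packet whose 32-bit hash is
 * rss_hash, the destination queue is effectively
 *     rx_queue = RETA[rss_hash & 0x7F];
 * where each of the 128 single-byte RETA entries holds a 3-bit queue index
 * programmed by igb_rss_configure() below.
 */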
1609
1610 /*
1611  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1612  * Used as the default key.
1613  */
1614 static uint8_t rss_intel_key[40] = {
1615         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1616         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1617         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1618         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1619         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1620 };
1621
1622 static void
1623 igb_rss_disable(struct rte_eth_dev *dev)
1624 {
1625         struct e1000_hw *hw;
1626         uint32_t mrqc;
1627
1628         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1630         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1631         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1632 }
1633
1634 static void
1635 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1636 {
1637         uint8_t  *hash_key;
1638         uint32_t rss_key;
1639         uint32_t mrqc;
1640         uint64_t rss_hf;
1641         uint16_t i;
1642
1643         hash_key = rss_conf->rss_key;
1644         if (hash_key != NULL) {
1645                 /* Fill in RSS hash key */
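                /*
                 * Each RSSRK register packs four key bytes little-endian;
                 * e.g. the default key bytes 6D 5A 56 DA are written as
                 * RSSRK(0) = 0xDA565A6D.
                 */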
1646                 for (i = 0; i < 10; i++) {
1647                         rss_key  = hash_key[(i * 4)];
1648                         rss_key |= hash_key[(i * 4) + 1] << 8;
1649                         rss_key |= hash_key[(i * 4) + 2] << 16;
1650                         rss_key |= hash_key[(i * 4) + 3] << 24;
1651                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1652                 }
1653         }
1654
1655         /* Set configured hashing protocols in MRQC register */
1656         rss_hf = rss_conf->rss_hf;
1657         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1658         if (rss_hf & ETH_RSS_IPV4)
1659                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1660         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1661                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1662         if (rss_hf & ETH_RSS_IPV6)
1663                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1664         if (rss_hf & ETH_RSS_IPV6_EX)
1665                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1666         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1667                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1668         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1669                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1670         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1671                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1672         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1673                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1674         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1675                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1676         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1677 }
1678
1679 int
1680 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1681                         struct rte_eth_rss_conf *rss_conf)
1682 {
1683         struct e1000_hw *hw;
1684         uint32_t mrqc;
1685         uint64_t rss_hf;
1686
1687         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1688
1689         /*
1690          * Before changing anything, first check that the update RSS operation
1691          * does not attempt to disable RSS, if RSS was enabled at
1692          * initialization time, or does not attempt to enable RSS, if RSS was
1693          * disabled at initialization time.
1694          */
1695         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1696         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1697         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1698                 if (rss_hf != 0) /* Enable RSS */
1699                         return -(EINVAL);
1700                 return 0; /* Nothing to do */
1701         }
1702         /* RSS enabled */
1703         if (rss_hf == 0) /* Disable RSS */
1704                 return -(EINVAL);
1705         igb_hw_rss_hash_set(hw, rss_conf);
1706         return 0;
1707 }
1708
1709 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1710                               struct rte_eth_rss_conf *rss_conf)
1711 {
1712         struct e1000_hw *hw;
1713         uint8_t *hash_key;
1714         uint32_t rss_key;
1715         uint32_t mrqc;
1716         uint64_t rss_hf;
1717         uint16_t i;
1718
1719         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1720         hash_key = rss_conf->rss_key;
1721         if (hash_key != NULL) {
1722                 /* Return RSS hash key */
1723                 for (i = 0; i < 10; i++) {
1724                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1725                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1726                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1727                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1728                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1729                 }
1730         }
1731
1732         /* Get RSS functions configured in MRQC register */
1733         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1734         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1735                 rss_conf->rss_hf = 0;
1736                 return 0;
1737         }
1738         rss_hf = 0;
1739         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1740                 rss_hf |= ETH_RSS_IPV4;
1741         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1742                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1743         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1744                 rss_hf |= ETH_RSS_IPV6;
1745         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1746                 rss_hf |= ETH_RSS_IPV6_EX;
1747         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1748                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1749         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1750                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1751         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1752                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1753         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1754                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1755         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1756                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1757         rss_conf->rss_hf = rss_hf;
1758         return 0;
1759 }
1760
1761 static void
1762 igb_rss_configure(struct rte_eth_dev *dev)
1763 {
1764         struct rte_eth_rss_conf rss_conf;
1765         struct e1000_hw *hw;
1766         uint32_t shift;
1767         uint16_t i;
1768
1769         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1770
1771         /* Fill in redirection table. */
1772         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
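        /*
         * Each 32-bit RETA register covers four consecutive entries, so a
         * full register is written on every fourth iteration. With, say, four
         * RX queues the entries simply cycle 0,1,2,3,0,1,... (on 82575 the
         * queue index additionally sits in the upper bits of each entry).
         */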
1773         for (i = 0; i < 128; i++) {
1774                 union e1000_reta {
1775                         uint32_t dword;
1776                         uint8_t  bytes[4];
1777                 } reta;
1778                 uint8_t q_idx;
1779
1780                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1781                                    i % dev->data->nb_rx_queues : 0);
1782                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1783                 if ((i & 3) == 3)
1784                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1785         }
1786
1787         /*
1788          * Configure the RSS key and the RSS protocols used to compute
1789          * the RSS hash of input packets.
1790          */
1791         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1792         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1793                 igb_rss_disable(dev);
1794                 return;
1795         }
1796         if (rss_conf.rss_key == NULL)
1797                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1798         igb_hw_rss_hash_set(hw, &rss_conf);
1799 }
1800
1801 /*
1802  * Check whether the MAC type supports VMDq.
1803  * Return 1 if it does, otherwise return 0.
1804  */
1805 static int
1806 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1807 {
1808         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1809
1810         switch (hw->mac.type) {
1811         case e1000_82576:
1812         case e1000_82580:
1813         case e1000_i350:
1814                 return 1;
1815         case e1000_82540:
1816         case e1000_82541:
1817         case e1000_82542:
1818         case e1000_82543:
1819         case e1000_82544:
1820         case e1000_82545:
1821         case e1000_82546:
1822         case e1000_82547:
1823         case e1000_82571:
1824         case e1000_82572:
1825         case e1000_82573:
1826         case e1000_82574:
1827         case e1000_82583:
1828         case e1000_i210:
1829         case e1000_i211:
1830         default:
1831                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1832                 return 0;
1833         }
1834 }
1835
1836 static int
1837 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1838 {
1839         struct rte_eth_vmdq_rx_conf *cfg;
1840         struct e1000_hw *hw;
1841         uint32_t mrqc, vt_ctl, vmolr, rctl;
1842         int i;
1843
1844         PMD_INIT_FUNC_TRACE();
1845
1846         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1848
1849         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1850         if (igb_is_vmdq_supported(dev) == 0)
1851                 return -1;
1852
1853         igb_rss_disable(dev);
1854
1855         /* RCTL: enable VLAN filter */
1856         rctl = E1000_READ_REG(hw, E1000_RCTL);
1857         rctl |= E1000_RCTL_VFE;
1858         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1859
1860         /* MRQC: enable vmdq */
1861         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1862         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1863         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1864
1865         /* VTCTL:  pool selection according to VLAN tag */
1866         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1867         if (cfg->enable_default_pool)
1868                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1869         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1870         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1871
1872         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1873                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1874                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1875                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1876                         E1000_VMOLR_MPME);
1877
1878                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1879                         vmolr |= E1000_VMOLR_AUPE;
1880                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1881                         vmolr |= E1000_VMOLR_ROMPE;
1882                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1883                         vmolr |= E1000_VMOLR_ROPE;
1884                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1885                         vmolr |= E1000_VMOLR_BAM;
1886                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1887                         vmolr |= E1000_VMOLR_MPME;
1888
1889                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1890         }
1891
1892         /*
1893          * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
1894          * Both 82576 and 82580 support it
1895          */
1896         if (hw->mac.type != e1000_i350) {
1897                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1898                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1899                         vmolr |= E1000_VMOLR_STRVLAN;
1900                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1901                 }
1902         }
1903
1904         /* VFTA - enable all vlan filters */
1905         for (i = 0; i < IGB_VFTA_SIZE; i++)
1906                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1907
1908         /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1909         if (hw->mac.type != e1000_82580)
1910                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1911
1912         /*
1913          * RAH/RAL - allow pools to read specific mac addresses
1914          * In this case, all pools should be able to read from mac addr 0
1915          */
1916         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1917         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1918
1919         /* VLVF: set up filters for vlan tags as configured */
1920         for (i = 0; i < cfg->nb_pool_maps; i++) {
1921                 /* set vlan id in VF register and set the valid bit */
1922                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1923                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1924                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1925                         E1000_VLVF_POOLSEL_MASK)));
1926         }
1927
1928         E1000_WRITE_FLUSH(hw);
1929
1930         return 0;
1931 }
1932
1933
1934 /*********************************************************************
1935  *
1936  *  Enable receive unit.
1937  *
1938  **********************************************************************/
1939
1940 static int
1941 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1942 {
1943         struct igb_rx_entry *rxe = rxq->sw_ring;
1944         uint64_t dma_addr;
1945         unsigned i;
1946
1947         /* Initialize software ring entries. */
1948         for (i = 0; i < rxq->nb_rx_desc; i++) {
1949                 volatile union e1000_adv_rx_desc *rxd;
1950                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1951
1952                 if (mbuf == NULL) {
1953                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1954                                      "queue_id=%hu", rxq->queue_id);
1955                         return -ENOMEM;
1956                 }
1957                 dma_addr =
1958                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1959                 rxd = &rxq->rx_ring[i];
1960                 rxd->read.hdr_addr = 0;
1961                 rxd->read.pkt_addr = dma_addr;
1962                 rxe[i].mbuf = mbuf;
1963         }
1964
1965         return 0;
1966 }
1967
1968 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1969 static int
1970 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1971 {
1972         struct e1000_hw *hw =
1973                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1974         uint32_t mrqc;
1975
1976         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1977                 /*
1978                  * SRIOV active scheme
1979                  * FIXME if support RSS together with VMDq & SRIOV
1980                  */
1981                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1982                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1983                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1984                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1985         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1986                 /*
1987                  * SRIOV inactive scheme
1988                  */
1989                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1990                         case ETH_MQ_RX_RSS:
1991                                 igb_rss_configure(dev);
1992                                 break;
1993                         case ETH_MQ_RX_VMDQ_ONLY:
1994                         /* Configure general VMDq-only RX parameters */
1995                                 igb_vmdq_rx_hw_configure(dev);
1996                                 break;
1997                         case ETH_MQ_RX_NONE:
1998                         /* If mq_mode is none, disable RSS. */
1999                         default:
2000                                 igb_rss_disable(dev);
2001                                 break;
2002                 }
2003         }
2004
2005         return 0;
2006 }
2007
2008 int
2009 eth_igb_rx_init(struct rte_eth_dev *dev)
2010 {
2011         struct e1000_hw     *hw;
2012         struct igb_rx_queue *rxq;
2013         uint32_t rctl;
2014         uint32_t rxcsum;
2015         uint32_t srrctl;
2016         uint16_t buf_size;
2017         uint16_t rctl_bsize;
2018         uint16_t i;
2019         int ret;
2020
2021         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2022         srrctl = 0;
2023
2024         /*
2025          * Make sure receives are disabled while setting
2026          * up the descriptor ring.
2027          */
2028         rctl = E1000_READ_REG(hw, E1000_RCTL);
2029         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2030
2031         /*
2032          * Configure support of jumbo frames, if any.
2033          */
2034         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2035                 rctl |= E1000_RCTL_LPE;
2036
2037                 /*
2038                  * Set the maximum packet length by default; it may be
2039                  * updated later when dual VLAN is enabled or disabled.
2040                  */
2041                 E1000_WRITE_REG(hw, E1000_RLPML,
2042                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2043                                                 VLAN_TAG_SIZE);
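                /*
                 * For example, a 9000-byte max_rx_pkt_len is programmed here
                 * as 9004 (one 4-byte VLAN tag of allowance); enabling dual
                 * VLAN later updates RLPML again.
                 */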
2044         } else
2045                 rctl &= ~E1000_RCTL_LPE;
2046
2047         /* Configure and enable each RX queue. */
2048         rctl_bsize = 0;
2049         dev->rx_pkt_burst = eth_igb_recv_pkts;
2050         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2051                 uint64_t bus_addr;
2052                 uint32_t rxdctl;
2053
2054                 rxq = dev->data->rx_queues[i];
2055
2056                 /* Allocate buffers for descriptor rings and set up queue */
2057                 ret = igb_alloc_rx_queue_mbufs(rxq);
2058                 if (ret)
2059                         return ret;
2060
2061                 /*
2062                  * Reset crc_len in case it was changed after queue setup by a
2063                  *  call to configure
2064                  */
2065                 rxq->crc_len =
2066                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2067                                                         0 : ETHER_CRC_LEN);
2068
2069                 bus_addr = rxq->rx_ring_phys_addr;
2070                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2071                                 rxq->nb_rx_desc *
2072                                 sizeof(union e1000_adv_rx_desc));
2073                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2074                                 (uint32_t)(bus_addr >> 32));
2075                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2076
2077                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2078
2079                 /*
2080                  * Configure RX buffer size.
2081                  */
2082                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2083                         RTE_PKTMBUF_HEADROOM);
2084                 if (buf_size >= 1024) {
2085                         /*
2086                          * Configure the BSIZEPACKET field of the SRRCTL
2087                          * register of the queue.
2088                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2089                          * If this field is equal to 0b, then RCTL.BSIZE
2090                          * determines the RX packet buffer size.
2091                          */
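                        /*
                         * Example: a 2048-byte buffer yields a BSIZEPACKET
                         * field of 2; buf_size is then recomputed below so
                         * the scatter decision uses the rounded value the
                         * hardware will actually apply.
                         */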
2092                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2093                                    E1000_SRRCTL_BSIZEPKT_MASK);
2094                         buf_size = (uint16_t) ((srrctl &
2095                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2096                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2097
2098                         /* Account for the extra dual VLAN header length when dual VLAN is in use */
2099                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2100                                                 2 * VLAN_TAG_SIZE) > buf_size){
2101                                 if (!dev->data->scattered_rx)
2102                                         PMD_INIT_LOG(DEBUG,
2103                                                      "forcing scatter mode");
2104                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2105                                 dev->data->scattered_rx = 1;
2106                         }
2107                 } else {
2108                         /*
2109                          * Use BSIZE field of the device RCTL register.
2110                          */
2111                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2112                                 rctl_bsize = buf_size;
2113                         if (!dev->data->scattered_rx)
2114                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2115                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2116                         dev->data->scattered_rx = 1;
2117                 }
2118
2119                 /* Set if packets are dropped when no descriptors available */
2120                 if (rxq->drop_en)
2121                         srrctl |= E1000_SRRCTL_DROP_EN;
2122
2123                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2124
2125                 /* Enable this RX queue. */
2126                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2127                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2128                 rxdctl &= 0xFFF00000;
2129                 rxdctl |= (rxq->pthresh & 0x1F);
2130                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2131                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2132                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2133         }
2134
2135         if (dev->data->dev_conf.rxmode.enable_scatter) {
2136                 if (!dev->data->scattered_rx)
2137                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2138                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2139                 dev->data->scattered_rx = 1;
2140         }
2141
2142         /*
2143          * Setup BSIZE field of RCTL register, if needed.
2144          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2145          * register, since the code above configures the SRRCTL register of
2146          * the RX queue in such a case.
2147          * All configurable sizes are:
2148          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2149          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2150          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2151          *  2048: rctl |= E1000_RCTL_SZ_2048;
2152          *  1024: rctl |= E1000_RCTL_SZ_1024;
2153          *   512: rctl |= E1000_RCTL_SZ_512;
2154          *   256: rctl |= E1000_RCTL_SZ_256;
2155          */
2156         if (rctl_bsize > 0) {
2157                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2158                         rctl |= E1000_RCTL_SZ_512;
2159                 else /* 256 <= buf_size < 512 - use 256 */
2160                         rctl |= E1000_RCTL_SZ_256;
2161         }
2162
2163         /*
2164          * Configure RSS if device configured with multiple RX queues.
2165          */
2166         igb_dev_mq_rx_configure(dev);
2167
2168         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2169         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2170
2171         /*
2172          * Setup the Checksum Register.
2173          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2174          */
2175         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2176         rxcsum |= E1000_RXCSUM_PCSD;
2177
2178         /* Enable both L3/L4 rx checksum offload */
2179         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2180                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2181         else
2182                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2183         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2184
2185         /* Setup the Receive Control Register. */
2186         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2187                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2188
2189                 /* set STRCRC bit in all queues */
2190                 if (hw->mac.type == e1000_i350 ||
2191                     hw->mac.type == e1000_i210 ||
2192                     hw->mac.type == e1000_i211 ||
2193                     hw->mac.type == e1000_i354) {
2194                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2195                                 rxq = dev->data->rx_queues[i];
2196                                 uint32_t dvmolr = E1000_READ_REG(hw,
2197                                         E1000_DVMOLR(rxq->reg_idx));
2198                                 dvmolr |= E1000_DVMOLR_STRCRC;
2199                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2200                         }
2201                 }
2202         } else {
2203                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2204
2205                 /* clear STRCRC bit in all queues */
2206                 if (hw->mac.type == e1000_i350 ||
2207                     hw->mac.type == e1000_i210 ||
2208                     hw->mac.type == e1000_i211 ||
2209                     hw->mac.type == e1000_i354) {
2210                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2211                                 rxq = dev->data->rx_queues[i];
2212                                 uint32_t dvmolr = E1000_READ_REG(hw,
2213                                         E1000_DVMOLR(rxq->reg_idx));
2214                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2215                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2216                         }
2217                 }
2218         }
2219
2220         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2221         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2222                 E1000_RCTL_RDMTS_HALF |
2223                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2224
2225         /* Make sure VLAN Filters are off. */
2226         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2227                 rctl &= ~E1000_RCTL_VFE;
2228         /* Don't store bad packets. */
2229         rctl &= ~E1000_RCTL_SBP;
2230
2231         /* Enable Receives. */
2232         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2233
2234         /*
2235          * Setup the HW Rx Head and Tail Descriptor Pointers.
2236          * This needs to be done after enable.
2237          */
2238         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2239                 rxq = dev->data->rx_queues[i];
2240                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2241                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2242         }
2243
2244         return 0;
2245 }
2246
2247 /*********************************************************************
2248  *
2249  *  Enable transmit unit.
2250  *
2251  **********************************************************************/
2252 void
2253 eth_igb_tx_init(struct rte_eth_dev *dev)
2254 {
2255         struct e1000_hw     *hw;
2256         struct igb_tx_queue *txq;
2257         uint32_t tctl;
2258         uint32_t txdctl;
2259         uint16_t i;
2260
2261         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2262
2263         /* Setup the Base and Length of the Tx Descriptor Rings. */
2264         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2265                 uint64_t bus_addr;
2266                 txq = dev->data->tx_queues[i];
2267                 bus_addr = txq->tx_ring_phys_addr;
2268
2269                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2270                                 txq->nb_tx_desc *
2271                                 sizeof(union e1000_adv_tx_desc));
2272                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2273                                 (uint32_t)(bus_addr >> 32));
2274                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2275
2276                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2277                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2278                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2279
2280                 /* Setup Transmit threshold registers. */
2281                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2282                 txdctl |= txq->pthresh & 0x1F;
2283                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2284                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2285                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2286                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2287         }
2288
2289         /* Program the Transmit Control Register. */
2290         tctl = E1000_READ_REG(hw, E1000_TCTL);
2291         tctl &= ~E1000_TCTL_CT;
2292         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2293                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2294
2295         e1000_config_collision_dist(hw);
2296
2297         /* This write will effectively turn on the transmit unit. */
2298         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2299 }
2300
2301 /*********************************************************************
2302  *
2303  *  Enable VF receive unit.
2304  *
2305  **********************************************************************/
2306 int
2307 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2308 {
2309         struct e1000_hw     *hw;
2310         struct igb_rx_queue *rxq;
2311         uint32_t srrctl;
2312         uint16_t buf_size;
2313         uint16_t rctl_bsize;
2314         uint16_t i;
2315         int ret;
2316
2317         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2318
2319         /* setup MTU */
2320         e1000_rlpml_set_vf(hw,
2321                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2322                 VLAN_TAG_SIZE));
2323
2324         /* Configure and enable each RX queue. */
2325         rctl_bsize = 0;
2326         dev->rx_pkt_burst = eth_igb_recv_pkts;
2327         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2328                 uint64_t bus_addr;
2329                 uint32_t rxdctl;
2330
2331                 rxq = dev->data->rx_queues[i];
2332
2333                 /* Allocate buffers for descriptor rings and set up queue */
2334                 ret = igb_alloc_rx_queue_mbufs(rxq);
2335                 if (ret)
2336                         return ret;
2337
2338                 bus_addr = rxq->rx_ring_phys_addr;
2339                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2340                                 rxq->nb_rx_desc *
2341                                 sizeof(union e1000_adv_rx_desc));
2342                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2343                                 (uint32_t)(bus_addr >> 32));
2344                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2345
2346                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2347
2348                 /*
2349                  * Configure RX buffer size.
2350                  */
2351                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2352                         RTE_PKTMBUF_HEADROOM);
2353                 if (buf_size >= 1024) {
2354                         /*
2355                          * Configure the BSIZEPACKET field of the SRRCTL
2356                          * register of the queue.
2357                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2358                          * If this field is equal to 0b, then RCTL.BSIZE
2359                          * determines the RX packet buffer size.
2360                          */
2361                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2362                                    E1000_SRRCTL_BSIZEPKT_MASK);
2363                         buf_size = (uint16_t) ((srrctl &
2364                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2365                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2366
2367                         /* Account for the extra dual VLAN header length when dual VLAN is in use */
2368                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2369                                                 2 * VLAN_TAG_SIZE) > buf_size){
2370                                 if (!dev->data->scattered_rx)
2371                                         PMD_INIT_LOG(DEBUG,
2372                                                      "forcing scatter mode");
2373                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2374                                 dev->data->scattered_rx = 1;
2375                         }
2376                 } else {
2377                         /*
2378                          * Use BSIZE field of the device RCTL register.
2379                          */
2380                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2381                                 rctl_bsize = buf_size;
2382                         if (!dev->data->scattered_rx)
2383                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2384                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2385                         dev->data->scattered_rx = 1;
2386                 }
2387
2388                 /* Set if packets are dropped when no descriptors available */
2389                 if (rxq->drop_en)
2390                         srrctl |= E1000_SRRCTL_DROP_EN;
2391
2392                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2393
2394                 /* Enable this RX queue. */
2395                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2396                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2397                 rxdctl &= 0xFFF00000;
2398                 rxdctl |= (rxq->pthresh & 0x1F);
2399                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2400                 if (hw->mac.type == e1000_vfadapt) {
2401                         /*
2402                          * Workaround of 82576 VF Erratum
2403                          * force set WTHRESH to 1
2404                          * to avoid Write-Back not triggered sometimes
2405                          */
2406                         rxdctl |= 0x10000;
2407                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2408                 }
2409                 else
2410                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2411                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2412         }
2413
2414         if (dev->data->dev_conf.rxmode.enable_scatter) {
2415                 if (!dev->data->scattered_rx)
2416                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2417                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2418                 dev->data->scattered_rx = 1;
2419         }
2420
2421         /*
2422          * Setup the HW Rx Head and Tail Descriptor Pointers.
2423          * This needs to be done after enable.
2424          */
2425         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2426                 rxq = dev->data->rx_queues[i];
2427                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2428                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2429         }
2430
2431         return 0;
2432 }
2433
2434 /*********************************************************************
2435  *
2436  *  Enable VF transmit unit.
2437  *
2438  **********************************************************************/
2439 void
2440 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2441 {
2442         struct e1000_hw     *hw;
2443         struct igb_tx_queue *txq;
2444         uint32_t txdctl;
2445         uint16_t i;
2446
2447         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2448
2449         /* Setup the Base and Length of the Tx Descriptor Rings. */
2450         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2451                 uint64_t bus_addr;
2452
2453                 txq = dev->data->tx_queues[i];
2454                 bus_addr = txq->tx_ring_phys_addr;
2455                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2456                                 txq->nb_tx_desc *
2457                                 sizeof(union e1000_adv_tx_desc));
2458                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2459                                 (uint32_t)(bus_addr >> 32));
2460                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2461
2462                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2463                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2464                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2465
2466                 /* Setup Transmit threshold registers. */
2467                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2468                 txdctl |= txq->pthresh & 0x1F;
2469                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2470                 if (hw->mac.type == e1000_82576) {
2471                         /*
2472                          * Workaround of 82576 VF Erratum
2473                          * force set WTHRESH to 1
2474                          * to avoid Write-Back not triggered sometimes
2475                          */
2476                         txdctl |= 0x10000;
2477                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2478                 }
2479                 else
2480                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2481                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2482                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2483         }
2484
2485 }
2486
2487 void
2488 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2489         struct rte_eth_rxq_info *qinfo)
2490 {
2491         struct igb_rx_queue *rxq;
2492
2493         rxq = dev->data->rx_queues[queue_id];
2494
2495         qinfo->mp = rxq->mb_pool;
2496         qinfo->scattered_rx = dev->data->scattered_rx;
2497         qinfo->nb_desc = rxq->nb_rx_desc;
2498
2499         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2500         qinfo->conf.rx_drop_en = rxq->drop_en;
2501 }
2502
2503 void
2504 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2505         struct rte_eth_txq_info *qinfo)
2506 {
2507         struct igb_tx_queue *txq;
2508
2509         txq = dev->data->tx_queues[queue_id];
2510
2511         qinfo->nb_desc = txq->nb_tx_desc;
2512
2513         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2514         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2515         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2516 }