igb: enable TSO support
[dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit Mask to indicate what bits required for building TX context */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK |                 \
80                 PKT_TX_TCP_SEG)
81
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
84 {
85         struct rte_mbuf *m;
86
87         m = __rte_mbuf_raw_alloc(mp);
88         __rte_mbuf_sanity_check_raw(m, 0);
89         return (m);
90 }
91
92 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
93         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
94
95 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
96         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
97
98 /**
99  * Structure associated with each descriptor of the RX ring of a RX queue.
100  */
101 struct igb_rx_entry {
102         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
103 };
104
105 /**
106  * Structure associated with each descriptor of the TX ring of a TX queue.
107  */
108 struct igb_tx_entry {
109         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
110         uint16_t next_id; /**< Index of next descriptor in ring. */
111         uint16_t last_id; /**< Index of last scattered descriptor. */
112 };
113
114 /**
115  * Structure associated with each RX queue.
116  */
117 struct igb_rx_queue {
118         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
119         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
120         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
121         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
122         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
123         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
124         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
125         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
126         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
127         uint16_t            rx_tail;    /**< current value of RDT register. */
128         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
129         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
130         uint16_t            queue_id;   /**< RX queue index. */
131         uint16_t            reg_idx;    /**< RX queue register index. */
132         uint8_t             port_id;    /**< Device port identifier. */
133         uint8_t             pthresh;    /**< Prefetch threshold register. */
134         uint8_t             hthresh;    /**< Host threshold register. */
135         uint8_t             wthresh;    /**< Write-back threshold register. */
136         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
137         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
138 };
139
140 /**
141  * Hardware context number
142  */
143 enum igb_advctx_num {
144         IGB_CTX_0    = 0, /**< CTX0    */
145         IGB_CTX_1    = 1, /**< CTX1    */
146         IGB_CTX_NUM  = 2, /**< CTX_NUM */
147 };
148
149 /** Offload features */
150 union igb_tx_offload {
151         uint64_t data;
152         struct {
153                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
154                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
155                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
156                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
157                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
158
159                 /* uint64_t unused:8; */
160         };
161 };
162
163 /*
164  * Compare mask for igb_tx_offload.data,
165  * should be in sync with igb_tx_offload layout.
166  * */
167 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
168 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
169 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
170 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
171 /** Mac + IP + TCP + Mss mask. */
172 #define TX_TSO_CMP_MASK \
173         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
174
175 /**
176  * Strucutre to check if new context need be built
177  */
178 struct igb_advctx_info {
179         uint64_t flags;           /**< ol_flags related to context build. */
180         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
181         union igb_tx_offload tx_offload;
182         /** compare mask for tx offload. */
183         union igb_tx_offload tx_offload_mask;
184 };
185
186 /**
187  * Structure associated with each TX queue.
188  */
189 struct igb_tx_queue {
190         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
191         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
192         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
193         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
194         uint32_t               txd_type;      /**< Device-specific TXD type */
195         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
196         uint16_t               tx_tail; /**< Current value of TDT register. */
197         uint16_t               tx_head;
198         /**< Index of first used TX descriptor. */
199         uint16_t               queue_id; /**< TX queue index. */
200         uint16_t               reg_idx;  /**< TX queue register index. */
201         uint8_t                port_id;  /**< Device port identifier. */
202         uint8_t                pthresh;  /**< Prefetch threshold register. */
203         uint8_t                hthresh;  /**< Host threshold register. */
204         uint8_t                wthresh;  /**< Write-back threshold register. */
205         uint32_t               ctx_curr;
206         /**< Current used hardware descriptor. */
207         uint32_t               ctx_start;
208         /**< Start context position for transmit queue. */
209         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
210         /**< Hardware context history.*/
211 };
212
213 #if 1
214 #define RTE_PMD_USE_PREFETCH
215 #endif
216
217 #ifdef RTE_PMD_USE_PREFETCH
218 #define rte_igb_prefetch(p)     rte_prefetch0(p)
219 #else
220 #define rte_igb_prefetch(p)     do {} while(0)
221 #endif
222
223 #ifdef RTE_PMD_PACKET_PREFETCH
224 #define rte_packet_prefetch(p) rte_prefetch1(p)
225 #else
226 #define rte_packet_prefetch(p)  do {} while(0)
227 #endif
228
229 /*
230  * Macro for VMDq feature for 1 GbE NIC.
231  */
232 #define E1000_VMOLR_SIZE                        (8)
233 #define IGB_TSO_MAX_HDRLEN                      (512)
234 #define IGB_TSO_MAX_MSS                         (9216)
235
236 /*********************************************************************
237  *
238  *  TX function
239  *
240  **********************************************************************/
241
242 /*
243  *There're some limitations in hardware for TCP segmentation offload. We
244  *should check whether the parameters are valid.
245  */
246 static inline uint64_t
247 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
248 {
249         if (!(ol_req & PKT_TX_TCP_SEG))
250                 return ol_req;
251         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
252                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
253                 ol_req &= ~PKT_TX_TCP_SEG;
254                 ol_req |= PKT_TX_TCP_CKSUM;
255         }
256         return ol_req;
257 }
258
259 /*
260  * Advanced context descriptor are almost same between igb/ixgbe
261  * This is a separate function, looking for optimization opportunity here
262  * Rework required to go with the pre-defined values.
263  */
264
265 static inline void
266 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
267                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
268                 uint64_t ol_flags, union igb_tx_offload tx_offload)
269 {
270         uint32_t type_tucmd_mlhl;
271         uint32_t mss_l4len_idx;
272         uint32_t ctx_idx, ctx_curr;
273         uint32_t vlan_macip_lens;
274         union igb_tx_offload tx_offload_mask;
275
276         ctx_curr = txq->ctx_curr;
277         ctx_idx = ctx_curr + txq->ctx_start;
278
279         tx_offload_mask.data = 0;
280         type_tucmd_mlhl = 0;
281
282         /* Specify which HW CTX to upload. */
283         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
284
285         if (ol_flags & PKT_TX_VLAN_PKT)
286                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
287
288         /* check if TCP segmentation required for this packet */
289         if (ol_flags & PKT_TX_TCP_SEG) {
290                 /* implies IP cksum in IPv4 */
291                 if (ol_flags & PKT_TX_IP_CKSUM)
292                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
293                                 E1000_ADVTXD_TUCMD_L4T_TCP |
294                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
295                 else
296                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
297                                 E1000_ADVTXD_TUCMD_L4T_TCP |
298                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
299
300                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
301                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
302                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
303         } else { /* no TSO, check if hardware checksum is needed */
304                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
305                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
306
307                 if (ol_flags & PKT_TX_IP_CKSUM)
308                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
309
310                 switch (ol_flags & PKT_TX_L4_MASK) {
311                 case PKT_TX_UDP_CKSUM:
312                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
313                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
314                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
315                         break;
316                 case PKT_TX_TCP_CKSUM:
317                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
318                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
319                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
320                         break;
321                 case PKT_TX_SCTP_CKSUM:
322                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
323                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
324                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
325                         break;
326                 default:
327                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
328                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
329                         break;
330                 }
331         }
332
333         txq->ctx_cache[ctx_curr].flags = ol_flags;
334         txq->ctx_cache[ctx_idx].tx_offload.data =
335                 tx_offload_mask.data & tx_offload.data;
336         txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
337
338         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
339         vlan_macip_lens = (uint32_t)tx_offload.data;
340         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
341         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
342         ctx_txd->seqnum_seed = 0;
343 }
344
345 /*
346  * Check which hardware context can be used. Use the existing match
347  * or create a new context descriptor.
348  */
349 static inline uint32_t
350 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
351                 union igb_tx_offload tx_offload)
352 {
353         /* If match with the current context */
354         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
355                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
356                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
357                         return txq->ctx_curr;
358         }
359
360         /* If match with the second context */
361         txq->ctx_curr ^= 1;
362         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
363                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
364                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
365                         return txq->ctx_curr;
366         }
367
368         /* Mismatch, use the previous context */
369         return (IGB_CTX_NUM);
370 }
371
372 static inline uint32_t
373 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
374 {
375         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
376         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
377         uint32_t tmp;
378
379         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
380         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
381         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
382         return tmp;
383 }
384
385 static inline uint32_t
386 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
387 {
388         uint32_t cmdtype;
389         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
390         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
391         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
392         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
393         return cmdtype;
394 }
395
396 uint16_t
397 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
398                uint16_t nb_pkts)
399 {
400         struct igb_tx_queue *txq;
401         struct igb_tx_entry *sw_ring;
402         struct igb_tx_entry *txe, *txn;
403         volatile union e1000_adv_tx_desc *txr;
404         volatile union e1000_adv_tx_desc *txd;
405         struct rte_mbuf     *tx_pkt;
406         struct rte_mbuf     *m_seg;
407         uint64_t buf_dma_addr;
408         uint32_t olinfo_status;
409         uint32_t cmd_type_len;
410         uint32_t pkt_len;
411         uint16_t slen;
412         uint64_t ol_flags;
413         uint16_t tx_end;
414         uint16_t tx_id;
415         uint16_t tx_last;
416         uint16_t nb_tx;
417         uint64_t tx_ol_req;
418         uint32_t new_ctx = 0;
419         uint32_t ctx = 0;
420         union igb_tx_offload tx_offload = {0};
421
422         txq = tx_queue;
423         sw_ring = txq->sw_ring;
424         txr     = txq->tx_ring;
425         tx_id   = txq->tx_tail;
426         txe = &sw_ring[tx_id];
427
428         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
429                 tx_pkt = *tx_pkts++;
430                 pkt_len = tx_pkt->pkt_len;
431
432                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
433
434                 /*
435                  * The number of descriptors that must be allocated for a
436                  * packet is the number of segments of that packet, plus 1
437                  * Context Descriptor for the VLAN Tag Identifier, if any.
438                  * Determine the last TX descriptor to allocate in the TX ring
439                  * for the packet, starting from the current position (tx_id)
440                  * in the ring.
441                  */
442                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
443
444                 ol_flags = tx_pkt->ol_flags;
445                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
446
447                 /* If a Context Descriptor need be built . */
448                 if (tx_ol_req) {
449                         tx_offload.l2_len = tx_pkt->l2_len;
450                         tx_offload.l3_len = tx_pkt->l3_len;
451                         tx_offload.l4_len = tx_pkt->l4_len;
452                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
453                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
454                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
455
456                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
457                         /* Only allocate context descriptor if required*/
458                         new_ctx = (ctx == IGB_CTX_NUM);
459                         ctx = txq->ctx_curr;
460                         tx_last = (uint16_t) (tx_last + new_ctx);
461                 }
462                 if (tx_last >= txq->nb_tx_desc)
463                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
464
465                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
466                            " tx_first=%u tx_last=%u",
467                            (unsigned) txq->port_id,
468                            (unsigned) txq->queue_id,
469                            (unsigned) pkt_len,
470                            (unsigned) tx_id,
471                            (unsigned) tx_last);
472
473                 /*
474                  * Check if there are enough free descriptors in the TX ring
475                  * to transmit the next packet.
476                  * This operation is based on the two following rules:
477                  *
478                  *   1- Only check that the last needed TX descriptor can be
479                  *      allocated (by construction, if that descriptor is free,
480                  *      all intermediate ones are also free).
481                  *
482                  *      For this purpose, the index of the last TX descriptor
483                  *      used for a packet (the "last descriptor" of a packet)
484                  *      is recorded in the TX entries (the last one included)
485                  *      that are associated with all TX descriptors allocated
486                  *      for that packet.
487                  *
488                  *   2- Avoid to allocate the last free TX descriptor of the
489                  *      ring, in order to never set the TDT register with the
490                  *      same value stored in parallel by the NIC in the TDH
491                  *      register, which makes the TX engine of the NIC enter
492                  *      in a deadlock situation.
493                  *
494                  *      By extension, avoid to allocate a free descriptor that
495                  *      belongs to the last set of free descriptors allocated
496                  *      to the same packet previously transmitted.
497                  */
498
499                 /*
500                  * The "last descriptor" of the previously sent packet, if any,
501                  * which used the last descriptor to allocate.
502                  */
503                 tx_end = sw_ring[tx_last].last_id;
504
505                 /*
506                  * The next descriptor following that "last descriptor" in the
507                  * ring.
508                  */
509                 tx_end = sw_ring[tx_end].next_id;
510
511                 /*
512                  * The "last descriptor" associated with that next descriptor.
513                  */
514                 tx_end = sw_ring[tx_end].last_id;
515
516                 /*
517                  * Check that this descriptor is free.
518                  */
519                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
520                         if (nb_tx == 0)
521                                 return (0);
522                         goto end_of_tx;
523                 }
524
525                 /*
526                  * Set common flags of all TX Data Descriptors.
527                  *
528                  * The following bits must be set in all Data Descriptors:
529                  *   - E1000_ADVTXD_DTYP_DATA
530                  *   - E1000_ADVTXD_DCMD_DEXT
531                  *
532                  * The following bits must be set in the first Data Descriptor
533                  * and are ignored in the other ones:
534                  *   - E1000_ADVTXD_DCMD_IFCS
535                  *   - E1000_ADVTXD_MAC_1588
536                  *   - E1000_ADVTXD_DCMD_VLE
537                  *
538                  * The following bits must only be set in the last Data
539                  * Descriptor:
540                  *   - E1000_TXD_CMD_EOP
541                  *
542                  * The following bits can be set in any Data Descriptor, but
543                  * are only set in the last Data Descriptor:
544                  *   - E1000_TXD_CMD_RS
545                  */
546                 cmd_type_len = txq->txd_type |
547                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
548                 if (tx_ol_req & PKT_TX_TCP_SEG)
549                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
550                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
551 #if defined(RTE_LIBRTE_IEEE1588)
552                 if (ol_flags & PKT_TX_IEEE1588_TMST)
553                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
554 #endif
555                 if (tx_ol_req) {
556                         /* Setup TX Advanced context descriptor if required */
557                         if (new_ctx) {
558                                 volatile struct e1000_adv_tx_context_desc *
559                                     ctx_txd;
560
561                                 ctx_txd = (volatile struct
562                                     e1000_adv_tx_context_desc *)
563                                     &txr[tx_id];
564
565                                 txn = &sw_ring[txe->next_id];
566                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
567
568                                 if (txe->mbuf != NULL) {
569                                         rte_pktmbuf_free_seg(txe->mbuf);
570                                         txe->mbuf = NULL;
571                                 }
572
573                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
574
575                                 txe->last_id = tx_last;
576                                 tx_id = txe->next_id;
577                                 txe = txn;
578                         }
579
580                         /* Setup the TX Advanced Data Descriptor */
581                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
582                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
583                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
584                 }
585
586                 m_seg = tx_pkt;
587                 do {
588                         txn = &sw_ring[txe->next_id];
589                         txd = &txr[tx_id];
590
591                         if (txe->mbuf != NULL)
592                                 rte_pktmbuf_free_seg(txe->mbuf);
593                         txe->mbuf = m_seg;
594
595                         /*
596                          * Set up transmit descriptor.
597                          */
598                         slen = (uint16_t) m_seg->data_len;
599                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
600                         txd->read.buffer_addr =
601                                 rte_cpu_to_le_64(buf_dma_addr);
602                         txd->read.cmd_type_len =
603                                 rte_cpu_to_le_32(cmd_type_len | slen);
604                         txd->read.olinfo_status =
605                                 rte_cpu_to_le_32(olinfo_status);
606                         txe->last_id = tx_last;
607                         tx_id = txe->next_id;
608                         txe = txn;
609                         m_seg = m_seg->next;
610                 } while (m_seg != NULL);
611
612                 /*
613                  * The last packet data descriptor needs End Of Packet (EOP)
614                  * and Report Status (RS).
615                  */
616                 txd->read.cmd_type_len |=
617                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
618         }
619  end_of_tx:
620         rte_wmb();
621
622         /*
623          * Set the Transmit Descriptor Tail (TDT).
624          */
625         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
626         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
627                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
628                    (unsigned) tx_id, (unsigned) nb_tx);
629         txq->tx_tail = tx_id;
630
631         return (nb_tx);
632 }
633
634 /*********************************************************************
635  *
636  *  RX functions
637  *
638  **********************************************************************/
639 #define IGB_PACKET_TYPE_IPV4              0X01
640 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
641 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
642 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
643 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
644 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
645 #define IGB_PACKET_TYPE_IPV6              0X04
646 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
647 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
648 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
649 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
650 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
651 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
652 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
653 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
654 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
655 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
656 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
657 #define IGB_PACKET_TYPE_MAX               0X80
658 #define IGB_PACKET_TYPE_MASK              0X7F
659 #define IGB_PACKET_TYPE_SHIFT             0X04
660 static inline uint32_t
661 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
662 {
663         static const uint32_t
664                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
665                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
666                         RTE_PTYPE_L3_IPV4,
667                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
668                         RTE_PTYPE_L3_IPV4_EXT,
669                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
670                         RTE_PTYPE_L3_IPV6,
671                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
672                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673                         RTE_PTYPE_INNER_L3_IPV6,
674                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
675                         RTE_PTYPE_L3_IPV6_EXT,
676                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
677                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
678                         RTE_PTYPE_INNER_L3_IPV6_EXT,
679                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
680                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
681                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
682                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
683                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
684                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
686                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
687                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
688                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
689                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
690                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
691                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
692                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
693                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
694                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
695                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
696                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
698                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
700                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
703                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
705                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
706                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
707         };
708         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
709                 return RTE_PTYPE_UNKNOWN;
710
711         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
712
713         return ptype_table[pkt_info];
714 }
715
716 static inline uint64_t
717 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
718 {
719         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
720
721 #if defined(RTE_LIBRTE_IEEE1588)
722         static uint32_t ip_pkt_etqf_map[8] = {
723                 0, 0, 0, PKT_RX_IEEE1588_PTP,
724                 0, 0, 0, 0,
725         };
726
727         pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
728 #endif
729
730         return pkt_flags;
731 }
732
733 static inline uint64_t
734 rx_desc_status_to_pkt_flags(uint32_t rx_status)
735 {
736         uint64_t pkt_flags;
737
738         /* Check if VLAN present */
739         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
740
741 #if defined(RTE_LIBRTE_IEEE1588)
742         if (rx_status & E1000_RXD_STAT_TMST)
743                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
744 #endif
745         return pkt_flags;
746 }
747
748 static inline uint64_t
749 rx_desc_error_to_pkt_flags(uint32_t rx_status)
750 {
751         /*
752          * Bit 30: IPE, IPv4 checksum error
753          * Bit 29: L4I, L4I integrity error
754          */
755
756         static uint64_t error_to_pkt_flags_map[4] = {
757                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
758                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
759         };
760         return error_to_pkt_flags_map[(rx_status >>
761                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
762 }
763
764 uint16_t
765 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
766                uint16_t nb_pkts)
767 {
768         struct igb_rx_queue *rxq;
769         volatile union e1000_adv_rx_desc *rx_ring;
770         volatile union e1000_adv_rx_desc *rxdp;
771         struct igb_rx_entry *sw_ring;
772         struct igb_rx_entry *rxe;
773         struct rte_mbuf *rxm;
774         struct rte_mbuf *nmb;
775         union e1000_adv_rx_desc rxd;
776         uint64_t dma_addr;
777         uint32_t staterr;
778         uint32_t hlen_type_rss;
779         uint16_t pkt_len;
780         uint16_t rx_id;
781         uint16_t nb_rx;
782         uint16_t nb_hold;
783         uint64_t pkt_flags;
784
785         nb_rx = 0;
786         nb_hold = 0;
787         rxq = rx_queue;
788         rx_id = rxq->rx_tail;
789         rx_ring = rxq->rx_ring;
790         sw_ring = rxq->sw_ring;
791         while (nb_rx < nb_pkts) {
792                 /*
793                  * The order of operations here is important as the DD status
794                  * bit must not be read after any other descriptor fields.
795                  * rx_ring and rxdp are pointing to volatile data so the order
796                  * of accesses cannot be reordered by the compiler. If they were
797                  * not volatile, they could be reordered which could lead to
798                  * using invalid descriptor fields when read from rxd.
799                  */
800                 rxdp = &rx_ring[rx_id];
801                 staterr = rxdp->wb.upper.status_error;
802                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
803                         break;
804                 rxd = *rxdp;
805
806                 /*
807                  * End of packet.
808                  *
809                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
810                  * likely to be invalid and to be dropped by the various
811                  * validation checks performed by the network stack.
812                  *
813                  * Allocate a new mbuf to replenish the RX ring descriptor.
814                  * If the allocation fails:
815                  *    - arrange for that RX descriptor to be the first one
816                  *      being parsed the next time the receive function is
817                  *      invoked [on the same queue].
818                  *
819                  *    - Stop parsing the RX ring and return immediately.
820                  *
821                  * This policy do not drop the packet received in the RX
822                  * descriptor for which the allocation of a new mbuf failed.
823                  * Thus, it allows that packet to be later retrieved if
824                  * mbuf have been freed in the mean time.
825                  * As a side effect, holding RX descriptors instead of
826                  * systematically giving them back to the NIC may lead to
827                  * RX ring exhaustion situations.
828                  * However, the NIC can gracefully prevent such situations
829                  * to happen by sending specific "back-pressure" flow control
830                  * frames to its peer(s).
831                  */
832                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
833                            "staterr=0x%x pkt_len=%u",
834                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
835                            (unsigned) rx_id, (unsigned) staterr,
836                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
837
838                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
839                 if (nmb == NULL) {
840                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
841                                    "queue_id=%u", (unsigned) rxq->port_id,
842                                    (unsigned) rxq->queue_id);
843                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
844                         break;
845                 }
846
847                 nb_hold++;
848                 rxe = &sw_ring[rx_id];
849                 rx_id++;
850                 if (rx_id == rxq->nb_rx_desc)
851                         rx_id = 0;
852
853                 /* Prefetch next mbuf while processing current one. */
854                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
855
856                 /*
857                  * When next RX descriptor is on a cache-line boundary,
858                  * prefetch the next 4 RX descriptors and the next 8 pointers
859                  * to mbufs.
860                  */
861                 if ((rx_id & 0x3) == 0) {
862                         rte_igb_prefetch(&rx_ring[rx_id]);
863                         rte_igb_prefetch(&sw_ring[rx_id]);
864                 }
865
866                 rxm = rxe->mbuf;
867                 rxe->mbuf = nmb;
868                 dma_addr =
869                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
870                 rxdp->read.hdr_addr = 0;
871                 rxdp->read.pkt_addr = dma_addr;
872
873                 /*
874                  * Initialize the returned mbuf.
875                  * 1) setup generic mbuf fields:
876                  *    - number of segments,
877                  *    - next segment,
878                  *    - packet length,
879                  *    - RX port identifier.
880                  * 2) integrate hardware offload data, if any:
881                  *    - RSS flag & hash,
882                  *    - IP checksum flag,
883                  *    - VLAN TCI, if any,
884                  *    - error flags.
885                  */
886                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
887                                       rxq->crc_len);
888                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
889                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
890                 rxm->nb_segs = 1;
891                 rxm->next = NULL;
892                 rxm->pkt_len = pkt_len;
893                 rxm->data_len = pkt_len;
894                 rxm->port = rxq->port_id;
895
896                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
897                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
898                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
899                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
900
901                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
902                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
903                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
904                 rxm->ol_flags = pkt_flags;
905                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
906                                                 lo_dword.hs_rss.pkt_info);
907
908                 /*
909                  * Store the mbuf address into the next entry of the array
910                  * of returned packets.
911                  */
912                 rx_pkts[nb_rx++] = rxm;
913         }
914         rxq->rx_tail = rx_id;
915
916         /*
917          * If the number of free RX descriptors is greater than the RX free
918          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
919          * register.
920          * Update the RDT with the value of the last processed RX descriptor
921          * minus 1, to guarantee that the RDT register is never equal to the
922          * RDH register, which creates a "full" ring situtation from the
923          * hardware point of view...
924          */
925         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
926         if (nb_hold > rxq->rx_free_thresh) {
927                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
928                            "nb_hold=%u nb_rx=%u",
929                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
930                            (unsigned) rx_id, (unsigned) nb_hold,
931                            (unsigned) nb_rx);
932                 rx_id = (uint16_t) ((rx_id == 0) ?
933                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
934                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
935                 nb_hold = 0;
936         }
937         rxq->nb_rx_hold = nb_hold;
938         return (nb_rx);
939 }
940
941 uint16_t
942 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
943                          uint16_t nb_pkts)
944 {
945         struct igb_rx_queue *rxq;
946         volatile union e1000_adv_rx_desc *rx_ring;
947         volatile union e1000_adv_rx_desc *rxdp;
948         struct igb_rx_entry *sw_ring;
949         struct igb_rx_entry *rxe;
950         struct rte_mbuf *first_seg;
951         struct rte_mbuf *last_seg;
952         struct rte_mbuf *rxm;
953         struct rte_mbuf *nmb;
954         union e1000_adv_rx_desc rxd;
955         uint64_t dma; /* Physical address of mbuf data buffer */
956         uint32_t staterr;
957         uint32_t hlen_type_rss;
958         uint16_t rx_id;
959         uint16_t nb_rx;
960         uint16_t nb_hold;
961         uint16_t data_len;
962         uint64_t pkt_flags;
963
964         nb_rx = 0;
965         nb_hold = 0;
966         rxq = rx_queue;
967         rx_id = rxq->rx_tail;
968         rx_ring = rxq->rx_ring;
969         sw_ring = rxq->sw_ring;
970
971         /*
972          * Retrieve RX context of current packet, if any.
973          */
974         first_seg = rxq->pkt_first_seg;
975         last_seg = rxq->pkt_last_seg;
976
977         while (nb_rx < nb_pkts) {
978         next_desc:
979                 /*
980                  * The order of operations here is important as the DD status
981                  * bit must not be read after any other descriptor fields.
982                  * rx_ring and rxdp are pointing to volatile data so the order
983                  * of accesses cannot be reordered by the compiler. If they were
984                  * not volatile, they could be reordered which could lead to
985                  * using invalid descriptor fields when read from rxd.
986                  */
987                 rxdp = &rx_ring[rx_id];
988                 staterr = rxdp->wb.upper.status_error;
989                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
990                         break;
991                 rxd = *rxdp;
992
993                 /*
994                  * Descriptor done.
995                  *
996                  * Allocate a new mbuf to replenish the RX ring descriptor.
997                  * If the allocation fails:
998                  *    - arrange for that RX descriptor to be the first one
999                  *      being parsed the next time the receive function is
1000                  *      invoked [on the same queue].
1001                  *
1002                  *    - Stop parsing the RX ring and return immediately.
1003                  *
1004                  * This policy does not drop the packet received in the RX
1005                  * descriptor for which the allocation of a new mbuf failed.
1006                  * Thus, it allows that packet to be later retrieved if
1007                  * mbuf have been freed in the mean time.
1008                  * As a side effect, holding RX descriptors instead of
1009                  * systematically giving them back to the NIC may lead to
1010                  * RX ring exhaustion situations.
1011                  * However, the NIC can gracefully prevent such situations
1012                  * to happen by sending specific "back-pressure" flow control
1013                  * frames to its peer(s).
1014                  */
1015                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1016                            "staterr=0x%x data_len=%u",
1017                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1018                            (unsigned) rx_id, (unsigned) staterr,
1019                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1020
1021                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1022                 if (nmb == NULL) {
1023                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1024                                    "queue_id=%u", (unsigned) rxq->port_id,
1025                                    (unsigned) rxq->queue_id);
1026                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1027                         break;
1028                 }
1029
1030                 nb_hold++;
1031                 rxe = &sw_ring[rx_id];
1032                 rx_id++;
1033                 if (rx_id == rxq->nb_rx_desc)
1034                         rx_id = 0;
1035
1036                 /* Prefetch next mbuf while processing current one. */
1037                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1038
1039                 /*
1040                  * When next RX descriptor is on a cache-line boundary,
1041                  * prefetch the next 4 RX descriptors and the next 8 pointers
1042                  * to mbufs.
1043                  */
1044                 if ((rx_id & 0x3) == 0) {
1045                         rte_igb_prefetch(&rx_ring[rx_id]);
1046                         rte_igb_prefetch(&sw_ring[rx_id]);
1047                 }
1048
1049                 /*
1050                  * Update RX descriptor with the physical address of the new
1051                  * data buffer of the new allocated mbuf.
1052                  */
1053                 rxm = rxe->mbuf;
1054                 rxe->mbuf = nmb;
1055                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1056                 rxdp->read.pkt_addr = dma;
1057                 rxdp->read.hdr_addr = 0;
1058
1059                 /*
1060                  * Set data length & data buffer address of mbuf.
1061                  */
1062                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1063                 rxm->data_len = data_len;
1064                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1065
1066                 /*
1067                  * If this is the first buffer of the received packet,
1068                  * set the pointer to the first mbuf of the packet and
1069                  * initialize its context.
1070                  * Otherwise, update the total length and the number of segments
1071                  * of the current scattered packet, and update the pointer to
1072                  * the last mbuf of the current packet.
1073                  */
1074                 if (first_seg == NULL) {
1075                         first_seg = rxm;
1076                         first_seg->pkt_len = data_len;
1077                         first_seg->nb_segs = 1;
1078                 } else {
1079                         first_seg->pkt_len += data_len;
1080                         first_seg->nb_segs++;
1081                         last_seg->next = rxm;
1082                 }
1083
1084                 /*
1085                  * If this is not the last buffer of the received packet,
1086                  * update the pointer to the last mbuf of the current scattered
1087                  * packet and continue to parse the RX ring.
1088                  */
1089                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1090                         last_seg = rxm;
1091                         goto next_desc;
1092                 }
1093
1094                 /*
1095                  * This is the last buffer of the received packet.
1096                  * If the CRC is not stripped by the hardware:
1097                  *   - Subtract the CRC length from the total packet length.
1098                  *   - If the last buffer only contains the whole CRC or a part
1099                  *     of it, free the mbuf associated to the last buffer.
1100                  *     If part of the CRC is also contained in the previous
1101                  *     mbuf, subtract the length of that CRC part from the
1102                  *     data length of the previous mbuf.
1103                  */
1104                 rxm->next = NULL;
1105                 if (unlikely(rxq->crc_len > 0)) {
1106                         first_seg->pkt_len -= ETHER_CRC_LEN;
1107                         if (data_len <= ETHER_CRC_LEN) {
1108                                 rte_pktmbuf_free_seg(rxm);
1109                                 first_seg->nb_segs--;
1110                                 last_seg->data_len = (uint16_t)
1111                                         (last_seg->data_len -
1112                                          (ETHER_CRC_LEN - data_len));
1113                                 last_seg->next = NULL;
1114                         } else
1115                                 rxm->data_len =
1116                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1117                 }
1118
1119                 /*
1120                  * Initialize the first mbuf of the returned packet:
1121                  *    - RX port identifier,
1122                  *    - hardware offload data, if any:
1123                  *      - RSS flag & hash,
1124                  *      - IP checksum flag,
1125                  *      - VLAN TCI, if any,
1126                  *      - error flags.
1127                  */
1128                 first_seg->port = rxq->port_id;
1129                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1130
1131                 /*
1132                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1133                  * set in the pkt_flags field.
1134                  */
1135                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1136                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1137                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1138                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1139                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1140                 first_seg->ol_flags = pkt_flags;
1141                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1142                                         lower.lo_dword.hs_rss.pkt_info);
1143
1144                 /* Prefetch data of first segment, if configured to do so. */
1145                 rte_packet_prefetch((char *)first_seg->buf_addr +
1146                         first_seg->data_off);
1147
1148                 /*
1149                  * Store the mbuf address into the next entry of the array
1150                  * of returned packets.
1151                  */
1152                 rx_pkts[nb_rx++] = first_seg;
1153
1154                 /*
1155                  * Setup receipt context for a new packet.
1156                  */
1157                 first_seg = NULL;
1158         }
1159
1160         /*
1161          * Record index of the next RX descriptor to probe.
1162          */
1163         rxq->rx_tail = rx_id;
1164
1165         /*
1166          * Save receive context.
1167          */
1168         rxq->pkt_first_seg = first_seg;
1169         rxq->pkt_last_seg = last_seg;
1170
1171         /*
1172          * If the number of free RX descriptors is greater than the RX free
1173          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1174          * register.
1175          * Update the RDT with the value of the last processed RX descriptor
1176          * minus 1, to guarantee that the RDT register is never equal to the
1177          * RDH register, which creates a "full" ring situtation from the
1178          * hardware point of view...
1179          */
1180         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1181         if (nb_hold > rxq->rx_free_thresh) {
1182                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1183                            "nb_hold=%u nb_rx=%u",
1184                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1185                            (unsigned) rx_id, (unsigned) nb_hold,
1186                            (unsigned) nb_rx);
1187                 rx_id = (uint16_t) ((rx_id == 0) ?
1188                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1189                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1190                 nb_hold = 0;
1191         }
1192         rxq->nb_rx_hold = nb_hold;
1193         return (nb_rx);
1194 }
1195
1196 /*
1197  * Rings setup and release.
1198  *
1199  * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
1200  * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
1201  * This will also optimize cache line size effect.
1202  * H/W supports up to cache line size 128.
1203  */
1204 #define IGB_ALIGN 128
1205
1206 /*
1207  * Maximum number of Ring Descriptors.
1208  *
1209  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
1210  * desscriptors should meet the following condition:
1211  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1212  */
1213 #define IGB_MIN_RING_DESC 32
1214 #define IGB_MAX_RING_DESC 4096
1215
1216 static const struct rte_memzone *
1217 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1218                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1219 {
1220         char z_name[RTE_MEMZONE_NAMESIZE];
1221         const struct rte_memzone *mz;
1222
1223         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1224                         dev->driver->pci_drv.name, ring_name,
1225                                 dev->data->port_id, queue_id);
1226         mz = rte_memzone_lookup(z_name);
1227         if (mz)
1228                 return mz;
1229
1230 #ifdef RTE_LIBRTE_XEN_DOM0
1231         return rte_memzone_reserve_bounded(z_name, ring_size,
1232                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1233 #else
1234         return rte_memzone_reserve_aligned(z_name, ring_size,
1235                         socket_id, 0, IGB_ALIGN);
1236 #endif
1237 }
1238
1239 static void
1240 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1241 {
1242         unsigned i;
1243
1244         if (txq->sw_ring != NULL) {
1245                 for (i = 0; i < txq->nb_tx_desc; i++) {
1246                         if (txq->sw_ring[i].mbuf != NULL) {
1247                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1248                                 txq->sw_ring[i].mbuf = NULL;
1249                         }
1250                 }
1251         }
1252 }
1253
1254 static void
1255 igb_tx_queue_release(struct igb_tx_queue *txq)
1256 {
1257         if (txq != NULL) {
1258                 igb_tx_queue_release_mbufs(txq);
1259                 rte_free(txq->sw_ring);
1260                 rte_free(txq);
1261         }
1262 }
1263
1264 void
1265 eth_igb_tx_queue_release(void *txq)
1266 {
1267         igb_tx_queue_release(txq);
1268 }
1269
1270 static void
1271 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1272 {
1273         txq->tx_head = 0;
1274         txq->tx_tail = 0;
1275         txq->ctx_curr = 0;
1276         memset((void*)&txq->ctx_cache, 0,
1277                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1278 }
1279
1280 static void
1281 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1282 {
1283         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1284         struct igb_tx_entry *txe = txq->sw_ring;
1285         uint16_t i, prev;
1286         struct e1000_hw *hw;
1287
1288         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1289         /* Zero out HW ring memory */
1290         for (i = 0; i < txq->nb_tx_desc; i++) {
1291                 txq->tx_ring[i] = zeroed_desc;
1292         }
1293
1294         /* Initialize ring entries */
1295         prev = (uint16_t)(txq->nb_tx_desc - 1);
1296         for (i = 0; i < txq->nb_tx_desc; i++) {
1297                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1298
1299                 txd->wb.status = E1000_TXD_STAT_DD;
1300                 txe[i].mbuf = NULL;
1301                 txe[i].last_id = i;
1302                 txe[prev].next_id = i;
1303                 prev = i;
1304         }
1305
1306         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1307         /* 82575 specific, each tx queue will use 2 hw contexts */
1308         if (hw->mac.type == e1000_82575)
1309                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1310
1311         igb_reset_tx_queue_stat(txq);
1312 }
1313
1314 int
1315 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1316                          uint16_t queue_idx,
1317                          uint16_t nb_desc,
1318                          unsigned int socket_id,
1319                          const struct rte_eth_txconf *tx_conf)
1320 {
1321         const struct rte_memzone *tz;
1322         struct igb_tx_queue *txq;
1323         struct e1000_hw     *hw;
1324         uint32_t size;
1325
1326         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1327
1328         /*
1329          * Validate number of transmit descriptors.
1330          * It must not exceed hardware maximum, and must be multiple
1331          * of IGB_ALIGN.
1332          */
1333         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1334             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1335                 return -EINVAL;
1336         }
1337
1338         /*
1339          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1340          * driver.
1341          */
1342         if (tx_conf->tx_free_thresh != 0)
1343                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1344                              "used for the 1G driver.");
1345         if (tx_conf->tx_rs_thresh != 0)
1346                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1347                              "used for the 1G driver.");
1348         if (tx_conf->tx_thresh.wthresh == 0)
1349                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1350                              "consider setting the TX WTHRESH value to 4, 8, "
1351                              "or 16.");
1352
1353         /* Free memory prior to re-allocation if needed */
1354         if (dev->data->tx_queues[queue_idx] != NULL) {
1355                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1356                 dev->data->tx_queues[queue_idx] = NULL;
1357         }
1358
1359         /* First allocate the tx queue data structure */
1360         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1361                                                         RTE_CACHE_LINE_SIZE);
1362         if (txq == NULL)
1363                 return (-ENOMEM);
1364
1365         /*
1366          * Allocate TX ring hardware descriptors. A memzone large enough to
1367          * handle the maximum ring size is allocated in order to allow for
1368          * resizing in later calls to the queue setup function.
1369          */
1370         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1371         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1372                                         size, socket_id);
1373         if (tz == NULL) {
1374                 igb_tx_queue_release(txq);
1375                 return (-ENOMEM);
1376         }
1377
1378         txq->nb_tx_desc = nb_desc;
1379         txq->pthresh = tx_conf->tx_thresh.pthresh;
1380         txq->hthresh = tx_conf->tx_thresh.hthresh;
1381         txq->wthresh = tx_conf->tx_thresh.wthresh;
1382         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1383                 txq->wthresh = 1;
1384         txq->queue_id = queue_idx;
1385         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1386                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1387         txq->port_id = dev->data->port_id;
1388
1389         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1390 #ifndef RTE_LIBRTE_XEN_DOM0
1391         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1392 #else
1393         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1394 #endif
1395          txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1396         /* Allocate software ring */
1397         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1398                                    sizeof(struct igb_tx_entry) * nb_desc,
1399                                    RTE_CACHE_LINE_SIZE);
1400         if (txq->sw_ring == NULL) {
1401                 igb_tx_queue_release(txq);
1402                 return (-ENOMEM);
1403         }
1404         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1405                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1406
1407         igb_reset_tx_queue(txq, dev);
1408         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1409         dev->data->tx_queues[queue_idx] = txq;
1410
1411         return (0);
1412 }
1413
1414 static void
1415 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1416 {
1417         unsigned i;
1418
1419         if (rxq->sw_ring != NULL) {
1420                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1421                         if (rxq->sw_ring[i].mbuf != NULL) {
1422                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1423                                 rxq->sw_ring[i].mbuf = NULL;
1424                         }
1425                 }
1426         }
1427 }
1428
1429 static void
1430 igb_rx_queue_release(struct igb_rx_queue *rxq)
1431 {
1432         if (rxq != NULL) {
1433                 igb_rx_queue_release_mbufs(rxq);
1434                 rte_free(rxq->sw_ring);
1435                 rte_free(rxq);
1436         }
1437 }
1438
1439 void
1440 eth_igb_rx_queue_release(void *rxq)
1441 {
1442         igb_rx_queue_release(rxq);
1443 }
1444
1445 static void
1446 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1447 {
1448         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1449         unsigned i;
1450
1451         /* Zero out HW ring memory */
1452         for (i = 0; i < rxq->nb_rx_desc; i++) {
1453                 rxq->rx_ring[i] = zeroed_desc;
1454         }
1455
1456         rxq->rx_tail = 0;
1457         rxq->pkt_first_seg = NULL;
1458         rxq->pkt_last_seg = NULL;
1459 }
1460
1461 int
1462 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1463                          uint16_t queue_idx,
1464                          uint16_t nb_desc,
1465                          unsigned int socket_id,
1466                          const struct rte_eth_rxconf *rx_conf,
1467                          struct rte_mempool *mp)
1468 {
1469         const struct rte_memzone *rz;
1470         struct igb_rx_queue *rxq;
1471         struct e1000_hw     *hw;
1472         unsigned int size;
1473
1474         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1475
1476         /*
1477          * Validate number of receive descriptors.
1478          * It must not exceed hardware maximum, and must be multiple
1479          * of IGB_ALIGN.
1480          */
1481         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1482             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1483                 return (-EINVAL);
1484         }
1485
1486         /* Free memory prior to re-allocation if needed */
1487         if (dev->data->rx_queues[queue_idx] != NULL) {
1488                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1489                 dev->data->rx_queues[queue_idx] = NULL;
1490         }
1491
1492         /* First allocate the RX queue data structure. */
1493         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1494                           RTE_CACHE_LINE_SIZE);
1495         if (rxq == NULL)
1496                 return (-ENOMEM);
1497         rxq->mb_pool = mp;
1498         rxq->nb_rx_desc = nb_desc;
1499         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1500         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1501         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1502         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1503                 rxq->wthresh = 1;
1504         rxq->drop_en = rx_conf->rx_drop_en;
1505         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1506         rxq->queue_id = queue_idx;
1507         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1508                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1509         rxq->port_id = dev->data->port_id;
1510         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1511                                   ETHER_CRC_LEN);
1512
1513         /*
1514          *  Allocate RX ring hardware descriptors. A memzone large enough to
1515          *  handle the maximum ring size is allocated in order to allow for
1516          *  resizing in later calls to the queue setup function.
1517          */
1518         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1519         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1520         if (rz == NULL) {
1521                 igb_rx_queue_release(rxq);
1522                 return (-ENOMEM);
1523         }
1524         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1525         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1526 #ifndef RTE_LIBRTE_XEN_DOM0
1527         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1528 #else
1529         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1530 #endif
1531         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1532
1533         /* Allocate software ring. */
1534         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1535                                    sizeof(struct igb_rx_entry) * nb_desc,
1536                                    RTE_CACHE_LINE_SIZE);
1537         if (rxq->sw_ring == NULL) {
1538                 igb_rx_queue_release(rxq);
1539                 return (-ENOMEM);
1540         }
1541         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1542                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1543
1544         dev->data->rx_queues[queue_idx] = rxq;
1545         igb_reset_rx_queue(rxq);
1546
1547         return 0;
1548 }
1549
1550 uint32_t
1551 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1552 {
1553 #define IGB_RXQ_SCAN_INTERVAL 4
1554         volatile union e1000_adv_rx_desc *rxdp;
1555         struct igb_rx_queue *rxq;
1556         uint32_t desc = 0;
1557
1558         if (rx_queue_id >= dev->data->nb_rx_queues) {
1559                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1560                 return 0;
1561         }
1562
1563         rxq = dev->data->rx_queues[rx_queue_id];
1564         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1565
1566         while ((desc < rxq->nb_rx_desc) &&
1567                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1568                 desc += IGB_RXQ_SCAN_INTERVAL;
1569                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1570                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1571                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1572                                 desc - rxq->nb_rx_desc]);
1573         }
1574
1575         return 0;
1576 }
1577
1578 int
1579 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1580 {
1581         volatile union e1000_adv_rx_desc *rxdp;
1582         struct igb_rx_queue *rxq = rx_queue;
1583         uint32_t desc;
1584
1585         if (unlikely(offset >= rxq->nb_rx_desc))
1586                 return 0;
1587         desc = rxq->rx_tail + offset;
1588         if (desc >= rxq->nb_rx_desc)
1589                 desc -= rxq->nb_rx_desc;
1590
1591         rxdp = &rxq->rx_ring[desc];
1592         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1593 }
1594
1595 void
1596 igb_dev_clear_queues(struct rte_eth_dev *dev)
1597 {
1598         uint16_t i;
1599         struct igb_tx_queue *txq;
1600         struct igb_rx_queue *rxq;
1601
1602         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1603                 txq = dev->data->tx_queues[i];
1604                 if (txq != NULL) {
1605                         igb_tx_queue_release_mbufs(txq);
1606                         igb_reset_tx_queue(txq, dev);
1607                 }
1608         }
1609
1610         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1611                 rxq = dev->data->rx_queues[i];
1612                 if (rxq != NULL) {
1613                         igb_rx_queue_release_mbufs(rxq);
1614                         igb_reset_rx_queue(rxq);
1615                 }
1616         }
1617 }
1618
1619 void
1620 igb_dev_free_queues(struct rte_eth_dev *dev)
1621 {
1622         uint16_t i;
1623
1624         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1625                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1626                 dev->data->rx_queues[i] = NULL;
1627         }
1628         dev->data->nb_rx_queues = 0;
1629
1630         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1631                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1632                 dev->data->tx_queues[i] = NULL;
1633         }
1634         dev->data->nb_tx_queues = 0;
1635 }
1636
1637 /**
1638  * Receive Side Scaling (RSS).
1639  * See section 7.1.1.7 in the following document:
1640  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1641  *
1642  * Principles:
1643  * The source and destination IP addresses of the IP header and the source and
1644  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1645  * against a configurable random key to compute a 32-bit RSS hash result.
1646  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1647  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1648  * RSS output index which is used as the RX queue index where to store the
1649  * received packets.
1650  * The following output is supplied in the RX write-back descriptor:
1651  *     - 32-bit result of the Microsoft RSS hash function,
1652  *     - 4-bit RSS type field.
1653  */
1654
1655 /*
1656  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1657  * Used as the default key.
1658  */
1659 static uint8_t rss_intel_key[40] = {
1660         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1661         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1662         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1663         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1664         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1665 };
1666
1667 static void
1668 igb_rss_disable(struct rte_eth_dev *dev)
1669 {
1670         struct e1000_hw *hw;
1671         uint32_t mrqc;
1672
1673         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1674         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1675         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1676         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1677 }
1678
1679 static void
1680 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1681 {
1682         uint8_t  *hash_key;
1683         uint32_t rss_key;
1684         uint32_t mrqc;
1685         uint64_t rss_hf;
1686         uint16_t i;
1687
1688         hash_key = rss_conf->rss_key;
1689         if (hash_key != NULL) {
1690                 /* Fill in RSS hash key */
1691                 for (i = 0; i < 10; i++) {
1692                         rss_key  = hash_key[(i * 4)];
1693                         rss_key |= hash_key[(i * 4) + 1] << 8;
1694                         rss_key |= hash_key[(i * 4) + 2] << 16;
1695                         rss_key |= hash_key[(i * 4) + 3] << 24;
1696                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1697                 }
1698         }
1699
1700         /* Set configured hashing protocols in MRQC register */
1701         rss_hf = rss_conf->rss_hf;
1702         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1703         if (rss_hf & ETH_RSS_IPV4)
1704                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1705         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1706                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1707         if (rss_hf & ETH_RSS_IPV6)
1708                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1709         if (rss_hf & ETH_RSS_IPV6_EX)
1710                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1711         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1712                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1713         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1714                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1715         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1716                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1717         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1718                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1719         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1720                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1721         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1722 }
1723
1724 int
1725 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1726                         struct rte_eth_rss_conf *rss_conf)
1727 {
1728         struct e1000_hw *hw;
1729         uint32_t mrqc;
1730         uint64_t rss_hf;
1731
1732         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1733
1734         /*
1735          * Before changing anything, first check that the update RSS operation
1736          * does not attempt to disable RSS, if RSS was enabled at
1737          * initialization time, or does not attempt to enable RSS, if RSS was
1738          * disabled at initialization time.
1739          */
1740         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1741         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1742         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1743                 if (rss_hf != 0) /* Enable RSS */
1744                         return -(EINVAL);
1745                 return 0; /* Nothing to do */
1746         }
1747         /* RSS enabled */
1748         if (rss_hf == 0) /* Disable RSS */
1749                 return -(EINVAL);
1750         igb_hw_rss_hash_set(hw, rss_conf);
1751         return 0;
1752 }
1753
1754 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1755                               struct rte_eth_rss_conf *rss_conf)
1756 {
1757         struct e1000_hw *hw;
1758         uint8_t *hash_key;
1759         uint32_t rss_key;
1760         uint32_t mrqc;
1761         uint64_t rss_hf;
1762         uint16_t i;
1763
1764         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1765         hash_key = rss_conf->rss_key;
1766         if (hash_key != NULL) {
1767                 /* Return RSS hash key */
1768                 for (i = 0; i < 10; i++) {
1769                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1770                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1771                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1772                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1773                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1774                 }
1775         }
1776
1777         /* Get RSS functions configured in MRQC register */
1778         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1779         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1780                 rss_conf->rss_hf = 0;
1781                 return 0;
1782         }
1783         rss_hf = 0;
1784         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1785                 rss_hf |= ETH_RSS_IPV4;
1786         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1787                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1788         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1789                 rss_hf |= ETH_RSS_IPV6;
1790         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1791                 rss_hf |= ETH_RSS_IPV6_EX;
1792         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1793                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1794         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1795                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1796         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1797                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1798         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1799                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1800         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1801                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1802         rss_conf->rss_hf = rss_hf;
1803         return 0;
1804 }
1805
1806 static void
1807 igb_rss_configure(struct rte_eth_dev *dev)
1808 {
1809         struct rte_eth_rss_conf rss_conf;
1810         struct e1000_hw *hw;
1811         uint32_t shift;
1812         uint16_t i;
1813
1814         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1815
1816         /* Fill in redirection table. */
1817         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1818         for (i = 0; i < 128; i++) {
1819                 union e1000_reta {
1820                         uint32_t dword;
1821                         uint8_t  bytes[4];
1822                 } reta;
1823                 uint8_t q_idx;
1824
1825                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1826                                    i % dev->data->nb_rx_queues : 0);
1827                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1828                 if ((i & 3) == 3)
1829                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1830         }
1831
1832         /*
1833          * Configure the RSS key and the RSS protocols used to compute
1834          * the RSS hash of input packets.
1835          */
1836         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1837         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1838                 igb_rss_disable(dev);
1839                 return;
1840         }
1841         if (rss_conf.rss_key == NULL)
1842                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1843         igb_hw_rss_hash_set(hw, &rss_conf);
1844 }
1845
1846 /*
1847  * Check if the mac type support VMDq or not.
1848  * Return 1 if it supports, otherwise, return 0.
1849  */
1850 static int
1851 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1852 {
1853         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1854
1855         switch (hw->mac.type) {
1856         case e1000_82576:
1857         case e1000_82580:
1858         case e1000_i350:
1859                 return 1;
1860         case e1000_82540:
1861         case e1000_82541:
1862         case e1000_82542:
1863         case e1000_82543:
1864         case e1000_82544:
1865         case e1000_82545:
1866         case e1000_82546:
1867         case e1000_82547:
1868         case e1000_82571:
1869         case e1000_82572:
1870         case e1000_82573:
1871         case e1000_82574:
1872         case e1000_82583:
1873         case e1000_i210:
1874         case e1000_i211:
1875         default:
1876                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1877                 return 0;
1878         }
1879 }
1880
1881 static int
1882 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1883 {
1884         struct rte_eth_vmdq_rx_conf *cfg;
1885         struct e1000_hw *hw;
1886         uint32_t mrqc, vt_ctl, vmolr, rctl;
1887         int i;
1888
1889         PMD_INIT_FUNC_TRACE();
1890
1891         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1893
1894         /* Check if mac type can support VMDq, return value of 0 means NOT support */
1895         if (igb_is_vmdq_supported(dev) == 0)
1896                 return -1;
1897
1898         igb_rss_disable(dev);
1899
1900         /* RCTL: eanble VLAN filter */
1901         rctl = E1000_READ_REG(hw, E1000_RCTL);
1902         rctl |= E1000_RCTL_VFE;
1903         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1904
1905         /* MRQC: enable vmdq */
1906         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1907         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1908         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1909
1910         /* VTCTL:  pool selection according to VLAN tag */
1911         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1912         if (cfg->enable_default_pool)
1913                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1914         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1915         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1916
1917         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1918                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1919                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1920                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1921                         E1000_VMOLR_MPME);
1922
1923                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1924                         vmolr |= E1000_VMOLR_AUPE;
1925                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1926                         vmolr |= E1000_VMOLR_ROMPE;
1927                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1928                         vmolr |= E1000_VMOLR_ROPE;
1929                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1930                         vmolr |= E1000_VMOLR_BAM;
1931                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1932                         vmolr |= E1000_VMOLR_MPME;
1933
1934                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1935         }
1936
1937         /*
1938          * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
1939          * Both 82576 and 82580 support it
1940          */
1941         if (hw->mac.type != e1000_i350) {
1942                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1943                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1944                         vmolr |= E1000_VMOLR_STRVLAN;
1945                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1946                 }
1947         }
1948
1949         /* VFTA - enable all vlan filters */
1950         for (i = 0; i < IGB_VFTA_SIZE; i++)
1951                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1952
1953         /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
1954         if (hw->mac.type != e1000_82580)
1955                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1956
1957         /*
1958          * RAH/RAL - allow pools to read specific mac addresses
1959          * In this case, all pools should be able to read from mac addr 0
1960          */
1961         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1962         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1963
1964         /* VLVF: set up filters for vlan tags as configured */
1965         for (i = 0; i < cfg->nb_pool_maps; i++) {
1966                 /* set vlan id in VF register and set the valid bit */
1967                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1968                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1969                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1970                         E1000_VLVF_POOLSEL_MASK)));
1971         }
1972
1973         E1000_WRITE_FLUSH(hw);
1974
1975         return 0;
1976 }
1977
1978
1979 /*********************************************************************
1980  *
1981  *  Enable receive unit.
1982  *
1983  **********************************************************************/
1984
1985 static int
1986 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1987 {
1988         struct igb_rx_entry *rxe = rxq->sw_ring;
1989         uint64_t dma_addr;
1990         unsigned i;
1991
1992         /* Initialize software ring entries. */
1993         for (i = 0; i < rxq->nb_rx_desc; i++) {
1994                 volatile union e1000_adv_rx_desc *rxd;
1995                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1996
1997                 if (mbuf == NULL) {
1998                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1999                                      "queue_id=%hu", rxq->queue_id);
2000                         return (-ENOMEM);
2001                 }
2002                 dma_addr =
2003                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
2004                 rxd = &rxq->rx_ring[i];
2005                 rxd->read.hdr_addr = 0;
2006                 rxd->read.pkt_addr = dma_addr;
2007                 rxe[i].mbuf = mbuf;
2008         }
2009
2010         return 0;
2011 }
2012
2013 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2014 static int
2015 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2016 {
2017         struct e1000_hw *hw =
2018                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2019         uint32_t mrqc;
2020
2021         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2022                 /*
2023                  * SRIOV active scheme
2024                  * FIXME if support RSS together with VMDq & SRIOV
2025                  */
2026                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2027                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2028                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2029                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2030         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2031                 /*
2032                  * SRIOV inactive scheme
2033                  */
2034                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2035                         case ETH_MQ_RX_RSS:
2036                                 igb_rss_configure(dev);
2037                                 break;
2038                         case ETH_MQ_RX_VMDQ_ONLY:
2039                                 /*Configure general VMDQ only RX parameters*/
2040                                 igb_vmdq_rx_hw_configure(dev);
2041                                 break;
2042                         case ETH_MQ_RX_NONE:
2043                                 /* if mq_mode is none, disable rss mode.*/
2044                         default:
2045                                 igb_rss_disable(dev);
2046                                 break;
2047                 }
2048         }
2049
2050         return 0;
2051 }
2052
2053 int
2054 eth_igb_rx_init(struct rte_eth_dev *dev)
2055 {
2056         struct e1000_hw     *hw;
2057         struct igb_rx_queue *rxq;
2058         uint32_t rctl;
2059         uint32_t rxcsum;
2060         uint32_t srrctl;
2061         uint16_t buf_size;
2062         uint16_t rctl_bsize;
2063         uint16_t i;
2064         int ret;
2065
2066         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2067         srrctl = 0;
2068
2069         /*
2070          * Make sure receives are disabled while setting
2071          * up the descriptor ring.
2072          */
2073         rctl = E1000_READ_REG(hw, E1000_RCTL);
2074         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2075
2076         /*
2077          * Configure support of jumbo frames, if any.
2078          */
2079         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2080                 rctl |= E1000_RCTL_LPE;
2081
2082                 /*
2083                  * Set maximum packet length by default, and might be updated
2084                  * together with enabling/disabling dual VLAN.
2085                  */
2086                 E1000_WRITE_REG(hw, E1000_RLPML,
2087                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2088                                                 VLAN_TAG_SIZE);
2089         } else
2090                 rctl &= ~E1000_RCTL_LPE;
2091
2092         /* Configure and enable each RX queue. */
2093         rctl_bsize = 0;
2094         dev->rx_pkt_burst = eth_igb_recv_pkts;
2095         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2096                 uint64_t bus_addr;
2097                 uint32_t rxdctl;
2098
2099                 rxq = dev->data->rx_queues[i];
2100
2101                 /* Allocate buffers for descriptor rings and set up queue */
2102                 ret = igb_alloc_rx_queue_mbufs(rxq);
2103                 if (ret)
2104                         return ret;
2105
2106                 /*
2107                  * Reset crc_len in case it was changed after queue setup by a
2108                  *  call to configure
2109                  */
2110                 rxq->crc_len =
2111                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2112                                                         0 : ETHER_CRC_LEN);
2113
2114                 bus_addr = rxq->rx_ring_phys_addr;
2115                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2116                                 rxq->nb_rx_desc *
2117                                 sizeof(union e1000_adv_rx_desc));
2118                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2119                                 (uint32_t)(bus_addr >> 32));
2120                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2121
2122                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2123
2124                 /*
2125                  * Configure RX buffer size.
2126                  */
2127                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2128                         RTE_PKTMBUF_HEADROOM);
2129                 if (buf_size >= 1024) {
2130                         /*
2131                          * Configure the BSIZEPACKET field of the SRRCTL
2132                          * register of the queue.
2133                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2134                          * If this field is equal to 0b, then RCTL.BSIZE
2135                          * determines the RX packet buffer size.
2136                          */
2137                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2138                                    E1000_SRRCTL_BSIZEPKT_MASK);
2139                         buf_size = (uint16_t) ((srrctl &
2140                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2141                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2142
2143                         /* It adds dual VLAN length for supporting dual VLAN */
2144                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2145                                                 2 * VLAN_TAG_SIZE) > buf_size){
2146                                 if (!dev->data->scattered_rx)
2147                                         PMD_INIT_LOG(DEBUG,
2148                                                      "forcing scatter mode");
2149                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2150                                 dev->data->scattered_rx = 1;
2151                         }
2152                 } else {
2153                         /*
2154                          * Use BSIZE field of the device RCTL register.
2155                          */
2156                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2157                                 rctl_bsize = buf_size;
2158                         if (!dev->data->scattered_rx)
2159                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2160                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2161                         dev->data->scattered_rx = 1;
2162                 }
2163
2164                 /* Set if packets are dropped when no descriptors available */
2165                 if (rxq->drop_en)
2166                         srrctl |= E1000_SRRCTL_DROP_EN;
2167
2168                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2169
2170                 /* Enable this RX queue. */
2171                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2172                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2173                 rxdctl &= 0xFFF00000;
2174                 rxdctl |= (rxq->pthresh & 0x1F);
2175                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2176                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2177                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2178         }
2179
2180         if (dev->data->dev_conf.rxmode.enable_scatter) {
2181                 if (!dev->data->scattered_rx)
2182                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2183                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2184                 dev->data->scattered_rx = 1;
2185         }
2186
2187         /*
2188          * Setup BSIZE field of RCTL register, if needed.
2189          * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2190          * register, since the code above configures the SRRCTL register of
2191          * the RX queue in such a case.
2192          * All configurable sizes are:
2193          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2194          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2195          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2196          *  2048: rctl |= E1000_RCTL_SZ_2048;
2197          *  1024: rctl |= E1000_RCTL_SZ_1024;
2198          *   512: rctl |= E1000_RCTL_SZ_512;
2199          *   256: rctl |= E1000_RCTL_SZ_256;
2200          */
2201         if (rctl_bsize > 0) {
2202                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2203                         rctl |= E1000_RCTL_SZ_512;
2204                 else /* 256 <= buf_size < 512 - use 256 */
2205                         rctl |= E1000_RCTL_SZ_256;
2206         }
2207
2208         /*
2209          * Configure RSS if device configured with multiple RX queues.
2210          */
2211         igb_dev_mq_rx_configure(dev);
2212
2213         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2214         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2215
2216         /*
2217          * Setup the Checksum Register.
2218          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2219          */
2220         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2221         rxcsum |= E1000_RXCSUM_PCSD;
2222
2223         /* Enable both L3/L4 rx checksum offload */
2224         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2225                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2226         else
2227                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2228         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2229
2230         /* Setup the Receive Control Register. */
2231         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2232                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2233
2234                 /* set STRCRC bit in all queues */
2235                 if (hw->mac.type == e1000_i350 ||
2236                     hw->mac.type == e1000_i210 ||
2237                     hw->mac.type == e1000_i211 ||
2238                     hw->mac.type == e1000_i354) {
2239                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2240                                 rxq = dev->data->rx_queues[i];
2241                                 uint32_t dvmolr = E1000_READ_REG(hw,
2242                                         E1000_DVMOLR(rxq->reg_idx));
2243                                 dvmolr |= E1000_DVMOLR_STRCRC;
2244                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2245                         }
2246                 }
2247         } else {
2248                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2249
2250                 /* clear STRCRC bit in all queues */
2251                 if (hw->mac.type == e1000_i350 ||
2252                     hw->mac.type == e1000_i210 ||
2253                     hw->mac.type == e1000_i211 ||
2254                     hw->mac.type == e1000_i354) {
2255                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2256                                 rxq = dev->data->rx_queues[i];
2257                                 uint32_t dvmolr = E1000_READ_REG(hw,
2258                                         E1000_DVMOLR(rxq->reg_idx));
2259                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2260                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2261                         }
2262                 }
2263         }
2264
2265         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2266         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2267                 E1000_RCTL_RDMTS_HALF |
2268                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2269
2270         /* Make sure VLAN Filters are off. */
2271         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2272                 rctl &= ~E1000_RCTL_VFE;
2273         /* Don't store bad packets. */
2274         rctl &= ~E1000_RCTL_SBP;
2275
2276         /* Enable Receives. */
2277         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2278
2279         /*
2280          * Setup the HW Rx Head and Tail Descriptor Pointers.
2281          * This needs to be done after enable.
2282          */
2283         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2284                 rxq = dev->data->rx_queues[i];
2285                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2286                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2287         }
2288
2289         return 0;
2290 }
2291
2292 /*********************************************************************
2293  *
2294  *  Enable transmit unit.
2295  *
2296  **********************************************************************/
2297 void
2298 eth_igb_tx_init(struct rte_eth_dev *dev)
2299 {
2300         struct e1000_hw     *hw;
2301         struct igb_tx_queue *txq;
2302         uint32_t tctl;
2303         uint32_t txdctl;
2304         uint16_t i;
2305
2306         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2307
2308         /* Setup the Base and Length of the Tx Descriptor Rings. */
2309         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2310                 uint64_t bus_addr;
2311                 txq = dev->data->tx_queues[i];
2312                 bus_addr = txq->tx_ring_phys_addr;
2313
2314                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2315                                 txq->nb_tx_desc *
2316                                 sizeof(union e1000_adv_tx_desc));
2317                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2318                                 (uint32_t)(bus_addr >> 32));
2319                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2320
2321                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2322                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2323                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2324
2325                 /* Setup Transmit threshold registers. */
2326                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2327                 txdctl |= txq->pthresh & 0x1F;
2328                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2329                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2330                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2331                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2332         }
2333
2334         /* Program the Transmit Control Register. */
2335         tctl = E1000_READ_REG(hw, E1000_TCTL);
2336         tctl &= ~E1000_TCTL_CT;
2337         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2338                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2339
2340         e1000_config_collision_dist(hw);
2341
2342         /* This write will effectively turn on the transmit unit. */
2343         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2344 }
2345
2346 /*********************************************************************
2347  *
2348  *  Enable VF receive unit.
2349  *
2350  **********************************************************************/
2351 int
2352 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2353 {
2354         struct e1000_hw     *hw;
2355         struct igb_rx_queue *rxq;
2356         uint32_t srrctl;
2357         uint16_t buf_size;
2358         uint16_t rctl_bsize;
2359         uint16_t i;
2360         int ret;
2361
2362         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2363
2364         /* setup MTU */
2365         e1000_rlpml_set_vf(hw,
2366                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2367                 VLAN_TAG_SIZE));
2368
2369         /* Configure and enable each RX queue. */
2370         rctl_bsize = 0;
2371         dev->rx_pkt_burst = eth_igb_recv_pkts;
2372         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2373                 uint64_t bus_addr;
2374                 uint32_t rxdctl;
2375
2376                 rxq = dev->data->rx_queues[i];
2377
2378                 /* Allocate buffers for descriptor rings and set up queue */
2379                 ret = igb_alloc_rx_queue_mbufs(rxq);
2380                 if (ret)
2381                         return ret;
2382
2383                 bus_addr = rxq->rx_ring_phys_addr;
2384                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2385                                 rxq->nb_rx_desc *
2386                                 sizeof(union e1000_adv_rx_desc));
2387                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2388                                 (uint32_t)(bus_addr >> 32));
2389                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2390
2391                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2392
2393                 /*
2394                  * Configure RX buffer size.
2395                  */
2396                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2397                         RTE_PKTMBUF_HEADROOM);
2398                 if (buf_size >= 1024) {
2399                         /*
2400                          * Configure the BSIZEPACKET field of the SRRCTL
2401                          * register of the queue.
2402                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2403                          * If this field is equal to 0b, then RCTL.BSIZE
2404                          * determines the RX packet buffer size.
2405                          */
2406                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2407                                    E1000_SRRCTL_BSIZEPKT_MASK);
2408                         buf_size = (uint16_t) ((srrctl &
2409                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2410                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2411
2412                         /* It adds dual VLAN length for supporting dual VLAN */
2413                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2414                                                 2 * VLAN_TAG_SIZE) > buf_size){
2415                                 if (!dev->data->scattered_rx)
2416                                         PMD_INIT_LOG(DEBUG,
2417                                                      "forcing scatter mode");
2418                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2419                                 dev->data->scattered_rx = 1;
2420                         }
2421                 } else {
2422                         /*
2423                          * Use BSIZE field of the device RCTL register.
2424                          */
2425                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2426                                 rctl_bsize = buf_size;
2427                         if (!dev->data->scattered_rx)
2428                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2429                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2430                         dev->data->scattered_rx = 1;
2431                 }
2432
2433                 /* Set if packets are dropped when no descriptors available */
2434                 if (rxq->drop_en)
2435                         srrctl |= E1000_SRRCTL_DROP_EN;
2436
2437                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2438
2439                 /* Enable this RX queue. */
2440                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2441                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2442                 rxdctl &= 0xFFF00000;
2443                 rxdctl |= (rxq->pthresh & 0x1F);
2444                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2445                 if (hw->mac.type == e1000_vfadapt) {
2446                         /*
2447                          * Workaround of 82576 VF Erratum
2448                          * force set WTHRESH to 1
2449                          * to avoid Write-Back not triggered sometimes
2450                          */
2451                         rxdctl |= 0x10000;
2452                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2453                 }
2454                 else
2455                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2456                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2457         }
2458
2459         if (dev->data->dev_conf.rxmode.enable_scatter) {
2460                 if (!dev->data->scattered_rx)
2461                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2462                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2463                 dev->data->scattered_rx = 1;
2464         }
2465
2466         /*
2467          * Setup the HW Rx Head and Tail Descriptor Pointers.
2468          * This needs to be done after enable.
2469          */
2470         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2471                 rxq = dev->data->rx_queues[i];
2472                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2473                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2474         }
2475
2476         return 0;
2477 }
2478
2479 /*********************************************************************
2480  *
2481  *  Enable VF transmit unit.
2482  *
2483  **********************************************************************/
2484 void
2485 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2486 {
2487         struct e1000_hw     *hw;
2488         struct igb_tx_queue *txq;
2489         uint32_t txdctl;
2490         uint16_t i;
2491
2492         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493
2494         /* Setup the Base and Length of the Tx Descriptor Rings. */
2495         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2496                 uint64_t bus_addr;
2497
2498                 txq = dev->data->tx_queues[i];
2499                 bus_addr = txq->tx_ring_phys_addr;
2500                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2501                                 txq->nb_tx_desc *
2502                                 sizeof(union e1000_adv_tx_desc));
2503                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2504                                 (uint32_t)(bus_addr >> 32));
2505                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2506
2507                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2508                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2509                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2510
2511                 /* Setup Transmit threshold registers. */
2512                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2513                 txdctl |= txq->pthresh & 0x1F;
2514                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2515                 if (hw->mac.type == e1000_82576) {
2516                         /*
2517                          * Workaround of 82576 VF Erratum
2518                          * force set WTHRESH to 1
2519                          * to avoid Write-Back not triggered sometimes
2520                          */
2521                         txdctl |= 0x10000;
2522                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2523                 }
2524                 else
2525                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2526                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2527                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2528         }
2529
2530 }