1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit mask indicating which ol_flags bits require building a TX context descriptor. */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK)
80
81 static inline struct rte_mbuf *
82 rte_rxmbuf_alloc(struct rte_mempool *mp)
83 {
84         struct rte_mbuf *m;
85
86         m = __rte_mbuf_raw_alloc(mp);
87         __rte_mbuf_sanity_check_raw(m, 0);
88         return (m);
89 }
90
91 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
92         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
93
94 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
95         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
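
/*
 * Note: RTE_MBUF_DATA_DMA_ADDR() yields the DMA address of the current data
 * offset of an mbuf being transmitted, while RTE_MBUF_DATA_DMA_ADDR_DEFAULT()
 * yields the address at the default headroom offset and is used when handing
 * a freshly allocated mbuf back to the RX ring.
 */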
96
97 /**
98  * Structure associated with each descriptor of the RX ring of a RX queue.
99  */
100 struct igb_rx_entry {
101         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
102 };
103
104 /**
105  * Structure associated with each descriptor of the TX ring of a TX queue.
106  */
107 struct igb_tx_entry {
108         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
109         uint16_t next_id; /**< Index of next descriptor in ring. */
110         uint16_t last_id; /**< Index of last scattered descriptor. */
111 };
112
113 /**
114  * Structure associated with each RX queue.
115  */
116 struct igb_rx_queue {
117         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
118         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
119         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
120         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
121         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
122         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
123         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
124         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
125         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
126         uint16_t            rx_tail;    /**< current value of RDT register. */
127         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
128         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
129         uint16_t            queue_id;   /**< RX queue index. */
130         uint16_t            reg_idx;    /**< RX queue register index. */
131         uint8_t             port_id;    /**< Device port identifier. */
132         uint8_t             pthresh;    /**< Prefetch threshold register. */
133         uint8_t             hthresh;    /**< Host threshold register. */
134         uint8_t             wthresh;    /**< Write-back threshold register. */
135         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
136         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
137 };
138
139 /**
140  * Hardware context number
141  */
142 enum igb_advctx_num {
143         IGB_CTX_0    = 0, /**< CTX0    */
144         IGB_CTX_1    = 1, /**< CTX1    */
145         IGB_CTX_NUM  = 2, /**< CTX_NUM */
146 };
147
148 /** Offload features */
149 union igb_vlan_macip {
150         uint32_t data;
151         struct {
152                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
153                 uint16_t vlan_tci;
154                 /**< VLAN Tag Control Identifier (CPU order). */
155         } f;
156 };
157
158 /*
159  * Compare mask for vlan_macip_lens.data,
160  * should be in sync with the igb_vlan_macip.f layout.
161  */
162 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN TCI - 16 bits. */
163 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC (L2) length - 7 bits. */
164 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP (L3) length - 9 bits. */
165 /** MAC+IP  length. */
166 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
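
/*
 * Layout sketch of igb_vlan_macip.data, assuming a little-endian CPU and
 * low-bits-first bit-field ordering (the common case for this driver):
 *
 *    31           16 15        9 8         0
 *   +---------------+-----------+-----------+
 *   |   vlan_tci    |  l2_len   |  l3_len   |
 *   +---------------+-----------+-----------+
 *
 * TX_VLAN_CMP_MASK selects the VLAN TCI bits, while TX_MACIP_LEN_CMP_MASK
 * selects the combined L2/L3 length bits.
 */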
167
168 /**
169  * Structure to check if a new context descriptor needs to be built.
170  */
171 struct igb_advctx_info {
172         uint64_t flags;           /**< ol_flags related to context build. */
173         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
174         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
175 };
176
177 /**
178  * Structure associated with each TX queue.
179  */
180 struct igb_tx_queue {
181         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
182         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
183         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
184         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
185         uint32_t               txd_type;      /**< Device-specific TXD type */
186         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
187         uint16_t               tx_tail; /**< Current value of TDT register. */
188         uint16_t               tx_head;
189         /**< Index of first used TX descriptor. */
190         uint16_t               queue_id; /**< TX queue index. */
191         uint16_t               reg_idx;  /**< TX queue register index. */
192         uint8_t                port_id;  /**< Device port identifier. */
193         uint8_t                pthresh;  /**< Prefetch threshold register. */
194         uint8_t                hthresh;  /**< Host threshold register. */
195         uint8_t                wthresh;  /**< Write-back threshold register. */
196         uint32_t               ctx_curr;
197         /**< Currently used hardware context. */
198         uint32_t               ctx_start;
199         /**< Start context position for transmit queue. */
200         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
201         /**< Hardware context history.*/
202 };
203
204 #if 1
205 #define RTE_PMD_USE_PREFETCH
206 #endif
207
208 #ifdef RTE_PMD_USE_PREFETCH
209 #define rte_igb_prefetch(p)     rte_prefetch0(p)
210 #else
211 #define rte_igb_prefetch(p)     do {} while(0)
212 #endif
213
214 #ifdef RTE_PMD_PACKET_PREFETCH
215 #define rte_packet_prefetch(p) rte_prefetch1(p)
216 #else
217 #define rte_packet_prefetch(p)  do {} while(0)
218 #endif
219
220 /*
221  * Macro for VMDq feature for 1 GbE NIC.
222  */
223 #define E1000_VMOLR_SIZE                        (8)
224
225 /*********************************************************************
226  *
227  *  TX function
228  *
229  **********************************************************************/
230
231 /*
232  * Advanced context descriptors are almost the same between igb and ixgbe.
233  * This is kept as a separate function as there is an optimization
234  * opportunity here; rework is required to go with the pre-defined values.
235  */
236
237 static inline void
238 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
239                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
240                 uint64_t ol_flags, uint32_t vlan_macip_lens)
241 {
242         uint32_t type_tucmd_mlhl;
243         uint32_t mss_l4len_idx;
244         uint32_t ctx_idx, ctx_curr;
245         uint32_t cmp_mask;
246
247         ctx_curr = txq->ctx_curr;
248         ctx_idx = ctx_curr + txq->ctx_start;
249
250         cmp_mask = 0;
251         type_tucmd_mlhl = 0;
252
253         if (ol_flags & PKT_TX_VLAN_PKT) {
254                 cmp_mask |= TX_VLAN_CMP_MASK;
255         }
256
257         if (ol_flags & PKT_TX_IP_CKSUM) {
258                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
259                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
260         }
261
262         /* Specify which HW CTX to upload. */
263         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264         switch (ol_flags & PKT_TX_L4_MASK) {
265         case PKT_TX_UDP_CKSUM:
266                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
267                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
268                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
269                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
270                 break;
271         case PKT_TX_TCP_CKSUM:
272                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
273                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
276                 break;
277         case PKT_TX_SCTP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         default:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 break;
287         }
288
289         txq->ctx_cache[ctx_curr].flags           = ol_flags;
290         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
291         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
292                 vlan_macip_lens & cmp_mask;
293
294         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
295         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
296         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
297         ctx_txd->seqnum_seed     = 0;
298 }
299
300 /*
301  * Check which hardware context can be used. Use the existing match
302  * or create a new context descriptor.
303  */
304 static inline uint32_t
305 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
306                 uint32_t vlan_macip_lens)
307 {
308         /* If match with the current context */
309         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
310                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
311                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
312                         return txq->ctx_curr;
313         }
314
315         /* If match with the second context */
316         txq->ctx_curr ^= 1;
317         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320                         return txq->ctx_curr;
321         }
322
323         /* Mismatch: a new context descriptor needs to be built. */
324         return (IGB_CTX_NUM);
325 }
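
/*
 * Worked example (hypothetical traffic pattern): with two flows A and B that
 * alternate on the same queue and request different offloads, the two-slot
 * cache avoids rebuilding a context descriptor for every packet:
 *
 *   pkt A: both slots miss -> IGB_CTX_NUM, build context in slot 0
 *   pkt B: both slots miss -> IGB_CTX_NUM, build context in slot 1
 *   pkt A: slot 0 matches  -> reuse context 0, no context descriptor sent
 *   pkt B: slot 1 matches  -> reuse context 1, no context descriptor sent
 *
 * A third distinct flow evicts the slot that was matched least recently.
 */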
326
327 static inline uint32_t
328 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
329 {
330         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
331         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
332         uint32_t tmp;
333
334         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
335         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
336         return tmp;
337 }
338
339 static inline uint32_t
340 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
341 {
342         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
343         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
344 }
345
346 uint16_t
347 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
348                uint16_t nb_pkts)
349 {
350         struct igb_tx_queue *txq;
351         struct igb_tx_entry *sw_ring;
352         struct igb_tx_entry *txe, *txn;
353         volatile union e1000_adv_tx_desc *txr;
354         volatile union e1000_adv_tx_desc *txd;
355         struct rte_mbuf     *tx_pkt;
356         struct rte_mbuf     *m_seg;
357         union igb_vlan_macip vlan_macip_lens;
358         union {
359                 uint16_t u16;
360                 struct {
361                         uint16_t l3_len:9;
362                         uint16_t l2_len:7;
363                 };
364         } l2_l3_len;
365         uint64_t buf_dma_addr;
366         uint32_t olinfo_status;
367         uint32_t cmd_type_len;
368         uint32_t pkt_len;
369         uint16_t slen;
370         uint64_t ol_flags;
371         uint16_t tx_end;
372         uint16_t tx_id;
373         uint16_t tx_last;
374         uint16_t nb_tx;
375         uint64_t tx_ol_req;
376         uint32_t new_ctx = 0;
377         uint32_t ctx = 0;
378
379         txq = tx_queue;
380         sw_ring = txq->sw_ring;
381         txr     = txq->tx_ring;
382         tx_id   = txq->tx_tail;
383         txe = &sw_ring[tx_id];
384
385         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
386                 tx_pkt = *tx_pkts++;
387                 pkt_len = tx_pkt->pkt_len;
388
389                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
390
391                 /*
392                  * The number of descriptors that must be allocated for a
393                  * packet is the number of segments of that packet, plus one
394                  * context descriptor when VLAN or checksum offload is used.
395                  * Determine the last TX descriptor to allocate in the TX ring
396                  * for the packet, starting from the current position (tx_id)
397                  * in the ring.
398                  */
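                /*
                 * For example (hypothetical packet): a 3-segment packet that
                 * also requests VLAN insertion or checksum offload needs up
                 * to 3 data descriptors plus 1 context descriptor.
                 */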
399                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
400
401                 ol_flags = tx_pkt->ol_flags;
402                 l2_l3_len.l2_len = tx_pkt->l2_len;
403                 l2_l3_len.l3_len = tx_pkt->l3_len;
404                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
405                 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
406                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
407
408                 /* Check whether a context descriptor needs to be built. */
409                 if (tx_ol_req) {
410                         ctx = what_advctx_update(txq, tx_ol_req,
411                                 vlan_macip_lens.data);
412                         /* Only allocate a context descriptor if required. */
413                         new_ctx = (ctx == IGB_CTX_NUM);
414                         ctx = txq->ctx_curr;
415                         tx_last = (uint16_t) (tx_last + new_ctx);
416                 }
417                 if (tx_last >= txq->nb_tx_desc)
418                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
419
420                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
421                            " tx_first=%u tx_last=%u",
422                            (unsigned) txq->port_id,
423                            (unsigned) txq->queue_id,
424                            (unsigned) pkt_len,
425                            (unsigned) tx_id,
426                            (unsigned) tx_last);
427
428                 /*
429                  * Check if there are enough free descriptors in the TX ring
430                  * to transmit the next packet.
431                  * This operation is based on the two following rules:
432                  *
433                  *   1- Only check that the last needed TX descriptor can be
434                  *      allocated (by construction, if that descriptor is free,
435                  *      all intermediate ones are also free).
436                  *
437                  *      For this purpose, the index of the last TX descriptor
438                  *      used for a packet (the "last descriptor" of a packet)
439                  *      is recorded in the TX entries (the last one included)
440                  *      that are associated with all TX descriptors allocated
441                  *      for that packet.
442                  *
443                  *   2- Avoid allocating the last free TX descriptor of the
444                  *      ring, in order to never set the TDT register with the
445                  *      same value stored in parallel by the NIC in the TDH
446                  *      register, which would make the TX engine of the NIC
447                  *      enter a deadlock situation.
448                  *
449                  *      By extension, avoid allocating a free descriptor that
450                  *      belongs to the last set of free descriptors allocated
451                  *      for the packet previously transmitted.
452                  */
453
454                 /*
455                  * The "last descriptor" of the packet that previously used
456                  * the last TX descriptor we want to allocate, if any.
457                  */
458                 tx_end = sw_ring[tx_last].last_id;
459
460                 /*
461                  * The next descriptor following that "last descriptor" in the
462                  * ring.
463                  */
464                 tx_end = sw_ring[tx_end].next_id;
465
466                 /*
467                  * The "last descriptor" associated with that next descriptor.
468                  */
469                 tx_end = sw_ring[tx_end].last_id;
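
                /*
                 * Hypothetical illustration: if the new packet needs slots
                 * 10..12 (tx_last = 12) and slot 12 was previously used by a
                 * packet whose final descriptor was 15, the walk above goes
                 * 12 -> last_id 15 -> next_id 16 -> last_id of the packet
                 * that used slot 16. Only that final descriptor's DD bit is
                 * checked below; if it is set, descriptors 10..12 (plus a
                 * safety margin beyond them) are known to be free.
                 */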
470
471                 /*
472                  * Check that this descriptor is free.
473                  */
474                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
475                         if (nb_tx == 0)
476                                 return (0);
477                         goto end_of_tx;
478                 }
479
480                 /*
481                  * Set common flags of all TX Data Descriptors.
482                  *
483                  * The following bits must be set in all Data Descriptors:
484                  *   - E1000_ADVTXD_DTYP_DATA
485                  *   - E1000_ADVTXD_DCMD_DEXT
486                  *
487                  * The following bits must be set in the first Data Descriptor
488                  * and are ignored in the other ones:
489                  *   - E1000_ADVTXD_DCMD_IFCS
490                  *   - E1000_ADVTXD_MAC_1588
491                  *   - E1000_ADVTXD_DCMD_VLE
492                  *
493                  * The following bits must only be set in the last Data
494                  * Descriptor:
495                  *   - E1000_TXD_CMD_EOP
496                  *
497                  * The following bits can be set in any Data Descriptor, but
498                  * are only set in the last Data Descriptor:
499                  *   - E1000_TXD_CMD_RS
500                  */
501                 cmd_type_len = txq->txd_type |
502                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
503                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
504 #if defined(RTE_LIBRTE_IEEE1588)
505                 if (ol_flags & PKT_TX_IEEE1588_TMST)
506                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
507 #endif
508                 if (tx_ol_req) {
509                         /* Setup TX Advanced context descriptor if required */
510                         if (new_ctx) {
511                                 volatile struct e1000_adv_tx_context_desc *
512                                     ctx_txd;
513
514                                 ctx_txd = (volatile struct
515                                     e1000_adv_tx_context_desc *)
516                                     &txr[tx_id];
517
518                                 txn = &sw_ring[txe->next_id];
519                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
520
521                                 if (txe->mbuf != NULL) {
522                                         rte_pktmbuf_free_seg(txe->mbuf);
523                                         txe->mbuf = NULL;
524                                 }
525
526                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
527                                     vlan_macip_lens.data);
528
529                                 txe->last_id = tx_last;
530                                 tx_id = txe->next_id;
531                                 txe = txn;
532                         }
533
534                         /* Setup the TX Advanced Data Descriptor */
535                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
536                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
537                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
538                 }
539
540                 m_seg = tx_pkt;
541                 do {
542                         txn = &sw_ring[txe->next_id];
543                         txd = &txr[tx_id];
544
545                         if (txe->mbuf != NULL)
546                                 rte_pktmbuf_free_seg(txe->mbuf);
547                         txe->mbuf = m_seg;
548
549                         /*
550                          * Set up transmit descriptor.
551                          */
552                         slen = (uint16_t) m_seg->data_len;
553                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
554                         txd->read.buffer_addr =
555                                 rte_cpu_to_le_64(buf_dma_addr);
556                         txd->read.cmd_type_len =
557                                 rte_cpu_to_le_32(cmd_type_len | slen);
558                         txd->read.olinfo_status =
559                                 rte_cpu_to_le_32(olinfo_status);
560                         txe->last_id = tx_last;
561                         tx_id = txe->next_id;
562                         txe = txn;
563                         m_seg = m_seg->next;
564                 } while (m_seg != NULL);
565
566                 /*
567                  * The last packet data descriptor needs End Of Packet (EOP)
568                  * and Report Status (RS).
569                  */
570                 txd->read.cmd_type_len |=
571                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
572         }
573  end_of_tx:
574         rte_wmb();
575
576         /*
577          * Set the Transmit Descriptor Tail (TDT).
578          */
579         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
580         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
581                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
582                    (unsigned) tx_id, (unsigned) nb_tx);
583         txq->tx_tail = tx_id;
584
585         return (nb_tx);
586 }
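
/*
 * Usage sketch (application side, not part of the transmit path above): this
 * routine is normally reached through the generic burst API, for example:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb, sent;
 *
 *     nb = ...;  // packets prepared by the application
 *     sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *     while (sent < nb)  // the caller owns any unsent tail
 *             rte_pktmbuf_free(pkts[sent++]);
 *
 * Fewer packets than requested may be transmitted when the descriptor ring
 * runs out of free entries; see the DD-bit check above.
 */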
587
588 /*********************************************************************
589  *
590  *  RX functions
591  *
592  **********************************************************************/
593 #ifdef RTE_NEXT_ABI
594 #define IGB_PACKET_TYPE_IPV4              0X01
595 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
596 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
597 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
598 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
599 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
600 #define IGB_PACKET_TYPE_IPV6              0X04
601 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
602 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
603 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
604 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
605 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
606 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
607 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
608 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
609 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
610 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
611 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
612 #define IGB_PACKET_TYPE_MAX               0X80
613 #define IGB_PACKET_TYPE_MASK              0X7F
614 #define IGB_PACKET_TYPE_SHIFT             0X04
615 static inline uint32_t
616 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
617 {
618         static const uint32_t
619                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
620                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
621                         RTE_PTYPE_L3_IPV4,
622                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
623                         RTE_PTYPE_L3_IPV4_EXT,
624                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
625                         RTE_PTYPE_L3_IPV6,
626                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
627                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
628                         RTE_PTYPE_INNER_L3_IPV6,
629                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
630                         RTE_PTYPE_L3_IPV6_EXT,
631                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
632                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
633                         RTE_PTYPE_INNER_L3_IPV6_EXT,
634                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
635                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
636                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
637                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
638                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
639                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
640                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
641                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
642                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
643                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
644                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
645                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
646                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
647                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
648                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
649                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
650                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
651                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
652                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
653                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
654                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
655                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
656                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
657                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
658                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
659                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
660                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
661                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
662         };
663         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
664                 return RTE_PTYPE_UNKNOWN;
665
666         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
667
668         return ptype_table[pkt_info];
669 }
670
671 static inline uint64_t
672 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
673 {
674         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
675
676 #if defined(RTE_LIBRTE_IEEE1588)
677         static uint32_t ip_pkt_etqf_map[8] = {
678                 0, 0, 0, PKT_RX_IEEE1588_PTP,
679                 0, 0, 0, 0,
680         };
681
682         pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
683 #endif
684
685         return pkt_flags;
686 }
687 #else /* RTE_NEXT_ABI */
688 static inline uint64_t
689 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
690 {
691         uint64_t pkt_flags;
692
693         static uint64_t ip_pkt_types_map[16] = {
694                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
695                 PKT_RX_IPV6_HDR, 0, 0, 0,
696                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
697                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
698         };
699
700 #if defined(RTE_LIBRTE_IEEE1588)
701         static uint32_t ip_pkt_etqf_map[8] = {
702                 0, 0, 0, PKT_RX_IEEE1588_PTP,
703                 0, 0, 0, 0,
704         };
705
706         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
707                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
708                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
709 #else
710         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
711                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
712 #endif
713         return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH);
714 }
715 #endif /* RTE_NEXT_ABI */
716
717 static inline uint64_t
718 rx_desc_status_to_pkt_flags(uint32_t rx_status)
719 {
720         uint64_t pkt_flags;
721
722         /* Check if VLAN present */
723         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
724
725 #if defined(RTE_LIBRTE_IEEE1588)
726         if (rx_status & E1000_RXD_STAT_TMST)
727                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
728 #endif
729         return pkt_flags;
730 }
731
732 static inline uint64_t
733 rx_desc_error_to_pkt_flags(uint32_t rx_status)
734 {
735         /*
736          * Bit 30: IPE, IPv4 checksum error
737          * Bit 29: L4I, L4 checksum/integrity error
738          */
739
740         static uint64_t error_to_pkt_flags_map[4] = {
741                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
742                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
743         };
744         return error_to_pkt_flags_map[(rx_status >>
745                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
746 }
747
748 uint16_t
749 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
750                uint16_t nb_pkts)
751 {
752         struct igb_rx_queue *rxq;
753         volatile union e1000_adv_rx_desc *rx_ring;
754         volatile union e1000_adv_rx_desc *rxdp;
755         struct igb_rx_entry *sw_ring;
756         struct igb_rx_entry *rxe;
757         struct rte_mbuf *rxm;
758         struct rte_mbuf *nmb;
759         union e1000_adv_rx_desc rxd;
760         uint64_t dma_addr;
761         uint32_t staterr;
762         uint32_t hlen_type_rss;
763         uint16_t pkt_len;
764         uint16_t rx_id;
765         uint16_t nb_rx;
766         uint16_t nb_hold;
767         uint64_t pkt_flags;
768
769         nb_rx = 0;
770         nb_hold = 0;
771         rxq = rx_queue;
772         rx_id = rxq->rx_tail;
773         rx_ring = rxq->rx_ring;
774         sw_ring = rxq->sw_ring;
775         while (nb_rx < nb_pkts) {
776                 /*
777                  * The order of operations here is important as the DD status
778                  * bit must not be read after any other descriptor fields.
779                  * rx_ring and rxdp are pointing to volatile data so the order
780                  * of accesses cannot be reordered by the compiler. If they were
781                  * not volatile, they could be reordered which could lead to
782                  * using invalid descriptor fields when read from rxd.
783                  */
784                 rxdp = &rx_ring[rx_id];
785                 staterr = rxdp->wb.upper.status_error;
786                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
787                         break;
788                 rxd = *rxdp;
789
790                 /*
791                  * End of packet.
792                  *
793                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
794                  * likely to be invalid and to be dropped by the various
795                  * validation checks performed by the network stack.
796                  *
797                  * Allocate a new mbuf to replenish the RX ring descriptor.
798                  * If the allocation fails:
799                  *    - arrange for that RX descriptor to be the first one
800                  *      being parsed the next time the receive function is
801                  *      invoked [on the same queue].
802                  *
803                  *    - Stop parsing the RX ring and return immediately.
804                  *
805                  * This policy does not drop the packet received in the RX
806                  * descriptor for which the allocation of a new mbuf failed.
807                  * Thus, it allows that packet to be later retrieved if
808                  * mbufs have been freed in the meantime.
809                  * As a side effect, holding RX descriptors instead of
810                  * systematically giving them back to the NIC may lead to
811                  * RX ring exhaustion situations.
812                  * However, the NIC can gracefully prevent such situations
813                  * from happening by sending specific "back-pressure" flow
814                  * control frames to its peer(s).
815                  */
816                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
817                            "staterr=0x%x pkt_len=%u",
818                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819                            (unsigned) rx_id, (unsigned) staterr,
820                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
821
822                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
823                 if (nmb == NULL) {
824                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
825                                    "queue_id=%u", (unsigned) rxq->port_id,
826                                    (unsigned) rxq->queue_id);
827                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
828                         break;
829                 }
830
831                 nb_hold++;
832                 rxe = &sw_ring[rx_id];
833                 rx_id++;
834                 if (rx_id == rxq->nb_rx_desc)
835                         rx_id = 0;
836
837                 /* Prefetch next mbuf while processing current one. */
838                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
839
840                 /*
841                  * When next RX descriptor is on a cache-line boundary,
842                  * prefetch the next 4 RX descriptors and the next 8 pointers
843                  * to mbufs.
844                  */
845                 if ((rx_id & 0x3) == 0) {
846                         rte_igb_prefetch(&rx_ring[rx_id]);
847                         rte_igb_prefetch(&sw_ring[rx_id]);
848                 }
849
850                 rxm = rxe->mbuf;
851                 rxe->mbuf = nmb;
852                 dma_addr =
853                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
854                 rxdp->read.hdr_addr = 0;
855                 rxdp->read.pkt_addr = dma_addr;
856
857                 /*
858                  * Initialize the returned mbuf.
859                  * 1) setup generic mbuf fields:
860                  *    - number of segments,
861                  *    - next segment,
862                  *    - packet length,
863                  *    - RX port identifier.
864                  * 2) integrate hardware offload data, if any:
865                  *    - RSS flag & hash,
866                  *    - IP checksum flag,
867                  *    - VLAN TCI, if any,
868                  *    - error flags.
869                  */
870                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
871                                       rxq->crc_len);
872                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
873                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
874                 rxm->nb_segs = 1;
875                 rxm->next = NULL;
876                 rxm->pkt_len = pkt_len;
877                 rxm->data_len = pkt_len;
878                 rxm->port = rxq->port_id;
879
880                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
881                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
882                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
883                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
884
885                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
886                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
887                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
888                 rxm->ol_flags = pkt_flags;
889 #ifdef RTE_NEXT_ABI
890                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
891                                                 lo_dword.hs_rss.pkt_info);
892 #endif
893
894                 /*
895                  * Store the mbuf address into the next entry of the array
896                  * of returned packets.
897                  */
898                 rx_pkts[nb_rx++] = rxm;
899         }
900         rxq->rx_tail = rx_id;
901
902         /*
903          * If the number of free RX descriptors is greater than the RX free
904          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
905          * register.
906          * Update the RDT with the value of the last processed RX descriptor
907          * minus 1, to guarantee that the RDT register is never equal to the
908          * RDH register, which creates a "full" ring situation from the
909          * hardware point of view...
910          */
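        /*
         * Numeric illustration (hypothetical values): with nb_rx_desc = 512
         * and rx_id wrapped back to 0, the write below programs RDT = 511,
         * i.e. one descriptor behind the position software will examine
         * next, so RDT can never catch up with RDH.
         */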
911         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
912         if (nb_hold > rxq->rx_free_thresh) {
913                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
914                            "nb_hold=%u nb_rx=%u",
915                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
916                            (unsigned) rx_id, (unsigned) nb_hold,
917                            (unsigned) nb_rx);
918                 rx_id = (uint16_t) ((rx_id == 0) ?
919                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
920                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
921                 nb_hold = 0;
922         }
923         rxq->nb_rx_hold = nb_hold;
924         return (nb_rx);
925 }
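
/*
 * Usage sketch (application side): received mbufs are normally drained
 * through the generic burst API, for example:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t i, nb;
 *
 *     nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *     for (i = 0; i < nb; i++) {
 *             // process pkts[i] ...
 *             rte_pktmbuf_free(pkts[i]);
 *     }
 *
 * The scattered variant below is used instead when a received frame may not
 * fit into a single mbuf data buffer.
 */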
926
927 uint16_t
928 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
929                          uint16_t nb_pkts)
930 {
931         struct igb_rx_queue *rxq;
932         volatile union e1000_adv_rx_desc *rx_ring;
933         volatile union e1000_adv_rx_desc *rxdp;
934         struct igb_rx_entry *sw_ring;
935         struct igb_rx_entry *rxe;
936         struct rte_mbuf *first_seg;
937         struct rte_mbuf *last_seg;
938         struct rte_mbuf *rxm;
939         struct rte_mbuf *nmb;
940         union e1000_adv_rx_desc rxd;
941         uint64_t dma; /* Physical address of mbuf data buffer */
942         uint32_t staterr;
943         uint32_t hlen_type_rss;
944         uint16_t rx_id;
945         uint16_t nb_rx;
946         uint16_t nb_hold;
947         uint16_t data_len;
948         uint64_t pkt_flags;
949
950         nb_rx = 0;
951         nb_hold = 0;
952         rxq = rx_queue;
953         rx_id = rxq->rx_tail;
954         rx_ring = rxq->rx_ring;
955         sw_ring = rxq->sw_ring;
956
957         /*
958          * Retrieve RX context of current packet, if any.
959          */
960         first_seg = rxq->pkt_first_seg;
961         last_seg = rxq->pkt_last_seg;
962
963         while (nb_rx < nb_pkts) {
964         next_desc:
965                 /*
966                  * The order of operations here is important as the DD status
967                  * bit must not be read after any other descriptor fields.
968                  * rx_ring and rxdp are pointing to volatile data so the order
969                  * of accesses cannot be reordered by the compiler. If they were
970                  * not volatile, they could be reordered which could lead to
971                  * using invalid descriptor fields when read from rxd.
972                  */
973                 rxdp = &rx_ring[rx_id];
974                 staterr = rxdp->wb.upper.status_error;
975                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
976                         break;
977                 rxd = *rxdp;
978
979                 /*
980                  * Descriptor done.
981                  *
982                  * Allocate a new mbuf to replenish the RX ring descriptor.
983                  * If the allocation fails:
984                  *    - arrange for that RX descriptor to be the first one
985                  *      being parsed the next time the receive function is
986                  *      invoked [on the same queue].
987                  *
988                  *    - Stop parsing the RX ring and return immediately.
989                  *
990                  * This policy does not drop the packet received in the RX
991                  * descriptor for which the allocation of a new mbuf failed.
992                  * Thus, it allows that packet to be later retrieved if
993                  * mbufs have been freed in the meantime.
994                  * As a side effect, holding RX descriptors instead of
995                  * systematically giving them back to the NIC may lead to
996                  * RX ring exhaustion situations.
997                  * However, the NIC can gracefully prevent such situations
998                  * from happening by sending specific "back-pressure" flow
999                  * control frames to its peer(s).
1000                  */
1001                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1002                            "staterr=0x%x data_len=%u",
1003                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1004                            (unsigned) rx_id, (unsigned) staterr,
1005                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1006
1007                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1008                 if (nmb == NULL) {
1009                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1010                                    "queue_id=%u", (unsigned) rxq->port_id,
1011                                    (unsigned) rxq->queue_id);
1012                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1013                         break;
1014                 }
1015
1016                 nb_hold++;
1017                 rxe = &sw_ring[rx_id];
1018                 rx_id++;
1019                 if (rx_id == rxq->nb_rx_desc)
1020                         rx_id = 0;
1021
1022                 /* Prefetch next mbuf while processing current one. */
1023                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1024
1025                 /*
1026                  * When next RX descriptor is on a cache-line boundary,
1027                  * prefetch the next 4 RX descriptors and the next 8 pointers
1028                  * to mbufs.
1029                  */
1030                 if ((rx_id & 0x3) == 0) {
1031                         rte_igb_prefetch(&rx_ring[rx_id]);
1032                         rte_igb_prefetch(&sw_ring[rx_id]);
1033                 }
1034
1035                 /*
1036                  * Update RX descriptor with the physical address of the new
1037                  * data buffer of the newly allocated mbuf.
1038                  */
1039                 rxm = rxe->mbuf;
1040                 rxe->mbuf = nmb;
1041                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1042                 rxdp->read.pkt_addr = dma;
1043                 rxdp->read.hdr_addr = 0;
1044
1045                 /*
1046                  * Set data length & data buffer address of mbuf.
1047                  */
1048                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1049                 rxm->data_len = data_len;
1050                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1051
1052                 /*
1053                  * If this is the first buffer of the received packet,
1054                  * set the pointer to the first mbuf of the packet and
1055                  * initialize its context.
1056                  * Otherwise, update the total length and the number of segments
1057                  * of the current scattered packet, and update the pointer to
1058                  * the last mbuf of the current packet.
1059                  */
1060                 if (first_seg == NULL) {
1061                         first_seg = rxm;
1062                         first_seg->pkt_len = data_len;
1063                         first_seg->nb_segs = 1;
1064                 } else {
1065                         first_seg->pkt_len += data_len;
1066                         first_seg->nb_segs++;
1067                         last_seg->next = rxm;
1068                 }
1069
1070                 /*
1071                  * If this is not the last buffer of the received packet,
1072                  * update the pointer to the last mbuf of the current scattered
1073                  * packet and continue to parse the RX ring.
1074                  */
1075                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1076                         last_seg = rxm;
1077                         goto next_desc;
1078                 }
1079
1080                 /*
1081                  * This is the last buffer of the received packet.
1082                  * If the CRC is not stripped by the hardware:
1083                  *   - Subtract the CRC length from the total packet length.
1084                  *   - If the last buffer only contains the whole CRC or a part
1085                  *     of it, free the mbuf associated to the last buffer.
1086                  *     If part of the CRC is also contained in the previous
1087                  *     mbuf, subtract the length of that CRC part from the
1088                  *     data length of the previous mbuf.
1089                  */
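                /*
                 * Example with made-up lengths: a 65-byte frame (61 bytes of
                 * data + 4-byte CRC) received as 64-byte + 1-byte segments
                 * ends with a last segment holding only part of the CRC, so
                 * that segment is freed and the previous one is trimmed by
                 * the remaining 3 CRC bytes, leaving 61 bytes of data.
                 */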
1090                 rxm->next = NULL;
1091                 if (unlikely(rxq->crc_len > 0)) {
1092                         first_seg->pkt_len -= ETHER_CRC_LEN;
1093                         if (data_len <= ETHER_CRC_LEN) {
1094                                 rte_pktmbuf_free_seg(rxm);
1095                                 first_seg->nb_segs--;
1096                                 last_seg->data_len = (uint16_t)
1097                                         (last_seg->data_len -
1098                                          (ETHER_CRC_LEN - data_len));
1099                                 last_seg->next = NULL;
1100                         } else
1101                                 rxm->data_len =
1102                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1103                 }
1104
1105                 /*
1106                  * Initialize the first mbuf of the returned packet:
1107                  *    - RX port identifier,
1108                  *    - hardware offload data, if any:
1109                  *      - RSS flag & hash,
1110                  *      - IP checksum flag,
1111                  *      - VLAN TCI, if any,
1112                  *      - error flags.
1113                  */
1114                 first_seg->port = rxq->port_id;
1115                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1116
1117                 /*
1118                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1119                  * set in the pkt_flags field.
1120                  */
1121                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1122                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1123                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1124                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1125                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1126                 first_seg->ol_flags = pkt_flags;
1127 #ifdef RTE_NEXT_ABI
1128                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1129                                         lower.lo_dword.hs_rss.pkt_info);
1130 #endif
1131
1132                 /* Prefetch data of first segment, if configured to do so. */
1133                 rte_packet_prefetch((char *)first_seg->buf_addr +
1134                         first_seg->data_off);
1135
1136                 /*
1137                  * Store the mbuf address into the next entry of the array
1138                  * of returned packets.
1139                  */
1140                 rx_pkts[nb_rx++] = first_seg;
1141
1142                 /*
1143                  * Set up the receive context for a new packet.
1144                  */
1145                 first_seg = NULL;
1146         }
1147
1148         /*
1149          * Record index of the next RX descriptor to probe.
1150          */
1151         rxq->rx_tail = rx_id;
1152
1153         /*
1154          * Save receive context.
1155          */
1156         rxq->pkt_first_seg = first_seg;
1157         rxq->pkt_last_seg = last_seg;
1158
1159         /*
1160          * If the number of free RX descriptors is greater than the RX free
1161          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1162          * register.
1163          * Update the RDT with the value of the last processed RX descriptor
1164          * minus 1, to guarantee that the RDT register is never equal to the
1165          * RDH register, which creates a "full" ring situation from the
1166          * hardware point of view...
1167          */
1168         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1169         if (nb_hold > rxq->rx_free_thresh) {
1170                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1171                            "nb_hold=%u nb_rx=%u",
1172                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1173                            (unsigned) rx_id, (unsigned) nb_hold,
1174                            (unsigned) nb_rx);
1175                 rx_id = (uint16_t) ((rx_id == 0) ?
1176                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1177                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1178                 nb_hold = 0;
1179         }
1180         rxq->nb_rx_hold = nb_hold;
1181         return (nb_rx);
1182 }
1183
1184 /*
1185  * Rings setup and release.
1186  *
1187  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1188  * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1189  * This also helps with the cache line size effect, since the hardware
1190  * supports cache line sizes of up to 128 bytes.
1191  */
1192 #define IGB_ALIGN 128
1193
1194 /*
1195  * Maximum number of Ring Descriptors.
1196  *
1197  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1198  * descriptors should meet the following condition:
1199  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1200  */
1201 #define IGB_MIN_RING_DESC 32
1202 #define IGB_MAX_RING_DESC 4096
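
/*
 * Since the advanced RX/TX descriptors are 16 bytes each, the condition above
 * effectively requires the descriptor count to be a multiple of 8 (128 / 16),
 * within the [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC] range enforced at queue
 * setup time.
 */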
1203
1204 static const struct rte_memzone *
1205 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1206                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1207 {
1208         char z_name[RTE_MEMZONE_NAMESIZE];
1209         const struct rte_memzone *mz;
1210
1211         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1212                         dev->driver->pci_drv.name, ring_name,
1213                                 dev->data->port_id, queue_id);
1214         mz = rte_memzone_lookup(z_name);
1215         if (mz)
1216                 return mz;
1217
1218 #ifdef RTE_LIBRTE_XEN_DOM0
1219         return rte_memzone_reserve_bounded(z_name, ring_size,
1220                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1221 #else
1222         return rte_memzone_reserve_aligned(z_name, ring_size,
1223                         socket_id, 0, IGB_ALIGN);
1224 #endif
1225 }
1226
1227 static void
1228 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1229 {
1230         unsigned i;
1231
1232         if (txq->sw_ring != NULL) {
1233                 for (i = 0; i < txq->nb_tx_desc; i++) {
1234                         if (txq->sw_ring[i].mbuf != NULL) {
1235                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1236                                 txq->sw_ring[i].mbuf = NULL;
1237                         }
1238                 }
1239         }
1240 }
1241
1242 static void
1243 igb_tx_queue_release(struct igb_tx_queue *txq)
1244 {
1245         if (txq != NULL) {
1246                 igb_tx_queue_release_mbufs(txq);
1247                 rte_free(txq->sw_ring);
1248                 rte_free(txq);
1249         }
1250 }
1251
1252 void
1253 eth_igb_tx_queue_release(void *txq)
1254 {
1255         igb_tx_queue_release(txq);
1256 }
1257
1258 static void
1259 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1260 {
1261         txq->tx_head = 0;
1262         txq->tx_tail = 0;
1263         txq->ctx_curr = 0;
1264         memset((void*)&txq->ctx_cache, 0,
1265                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1266 }
1267
1268 static void
1269 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1270 {
1271         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1272         struct igb_tx_entry *txe = txq->sw_ring;
1273         uint16_t i, prev;
1274         struct e1000_hw *hw;
1275
1276         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1277         /* Zero out HW ring memory */
1278         for (i = 0; i < txq->nb_tx_desc; i++) {
1279                 txq->tx_ring[i] = zeroed_desc;
1280         }
1281
1282         /* Initialize ring entries */
1283         prev = (uint16_t)(txq->nb_tx_desc - 1);
1284         for (i = 0; i < txq->nb_tx_desc; i++) {
1285                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1286
1287                 txd->wb.status = E1000_TXD_STAT_DD;
1288                 txe[i].mbuf = NULL;
1289                 txe[i].last_id = i;
1290                 txe[prev].next_id = i;
1291                 prev = i;
1292         }
1293
1294         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1295         /* 82575 specific, each tx queue will use 2 hw contexts */
1296         if (hw->mac.type == e1000_82575)
1297                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1298
1299         igb_reset_tx_queue_stat(txq);
1300 }
1301
1302 int
1303 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1304                          uint16_t queue_idx,
1305                          uint16_t nb_desc,
1306                          unsigned int socket_id,
1307                          const struct rte_eth_txconf *tx_conf)
1308 {
1309         const struct rte_memzone *tz;
1310         struct igb_tx_queue *txq;
1311         struct e1000_hw     *hw;
1312         uint32_t size;
1313
1314         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1315
1316         /*
1317          * Validate number of transmit descriptors.
1318          * It must not exceed the hardware maximum, and the resulting
1319          * ring size in bytes must be a multiple of IGB_ALIGN.
1320          */
1321         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1322             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1323                 return -EINVAL;
1324         }
1325
1326         /*
1327          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1328          * driver.
1329          */
1330         if (tx_conf->tx_free_thresh != 0)
1331                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1332                              "used for the 1G driver.");
1333         if (tx_conf->tx_rs_thresh != 0)
1334                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1335                              "used for the 1G driver.");
1336         if (tx_conf->tx_thresh.wthresh == 0)
1337                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1338                              "consider setting the TX WTHRESH value to 4, 8, "
1339                              "or 16.");
1340
1341         /* Free memory prior to re-allocation if needed */
1342         if (dev->data->tx_queues[queue_idx] != NULL) {
1343                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1344                 dev->data->tx_queues[queue_idx] = NULL;
1345         }
1346
1347         /* First allocate the tx queue data structure */
1348         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1349                                                         RTE_CACHE_LINE_SIZE);
1350         if (txq == NULL)
1351                 return (-ENOMEM);
1352
1353         /*
1354          * Allocate TX ring hardware descriptors. A memzone large enough to
1355          * handle the maximum ring size is allocated in order to allow for
1356          * resizing in later calls to the queue setup function.
1357          */
1358         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1359         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1360                                         size, socket_id);
1361         if (tz == NULL) {
1362                 igb_tx_queue_release(txq);
1363                 return (-ENOMEM);
1364         }
1365
1366         txq->nb_tx_desc = nb_desc;
1367         txq->pthresh = tx_conf->tx_thresh.pthresh;
1368         txq->hthresh = tx_conf->tx_thresh.hthresh;
1369         txq->wthresh = tx_conf->tx_thresh.wthresh;
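        /*
         * Presumably the same 82576 write-back erratum workaround that the
         * VF init paths below describe: on the 82576, any non-zero WTHRESH
         * is forced down to 1.
         */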
1370         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1371                 txq->wthresh = 1;
1372         txq->queue_id = queue_idx;
1373         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1374                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1375         txq->port_id = dev->data->port_id;
1376
1377         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1378 #ifndef RTE_LIBRTE_XEN_DOM0
1379         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1380 #else
1381         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1382 #endif
1383         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1384         /* Allocate software ring */
1385         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1386                                    sizeof(struct igb_tx_entry) * nb_desc,
1387                                    RTE_CACHE_LINE_SIZE);
1388         if (txq->sw_ring == NULL) {
1389                 igb_tx_queue_release(txq);
1390                 return (-ENOMEM);
1391         }
1392         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1393                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1394
1395         igb_reset_tx_queue(txq, dev);
1396         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1397         dev->data->tx_queues[queue_idx] = txq;
1398
1399         return (0);
1400 }
1401
1402 static void
1403 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1404 {
1405         unsigned i;
1406
1407         if (rxq->sw_ring != NULL) {
1408                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1409                         if (rxq->sw_ring[i].mbuf != NULL) {
1410                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1411                                 rxq->sw_ring[i].mbuf = NULL;
1412                         }
1413                 }
1414         }
1415 }
1416
1417 static void
1418 igb_rx_queue_release(struct igb_rx_queue *rxq)
1419 {
1420         if (rxq != NULL) {
1421                 igb_rx_queue_release_mbufs(rxq);
1422                 rte_free(rxq->sw_ring);
1423                 rte_free(rxq);
1424         }
1425 }
1426
1427 void
1428 eth_igb_rx_queue_release(void *rxq)
1429 {
1430         igb_rx_queue_release(rxq);
1431 }
1432
1433 static void
1434 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1435 {
1436         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1437         unsigned i;
1438
1439         /* Zero out HW ring memory */
1440         for (i = 0; i < rxq->nb_rx_desc; i++) {
1441                 rxq->rx_ring[i] = zeroed_desc;
1442         }
1443
1444         rxq->rx_tail = 0;
1445         rxq->pkt_first_seg = NULL;
1446         rxq->pkt_last_seg = NULL;
1447 }
1448
1449 int
1450 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1451                          uint16_t queue_idx,
1452                          uint16_t nb_desc,
1453                          unsigned int socket_id,
1454                          const struct rte_eth_rxconf *rx_conf,
1455                          struct rte_mempool *mp)
1456 {
1457         const struct rte_memzone *rz;
1458         struct igb_rx_queue *rxq;
1459         struct e1000_hw     *hw;
1460         unsigned int size;
1461
1462         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1463
1464         /*
1465          * Validate number of receive descriptors.
1466          * It must not exceed the hardware maximum, and the resulting
1467          * ring size in bytes must be a multiple of IGB_ALIGN.
1468          */
1469         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1470             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1471                 return (-EINVAL);
1472         }
1473
1474         /* Free memory prior to re-allocation if needed */
1475         if (dev->data->rx_queues[queue_idx] != NULL) {
1476                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1477                 dev->data->rx_queues[queue_idx] = NULL;
1478         }
1479
1480         /* First allocate the RX queue data structure. */
1481         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1482                           RTE_CACHE_LINE_SIZE);
1483         if (rxq == NULL)
1484                 return (-ENOMEM);
1485         rxq->mb_pool = mp;
1486         rxq->nb_rx_desc = nb_desc;
1487         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1488         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1489         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1490         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1491                 rxq->wthresh = 1;
1492         rxq->drop_en = rx_conf->rx_drop_en;
1493         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1494         rxq->queue_id = queue_idx;
1495         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1496                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1497         rxq->port_id = dev->data->port_id;
1498         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1499                                   ETHER_CRC_LEN);
1500
1501         /*
1502          *  Allocate RX ring hardware descriptors. A memzone large enough to
1503          *  handle the maximum ring size is allocated in order to allow for
1504          *  resizing in later calls to the queue setup function.
1505          */
1506         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1507         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1508         if (rz == NULL) {
1509                 igb_rx_queue_release(rxq);
1510                 return (-ENOMEM);
1511         }
1512         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1513         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1514 #ifndef RTE_LIBRTE_XEN_DOM0
1515         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1516 #else
1517         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1518 #endif
1519         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1520
1521         /* Allocate software ring. */
1522         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1523                                    sizeof(struct igb_rx_entry) * nb_desc,
1524                                    RTE_CACHE_LINE_SIZE);
1525         if (rxq->sw_ring == NULL) {
1526                 igb_rx_queue_release(rxq);
1527                 return (-ENOMEM);
1528         }
1529         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1530                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1531
1532         dev->data->rx_queues[queue_idx] = rxq;
1533         igb_reset_rx_queue(rxq);
1534
1535         return 0;
1536 }
1537
1538 uint32_t
1539 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1540 {
1541 #define IGB_RXQ_SCAN_INTERVAL 4
1542         volatile union e1000_adv_rx_desc *rxdp;
1543         struct igb_rx_queue *rxq;
1544         uint32_t desc = 0;
1545
1546         if (rx_queue_id >= dev->data->nb_rx_queues) {
1547                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1548                 return 0;
1549         }
1550
1551         rxq = dev->data->rx_queues[rx_queue_id];
1552         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1553
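        /*
         * Probe the DD bit every IGB_RXQ_SCAN_INTERVAL descriptors, so the
         * returned count is an approximation with a granularity of four
         * descriptors.
         */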
1554         while ((desc < rxq->nb_rx_desc) &&
1555                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1556                 desc += IGB_RXQ_SCAN_INTERVAL;
1557                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1558                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1559                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1560                                 desc - rxq->nb_rx_desc]);
1561         }
1562
1563         return desc;
1564 }
1565
1566 int
1567 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1568 {
1569         volatile union e1000_adv_rx_desc *rxdp;
1570         struct igb_rx_queue *rxq = rx_queue;
1571         uint32_t desc;
1572
1573         if (unlikely(offset >= rxq->nb_rx_desc))
1574                 return 0;
1575         desc = rxq->rx_tail + offset;
1576         if (desc >= rxq->nb_rx_desc)
1577                 desc -= rxq->nb_rx_desc;
1578
1579         rxdp = &rxq->rx_ring[desc];
1580         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1581 }
1582
1583 void
1584 igb_dev_clear_queues(struct rte_eth_dev *dev)
1585 {
1586         uint16_t i;
1587         struct igb_tx_queue *txq;
1588         struct igb_rx_queue *rxq;
1589
1590         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1591                 txq = dev->data->tx_queues[i];
1592                 if (txq != NULL) {
1593                         igb_tx_queue_release_mbufs(txq);
1594                         igb_reset_tx_queue(txq, dev);
1595                 }
1596         }
1597
1598         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1599                 rxq = dev->data->rx_queues[i];
1600                 if (rxq != NULL) {
1601                         igb_rx_queue_release_mbufs(rxq);
1602                         igb_reset_rx_queue(rxq);
1603                 }
1604         }
1605 }
1606
1607 void
1608 igb_dev_free_queues(struct rte_eth_dev *dev)
1609 {
1610         uint16_t i;
1611
1612         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1613                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1614                 dev->data->rx_queues[i] = NULL;
1615         }
1616         dev->data->nb_rx_queues = 0;
1617
1618         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1619                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1620                 dev->data->tx_queues[i] = NULL;
1621         }
1622         dev->data->nb_tx_queues = 0;
1623 }
1624
1625 /**
1626  * Receive Side Scaling (RSS).
1627  * See section 7.1.1.7 in the following document:
1628  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1629  *
1630  * Principles:
1631  * The source and destination IP addresses of the IP header and the source and
1632  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1633  * against a configurable random key to compute a 32-bit RSS hash result.
1634  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1635  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1636  * RSS output index which is used as the RX queue index where to store the
1637  * received packets.
1638  * The following output is supplied in the RX write-back descriptor:
1639  *     - 32-bit result of the Microsoft RSS hash function,
1640  *     - 4-bit RSS type field.
1641  */
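/*
 * For example, a packet whose RSS hash computes to 0x0563ab78 selects
 * RETA entry 0x78 & 0x7f = 120, and the 3-bit queue index stored in that
 * entry chooses its RX queue.
 */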
1642
1643 /*
1644  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1645  * Used as the default key.
1646  */
1647 static uint8_t rss_intel_key[40] = {
1648         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1649         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1650         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1651         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1652         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1653 };
1654
1655 static void
1656 igb_rss_disable(struct rte_eth_dev *dev)
1657 {
1658         struct e1000_hw *hw;
1659         uint32_t mrqc;
1660
1661         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1662         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1663         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1664         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1665 }
1666
1667 static void
1668 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1669 {
1670         uint8_t  *hash_key;
1671         uint32_t rss_key;
1672         uint32_t mrqc;
1673         uint64_t rss_hf;
1674         uint16_t i;
1675
1676         hash_key = rss_conf->rss_key;
1677         if (hash_key != NULL) {
1678                 /* Fill in RSS hash key */
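                /*
                 * The 40-byte key is written as ten little-endian 32-bit
                 * words: key byte 4*i lands in the least significant byte
                 * of RSSRK[i].
                 */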
1679                 for (i = 0; i < 10; i++) {
1680                         rss_key  = hash_key[(i * 4)];
1681                         rss_key |= hash_key[(i * 4) + 1] << 8;
1682                         rss_key |= hash_key[(i * 4) + 2] << 16;
1683                         rss_key |= hash_key[(i * 4) + 3] << 24;
1684                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1685                 }
1686         }
1687
1688         /* Set configured hashing protocols in MRQC register */
1689         rss_hf = rss_conf->rss_hf;
1690         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1691         if (rss_hf & ETH_RSS_IPV4)
1692                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1693         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1694                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1695         if (rss_hf & ETH_RSS_IPV6)
1696                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1697         if (rss_hf & ETH_RSS_IPV6_EX)
1698                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1699         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1700                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1701         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1702                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1703         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1704                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1705         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1706                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1707         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1708                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1709         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1710 }
1711
1712 int
1713 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1714                         struct rte_eth_rss_conf *rss_conf)
1715 {
1716         struct e1000_hw *hw;
1717         uint32_t mrqc;
1718         uint64_t rss_hf;
1719
1720         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1721
1722         /*
1723          * Before changing anything, check that the requested update does
1724          * not attempt to disable RSS when RSS was enabled at
1725          * initialization time, or to enable RSS when it was disabled at
1726          * initialization time.
1727          */
1728         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1729         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1730         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1731                 if (rss_hf != 0) /* Enable RSS */
1732                         return -(EINVAL);
1733                 return 0; /* Nothing to do */
1734         }
1735         /* RSS enabled */
1736         if (rss_hf == 0) /* Disable RSS */
1737                 return -(EINVAL);
1738         igb_hw_rss_hash_set(hw, rss_conf);
1739         return 0;
1740 }
1741
1742 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1743                               struct rte_eth_rss_conf *rss_conf)
1744 {
1745         struct e1000_hw *hw;
1746         uint8_t *hash_key;
1747         uint32_t rss_key;
1748         uint32_t mrqc;
1749         uint64_t rss_hf;
1750         uint16_t i;
1751
1752         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1753         hash_key = rss_conf->rss_key;
1754         if (hash_key != NULL) {
1755                 /* Return RSS hash key */
1756                 for (i = 0; i < 10; i++) {
1757                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1758                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1759                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1760                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1761                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1762                 }
1763         }
1764
1765         /* Get RSS functions configured in MRQC register */
1766         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1767         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1768                 rss_conf->rss_hf = 0;
1769                 return 0;
1770         }
1771         rss_hf = 0;
1772         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1773                 rss_hf |= ETH_RSS_IPV4;
1774         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1775                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1776         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1777                 rss_hf |= ETH_RSS_IPV6;
1778         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1779                 rss_hf |= ETH_RSS_IPV6_EX;
1780         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1781                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1782         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1783                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1784         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1785                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1786         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1787                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1788         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1789                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1790         rss_conf->rss_hf = rss_hf;
1791         return 0;
1792 }
1793
1794 static void
1795 igb_rss_configure(struct rte_eth_dev *dev)
1796 {
1797         struct rte_eth_rss_conf rss_conf;
1798         struct e1000_hw *hw;
1799         uint32_t shift;
1800         uint16_t i;
1801
1802         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1803
1804         /* Fill in redirection table. */
1805         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
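        /*
         * Each 32-bit RETA register holds four one-byte entries; they are
         * accumulated below and written out every fourth iteration.  For
         * the 82575 the queue index is shifted up by 6 bits within each
         * entry.
         */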
1806         for (i = 0; i < 128; i++) {
1807                 union e1000_reta {
1808                         uint32_t dword;
1809                         uint8_t  bytes[4];
1810                 } reta;
1811                 uint8_t q_idx;
1812
1813                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1814                                    i % dev->data->nb_rx_queues : 0);
1815                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1816                 if ((i & 3) == 3)
1817                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1818         }
1819
1820         /*
1821          * Configure the RSS key and the RSS protocols used to compute
1822          * the RSS hash of input packets.
1823          */
1824         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1825         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1826                 igb_rss_disable(dev);
1827                 return;
1828         }
1829         if (rss_conf.rss_key == NULL)
1830                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1831         igb_hw_rss_hash_set(hw, &rss_conf);
1832 }
1833
1834 /*
1835  * Check whether the MAC type supports VMDq.
1836  * Return 1 if it does, otherwise 0.
1837  */
1838 static int
1839 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1840 {
1841         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1842
1843         switch (hw->mac.type) {
1844         case e1000_82576:
1845         case e1000_82580:
1846         case e1000_i350:
1847                 return 1;
1848         case e1000_82540:
1849         case e1000_82541:
1850         case e1000_82542:
1851         case e1000_82543:
1852         case e1000_82544:
1853         case e1000_82545:
1854         case e1000_82546:
1855         case e1000_82547:
1856         case e1000_82571:
1857         case e1000_82572:
1858         case e1000_82573:
1859         case e1000_82574:
1860         case e1000_82583:
1861         case e1000_i210:
1862         case e1000_i211:
1863         default:
1864                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1865                 return 0;
1866         }
1867 }
1868
1869 static int
1870 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1871 {
1872         struct rte_eth_vmdq_rx_conf *cfg;
1873         struct e1000_hw *hw;
1874         uint32_t mrqc, vt_ctl, vmolr, rctl;
1875         int i;
1876
1877         PMD_INIT_FUNC_TRACE();
1878
1879         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1880         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1881
1882         /* Check whether the MAC type supports VMDq; return value 0 means it does not */
1883         if (igb_is_vmdq_supported(dev) == 0)
1884                 return -1;
1885
1886         igb_rss_disable(dev);
1887
1888         /* RCTL: enable VLAN filter */
1889         rctl = E1000_READ_REG(hw, E1000_RCTL);
1890         rctl |= E1000_RCTL_VFE;
1891         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1892
1893         /* MRQC: enable vmdq */
1894         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1895         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1896         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1897
1898         /* VTCTL:  pool selection according to VLAN tag */
1899         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1900         if (cfg->enable_default_pool)
1901                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1902         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1903         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1904
1905         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1906                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1907                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1908                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1909                         E1000_VMOLR_MPME);
1910
1911                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1912                         vmolr |= E1000_VMOLR_AUPE;
1913                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1914                         vmolr |= E1000_VMOLR_ROMPE;
1915                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1916                         vmolr |= E1000_VMOLR_ROPE;
1917                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1918                         vmolr |= E1000_VMOLR_BAM;
1919                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1920                         vmolr |= E1000_VMOLR_MPME;
1921
1922                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1923         }
1924
1925         /*
1926          * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1927          * Both 82576 and 82580 support it
1928          */
1929         if (hw->mac.type != e1000_i350) {
1930                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1931                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1932                         vmolr |= E1000_VMOLR_STRVLAN;
1933                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1934                 }
1935         }
1936
1937         /* VFTA - enable all vlan filters */
1938         for (i = 0; i < IGB_VFTA_SIZE; i++)
1939                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1940
1941         /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1942         if (hw->mac.type != e1000_82580)
1943                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1944
1945         /*
1946          * RAH/RAL - allow pools to read specific mac addresses
1947          * In this case, all pools should be able to read from mac addr 0
1948          */
1949         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1950         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1951
1952         /* VLVF: set up filters for vlan tags as configured */
1953         for (i = 0; i < cfg->nb_pool_maps; i++) {
1954                 /* set the VLAN id in the VLVF register and set the valid bit */
1955                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1956                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1957                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1958                         E1000_VLVF_POOLSEL_MASK)));
1959         }
1960
1961         E1000_WRITE_FLUSH(hw);
1962
1963         return 0;
1964 }
1965
1966
1967 /*********************************************************************
1968  *
1969  *  Enable receive unit.
1970  *
1971  **********************************************************************/
1972
1973 static int
1974 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1975 {
1976         struct igb_rx_entry *rxe = rxq->sw_ring;
1977         uint64_t dma_addr;
1978         unsigned i;
1979
1980         /* Initialize software ring entries. */
1981         for (i = 0; i < rxq->nb_rx_desc; i++) {
1982                 volatile union e1000_adv_rx_desc *rxd;
1983                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1984
1985                 if (mbuf == NULL) {
1986                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1987                                      "queue_id=%hu", rxq->queue_id);
1988                         return (-ENOMEM);
1989                 }
1990                 dma_addr =
1991                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1992                 rxd = &rxq->rx_ring[i];
1993                 rxd->read.hdr_addr = 0;
1994                 rxd->read.pkt_addr = dma_addr;
1995                 rxe[i].mbuf = mbuf;
1996         }
1997
1998         return 0;
1999 }
2000
2001 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2002 static int
2003 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2004 {
2005         struct e1000_hw *hw =
2006                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2007         uint32_t mrqc;
2008
2009         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2010                 /*
2011                  * SRIOV active scheme
2012                  * FIXME if support RSS together with VMDq & SRIOV
2013                  */
2014                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2015                 /* Def_Q = 011b: ignore, use VT_CTL.DEF_PL instead */
2016                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2017                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2018         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2019                 /*
2020                  * SRIOV inactive scheme
2021                  */
2022                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2023                         case ETH_MQ_RX_RSS:
2024                                 igb_rss_configure(dev);
2025                                 break;
2026                         case ETH_MQ_RX_VMDQ_ONLY:
2027                                 /*Configure general VMDQ only RX parameters*/
2028                                 igb_vmdq_rx_hw_configure(dev);
2029                                 break;
2030                         case ETH_MQ_RX_NONE:
2031                         /* if mq_mode is none, disable RSS. */
2032                         default:
2033                                 igb_rss_disable(dev);
2034                                 break;
2035                 }
2036         }
2037
2038         return 0;
2039 }
2040
2041 int
2042 eth_igb_rx_init(struct rte_eth_dev *dev)
2043 {
2044         struct e1000_hw     *hw;
2045         struct igb_rx_queue *rxq;
2046         uint32_t rctl;
2047         uint32_t rxcsum;
2048         uint32_t srrctl;
2049         uint16_t buf_size;
2050         uint16_t rctl_bsize;
2051         uint16_t i;
2052         int ret;
2053
2054         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2055         srrctl = 0;
2056
2057         /*
2058          * Make sure receives are disabled while setting
2059          * up the descriptor ring.
2060          */
2061         rctl = E1000_READ_REG(hw, E1000_RCTL);
2062         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2063
2064         /*
2065          * Configure support of jumbo frames, if any.
2066          */
2067         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2068                 rctl |= E1000_RCTL_LPE;
2069
2070                 /*
2071                  * Set the maximum packet length by default; it may be
2072                  * updated later when dual VLAN is enabled or disabled.
2073                  */
2074                 E1000_WRITE_REG(hw, E1000_RLPML,
2075                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2076                                                 VLAN_TAG_SIZE);
2077         } else
2078                 rctl &= ~E1000_RCTL_LPE;
2079
2080         /* Configure and enable each RX queue. */
2081         rctl_bsize = 0;
2082         dev->rx_pkt_burst = eth_igb_recv_pkts;
2083         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2084                 uint64_t bus_addr;
2085                 uint32_t rxdctl;
2086
2087                 rxq = dev->data->rx_queues[i];
2088
2089                 /* Allocate buffers for descriptor rings and set up queue */
2090                 ret = igb_alloc_rx_queue_mbufs(rxq);
2091                 if (ret)
2092                         return ret;
2093
2094                 /*
2095                  * Reset crc_len in case it was changed after queue setup by a
2096                  *  call to configure
2097                  */
2098                 rxq->crc_len =
2099                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2100                                                         0 : ETHER_CRC_LEN);
2101
2102                 bus_addr = rxq->rx_ring_phys_addr;
2103                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2104                                 rxq->nb_rx_desc *
2105                                 sizeof(union e1000_adv_rx_desc));
2106                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2107                                 (uint32_t)(bus_addr >> 32));
2108                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2109
2110                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111
2112                 /*
2113                  * Configure RX buffer size.
2114                  */
2115                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2116                         RTE_PKTMBUF_HEADROOM);
2117                 if (buf_size >= 1024) {
2118                         /*
2119                          * Configure the BSIZEPACKET field of the SRRCTL
2120                          * register of the queue.
2121                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2122                          * If this field is equal to 0b, then RCTL.BSIZE
2123                          * determines the RX packet buffer size.
2124                          */
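                        /*
                         * For example, if buf_size is 2048, then
                         * 2048 >> 10 = 2, so BSIZEPACKET is programmed to 2
                         * and the effective size recomputed below is again
                         * 2048 bytes.
                         */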
2125                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2126                                    E1000_SRRCTL_BSIZEPKT_MASK);
2127                         buf_size = (uint16_t) ((srrctl &
2128                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2129                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2130
2131                         /* Account for dual VLAN (QinQ) tag length in the size check */
2132                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2133                                                 2 * VLAN_TAG_SIZE) > buf_size){
2134                                 if (!dev->data->scattered_rx)
2135                                         PMD_INIT_LOG(DEBUG,
2136                                                      "forcing scatter mode");
2137                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2138                                 dev->data->scattered_rx = 1;
2139                         }
2140                 } else {
2141                         /*
2142                          * Use BSIZE field of the device RCTL register.
2143                          */
2144                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2145                                 rctl_bsize = buf_size;
2146                         if (!dev->data->scattered_rx)
2147                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2148                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2149                         dev->data->scattered_rx = 1;
2150                 }
2151
2152                 /* Set if packets are dropped when no descriptors available */
2153                 if (rxq->drop_en)
2154                         srrctl |= E1000_SRRCTL_DROP_EN;
2155
2156                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2157
2158                 /* Enable this RX queue. */
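                /*
                 * RXDCTL packs the prefetch, host and write-back thresholds
                 * into bits 4:0, 12:8 and 20:16 respectively, as the shifts
                 * below reflect.
                 */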
2159                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2160                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2161                 rxdctl &= 0xFFF00000;
2162                 rxdctl |= (rxq->pthresh & 0x1F);
2163                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2164                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2165                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2166         }
2167
2168         if (dev->data->dev_conf.rxmode.enable_scatter) {
2169                 if (!dev->data->scattered_rx)
2170                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2171                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2172                 dev->data->scattered_rx = 1;
2173         }
2174
2175         /*
2176          * Setup BSIZE field of RCTL register, if needed.
2177          * Buffer sizes >= 1024 are not meant to be set up in the RCTL
2178          * register, since the code above configures the SRRCTL register of
2179          * the RX queue in such a case.
2180          * All configurable sizes are:
2181          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2182          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2183          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2184          *  2048: rctl |= E1000_RCTL_SZ_2048;
2185          *  1024: rctl |= E1000_RCTL_SZ_1024;
2186          *   512: rctl |= E1000_RCTL_SZ_512;
2187          *   256: rctl |= E1000_RCTL_SZ_256;
2188          */
2189         if (rctl_bsize > 0) {
2190                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2191                         rctl |= E1000_RCTL_SZ_512;
2192                 else /* 256 <= buf_size < 512 - use 256 */
2193                         rctl |= E1000_RCTL_SZ_256;
2194         }
2195
2196         /*
2197          * Configure RSS if the device is configured with multiple RX queues.
2198          */
2199         igb_dev_mq_rx_configure(dev);
2200
2201         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2202         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2203
2204         /*
2205          * Setup the Checksum Register.
2206          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2207          */
2208         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2209         rxcsum |= E1000_RXCSUM_PCSD;
2210
2211         /* Enable both L3/L4 rx checksum offload */
2212         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2213                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2214         else
2215                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2216         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2217
2218         /* Setup the Receive Control Register. */
2219         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2220                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2221
2222                 /* set STRCRC bit in all queues */
2223                 if (hw->mac.type == e1000_i350 ||
2224                     hw->mac.type == e1000_i210 ||
2225                     hw->mac.type == e1000_i211 ||
2226                     hw->mac.type == e1000_i354) {
2227                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2228                                 rxq = dev->data->rx_queues[i];
2229                                 uint32_t dvmolr = E1000_READ_REG(hw,
2230                                         E1000_DVMOLR(rxq->reg_idx));
2231                                 dvmolr |= E1000_DVMOLR_STRCRC;
2232                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2233                         }
2234                 }
2235         } else {
2236                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2237
2238                 /* clear STRCRC bit in all queues */
2239                 if (hw->mac.type == e1000_i350 ||
2240                     hw->mac.type == e1000_i210 ||
2241                     hw->mac.type == e1000_i211 ||
2242                     hw->mac.type == e1000_i354) {
2243                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2244                                 rxq = dev->data->rx_queues[i];
2245                                 uint32_t dvmolr = E1000_READ_REG(hw,
2246                                         E1000_DVMOLR(rxq->reg_idx));
2247                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2248                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2249                         }
2250                 }
2251         }
2252
2253         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2254         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2255                 E1000_RCTL_RDMTS_HALF |
2256                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2257
2258         /* Make sure VLAN Filters are off. */
2259         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2260                 rctl &= ~E1000_RCTL_VFE;
2261         /* Don't store bad packets. */
2262         rctl &= ~E1000_RCTL_SBP;
2263
2264         /* Enable Receives. */
2265         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2266
2267         /*
2268          * Setup the HW Rx Head and Tail Descriptor Pointers.
2269          * This needs to be done after enable.
2270          */
2271         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2272                 rxq = dev->data->rx_queues[i];
2273                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2274                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2275         }
2276
2277         return 0;
2278 }
2279
2280 /*********************************************************************
2281  *
2282  *  Enable transmit unit.
2283  *
2284  **********************************************************************/
2285 void
2286 eth_igb_tx_init(struct rte_eth_dev *dev)
2287 {
2288         struct e1000_hw     *hw;
2289         struct igb_tx_queue *txq;
2290         uint32_t tctl;
2291         uint32_t txdctl;
2292         uint16_t i;
2293
2294         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2295
2296         /* Setup the Base and Length of the Tx Descriptor Rings. */
2297         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2298                 uint64_t bus_addr;
2299                 txq = dev->data->tx_queues[i];
2300                 bus_addr = txq->tx_ring_phys_addr;
2301
2302                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2303                                 txq->nb_tx_desc *
2304                                 sizeof(union e1000_adv_tx_desc));
2305                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2306                                 (uint32_t)(bus_addr >> 32));
2307                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2308
2309                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2310                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2311                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2312
2313                 /* Setup Transmit threshold registers. */
2314                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2315                 txdctl |= txq->pthresh & 0x1F;
2316                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2317                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2318                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2319                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2320         }
2321
2322         /* Program the Transmit Control Register. */
2323         tctl = E1000_READ_REG(hw, E1000_TCTL);
2324         tctl &= ~E1000_TCTL_CT;
2325         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2326                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2327
2328         e1000_config_collision_dist(hw);
2329
2330         /* This write will effectively turn on the transmit unit. */
2331         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2332 }
2333
2334 /*********************************************************************
2335  *
2336  *  Enable VF receive unit.
2337  *
2338  **********************************************************************/
2339 int
2340 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2341 {
2342         struct e1000_hw     *hw;
2343         struct igb_rx_queue *rxq;
2344         uint32_t srrctl;
2345         uint16_t buf_size;
2346         uint16_t rctl_bsize;
2347         uint16_t i;
2348         int ret;
2349
2350         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2351
2352         /* setup MTU */
2353         e1000_rlpml_set_vf(hw,
2354                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2355                 VLAN_TAG_SIZE));
2356
2357         /* Configure and enable each RX queue. */
2358         rctl_bsize = 0;
2359         dev->rx_pkt_burst = eth_igb_recv_pkts;
2360         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2361                 uint64_t bus_addr;
2362                 uint32_t rxdctl;
2363
2364                 rxq = dev->data->rx_queues[i];
2365
2366                 /* Allocate buffers for descriptor rings and set up queue */
2367                 ret = igb_alloc_rx_queue_mbufs(rxq);
2368                 if (ret)
2369                         return ret;
2370
2371                 bus_addr = rxq->rx_ring_phys_addr;
2372                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2373                                 rxq->nb_rx_desc *
2374                                 sizeof(union e1000_adv_rx_desc));
2375                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2376                                 (uint32_t)(bus_addr >> 32));
2377                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2378
2379                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2380
2381                 /*
2382                  * Configure RX buffer size.
2383                  */
2384                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2385                         RTE_PKTMBUF_HEADROOM);
2386                 if (buf_size >= 1024) {
2387                         /*
2388                          * Configure the BSIZEPACKET field of the SRRCTL
2389                          * register of the queue.
2390                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2391                          * If this field is equal to 0b, then RCTL.BSIZE
2392                          * determines the RX packet buffer size.
2393                          */
2394                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2395                                    E1000_SRRCTL_BSIZEPKT_MASK);
2396                         buf_size = (uint16_t) ((srrctl &
2397                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2398                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2399
2400                         /* Account for dual VLAN (QinQ) tag length in the size check */
2401                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2402                                                 2 * VLAN_TAG_SIZE) > buf_size){
2403                                 if (!dev->data->scattered_rx)
2404                                         PMD_INIT_LOG(DEBUG,
2405                                                      "forcing scatter mode");
2406                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2407                                 dev->data->scattered_rx = 1;
2408                         }
2409                 } else {
2410                         /*
2411                          * Use BSIZE field of the device RCTL register.
2412                          */
2413                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2414                                 rctl_bsize = buf_size;
2415                         if (!dev->data->scattered_rx)
2416                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2417                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2418                         dev->data->scattered_rx = 1;
2419                 }
2420
2421                 /* Set if packets are dropped when no descriptors available */
2422                 if (rxq->drop_en)
2423                         srrctl |= E1000_SRRCTL_DROP_EN;
2424
2425                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2426
2427                 /* Enable this RX queue. */
2428                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2429                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2430                 rxdctl &= 0xFFF00000;
2431                 rxdctl |= (rxq->pthresh & 0x1F);
2432                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2433                 if (hw->mac.type == e1000_vfadapt) {
2434                         /*
2435                          * Workaround for the 82576 VF erratum:
2436                          * force WTHRESH to 1 so that descriptor
2437                          * write-back is not occasionally skipped.
2438                          */
2439                         rxdctl |= 0x10000;
2440                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2441                 }
2442                 else
2443                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2444                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2445         }
2446
2447         if (dev->data->dev_conf.rxmode.enable_scatter) {
2448                 if (!dev->data->scattered_rx)
2449                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2450                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2451                 dev->data->scattered_rx = 1;
2452         }
2453
2454         /*
2455          * Setup the HW Rx Head and Tail Descriptor Pointers.
2456          * This needs to be done after enable.
2457          */
2458         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2459                 rxq = dev->data->rx_queues[i];
2460                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2461                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2462         }
2463
2464         return 0;
2465 }
2466
2467 /*********************************************************************
2468  *
2469  *  Enable VF transmit unit.
2470  *
2471  **********************************************************************/
2472 void
2473 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2474 {
2475         struct e1000_hw     *hw;
2476         struct igb_tx_queue *txq;
2477         uint32_t txdctl;
2478         uint16_t i;
2479
2480         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2481
2482         /* Setup the Base and Length of the Tx Descriptor Rings. */
2483         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2484                 uint64_t bus_addr;
2485
2486                 txq = dev->data->tx_queues[i];
2487                 bus_addr = txq->tx_ring_phys_addr;
2488                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2489                                 txq->nb_tx_desc *
2490                                 sizeof(union e1000_adv_tx_desc));
2491                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2492                                 (uint32_t)(bus_addr >> 32));
2493                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2494
2495                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2496                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2497                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2498
2499                 /* Setup Transmit threshold registers. */
2500                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2501                 txdctl |= txq->pthresh & 0x1F;
2502                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2503                 if (hw->mac.type == e1000_82576) {
2504                         /*
2505                          * Workaround for the 82576 VF erratum:
2506                          * force WTHRESH to 1 so that descriptor
2507                          * write-back is not occasionally skipped.
2508                          */
2509                         txdctl |= 0x10000;
2510                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2511                 }
2512                 else
2513                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2514                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2515                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2516         }
2517
2518 }