[dpdk.git] / lib / librte_pmd_e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  */
34
35 #include <sys/queue.h>
36
37 #include <endian.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <stdarg.h>
44 #include <inttypes.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_byteorder.h>
48 #include <rte_common.h>
49 #include <rte_log.h>
50 #include <rte_debug.h>
51 #include <rte_pci.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_tailq.h>
57 #include <rte_eal.h>
58 #include <rte_per_lcore.h>
59 #include <rte_lcore.h>
60 #include <rte_atomic.h>
61 #include <rte_branch_prediction.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
68 #include <rte_prefetch.h>
69 #include <rte_udp.h>
70 #include <rte_tcp.h>
71 #include <rte_sctp.h>
72 #include <rte_string_fns.h>
73
74 #include "e1000_logs.h"
75 #include "e1000/e1000_api.h"
76 #include "e1000_ethdev.h"
77
78 static inline struct rte_mbuf *
79 rte_rxmbuf_alloc(struct rte_mempool *mp)
80 {
81         struct rte_mbuf *m;
82
83         m = __rte_mbuf_raw_alloc(mp);
84         __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
85         return (m);
86 }
87
88 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
89         (uint64_t) ((mb)->buf_physaddr +                   \
90                         (uint64_t) ((char *)((mb)->pkt.data) -     \
91                                 (char *)(mb)->buf_addr))
92
93 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
94         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
95
96 /**
97  * Structure associated with each descriptor of the RX ring of a RX queue.
98  */
99 struct igb_rx_entry {
100         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
101 };
102
103 /**
104  * Structure associated with each descriptor of the TX ring of a TX queue.
105  */
106 struct igb_tx_entry {
107         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
108         uint16_t next_id; /**< Index of next descriptor in ring. */
109         uint16_t last_id; /**< Index of last scattered descriptor. */
110 };
111
112 /**
113  * Structure associated with each RX queue.
114  */
115 struct igb_rx_queue {
116         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
117         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
118         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
119         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
120         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
121         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
122         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
123         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
124         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
125         uint16_t            rx_tail;    /**< current value of RDT register. */
126         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
127         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
128         uint16_t            queue_id;   /**< RX queue index. */
129         uint16_t            reg_idx;    /**< RX queue register index. */
130         uint8_t             port_id;    /**< Device port identifier. */
131         uint8_t             pthresh;    /**< Prefetch threshold register. */
132         uint8_t             hthresh;    /**< Host threshold register. */
133         uint8_t             wthresh;    /**< Write-back threshold register. */
134         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
135         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
136 };
137
138 /**
139  * Hardware context number
140  */
141 enum igb_advctx_num {
142         IGB_CTX_0    = 0, /**< CTX0    */
143         IGB_CTX_1    = 1, /**< CTX1    */
144         IGB_CTX_NUM  = 2, /**< CTX_NUM */
145 };
146
147 /**
148  * Structure used to check whether a new context descriptor needs to be built.
149  */
150 struct igb_advctx_info {
151         uint16_t flags;           /**< ol_flags related to context build. */
152         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
153         union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
154 };
155
156 /**
157  * Structure associated with each TX queue.
158  */
159 struct igb_tx_queue {
160         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
161         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
162         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
163         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
164         uint32_t               txd_type;      /**< Device-specific TXD type */
165         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
166         uint16_t               tx_tail; /**< Current value of TDT register. */
167         uint16_t               tx_head;
168         /**< Index of first used TX descriptor. */
169         uint16_t               queue_id; /**< TX queue index. */
170         uint16_t               reg_idx;  /**< TX queue register index. */
171         uint8_t                port_id;  /**< Device port identifier. */
172         uint8_t                pthresh;  /**< Prefetch threshold register. */
173         uint8_t                hthresh;  /**< Host threshold register. */
174         uint8_t                wthresh;  /**< Write-back threshold register. */
175         uint32_t               ctx_curr;
176         /**< Index of the currently used hardware context. */
177         uint32_t               ctx_start;
178         /**< Start context position for transmit queue. */
179         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
180         /**< Hardware context history.*/
181 };
182
183 #if 1
184 #define RTE_PMD_USE_PREFETCH
185 #endif
186
187 #ifdef RTE_PMD_USE_PREFETCH
188 #define rte_igb_prefetch(p)     rte_prefetch0(p)
189 #else
190 #define rte_igb_prefetch(p)     do {} while(0)
191 #endif
192
193 #ifdef RTE_PMD_PACKET_PREFETCH
194 #define rte_packet_prefetch(p) rte_prefetch1(p)
195 #else
196 #define rte_packet_prefetch(p)  do {} while(0)
197 #endif
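/*
 * RTE_PMD_USE_PREFETCH (forced on above) enables prefetching of RX
 * descriptors and software-ring entries, while RTE_PMD_PACKET_PREFETCH
 * additionally prefetches the packet data of received mbufs.
 */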
198
199 /*********************************************************************
200  *
201  *  TX function
202  *
203  **********************************************************************/
204
205 /*
206  * Advanced context descriptors are almost the same between igb and ixgbe.
207  * This is kept as a separate function to leave room for optimization here;
208  * rework is required to go with the pre-defined values.
209  */
210
211 static inline void
212 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
213                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
214                 uint16_t ol_flags, uint32_t vlan_macip_lens)
215 {
216         uint32_t type_tucmd_mlhl;
217         uint32_t mss_l4len_idx;
218         uint32_t ctx_idx, ctx_curr;
219         uint32_t cmp_mask;
220
221         ctx_curr = txq->ctx_curr;
222         ctx_idx = ctx_curr + txq->ctx_start;
223
224         cmp_mask = 0;
225         type_tucmd_mlhl = 0;
226
227         if (ol_flags & PKT_TX_VLAN_PKT) {
228                 cmp_mask |= TX_VLAN_CMP_MASK;
229         }
230
231         if (ol_flags & PKT_TX_IP_CKSUM) {
232                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
233                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
234         }
235
236         /* Specify which HW CTX to upload. */
237         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
238         switch (ol_flags & PKT_TX_L4_MASK) {
239         case PKT_TX_UDP_CKSUM:
240                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
241                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
242                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
243                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
244                 break;
245         case PKT_TX_TCP_CKSUM:
246                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
247                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
248                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
249                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
250                 break;
251         case PKT_TX_SCTP_CKSUM:
252                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
253                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
254                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
255                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
256                 break;
257         default:
258                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
259                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
260                 break;
261         }
262
263         txq->ctx_cache[ctx_curr].flags           = ol_flags;
264         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
265         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
266                 vlan_macip_lens & cmp_mask;
267
268         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
269         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
270         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
271         ctx_txd->seqnum_seed     = 0;
272 }
273
274 /*
275  * Check which hardware context can be used. Use the existing match
276  * or create a new context descriptor.
277  */
278 static inline uint32_t
279 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
280                 uint32_t vlan_macip_lens)
281 {
282         /* If match with the current context */
283         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
284                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
285                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
286                         return txq->ctx_curr;
287         }
288
289         /* If match with the second context */
290         txq->ctx_curr ^= 1;
291         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
292                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
293                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
294                         return txq->ctx_curr;
295         }
296
297         /* Mismatch: a new context descriptor must be built. */
298         return (IGB_CTX_NUM);
299 }
300
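/*
 * The two helpers below translate mbuf TX offload flags into descriptor
 * fields: checksum requests become the IXSM/TXSM bits of olinfo_status,
 * and the VLAN flag becomes the VLE bit of cmd_type_len.
 */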
301 static inline uint32_t
302 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
303 {
304         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
305         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
306         uint32_t tmp;
307
308         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
309         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
310         return tmp;
311 }
312
313 static inline uint32_t
314 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
315 {
316         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
317         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
318 }
319
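/*
 * Burst transmit function. Applications do not call this directly; it is
 * installed as dev->tx_pkt_burst in eth_igb_tx_queue_setup() below and is
 * reached through the generic burst API, e.g. (illustrative call only):
 *
 *     nb_sent = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
 *
 * It returns the number of packets actually placed on the TX ring, which
 * may be less than nb_pkts when the ring runs out of free descriptors.
 */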
320 uint16_t
321 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
322                uint16_t nb_pkts)
323 {
324         struct igb_tx_queue *txq;
325         struct igb_tx_entry *sw_ring;
326         struct igb_tx_entry *txe, *txn;
327         volatile union e1000_adv_tx_desc *txr;
328         volatile union e1000_adv_tx_desc *txd;
329         struct rte_mbuf     *tx_pkt;
330         struct rte_mbuf     *m_seg;
331         uint64_t buf_dma_addr;
332         uint32_t olinfo_status;
333         uint32_t cmd_type_len;
334         uint32_t pkt_len;
335         uint16_t slen;
336         uint16_t ol_flags;
337         uint16_t tx_end;
338         uint16_t tx_id;
339         uint16_t tx_last;
340         uint16_t nb_tx;
341         uint16_t tx_ol_req;
342         uint32_t new_ctx = 0;
343         uint32_t ctx = 0;
344         uint32_t vlan_macip_lens;
345
346         txq = tx_queue;
347         sw_ring = txq->sw_ring;
348         txr     = txq->tx_ring;
349         tx_id   = txq->tx_tail;
350         txe = &sw_ring[tx_id];
351
352         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
353                 tx_pkt = *tx_pkts++;
354                 pkt_len = tx_pkt->pkt.pkt_len;
355
356                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
357
358                 /*
359                  * The number of descriptors that must be allocated for a
360                  * packet is the number of segments of that packet, plus 1
361                  * Context Descriptor for the VLAN Tag Identifier, if any.
362                  * Determine the last TX descriptor to allocate in the TX ring
363                  * for the packet, starting from the current position (tx_id)
364                  * in the ring.
365                  */
366                 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
367
368                 ol_flags = tx_pkt->ol_flags;
369                 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
370                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
371
372                 /* Check whether a Context Descriptor needs to be built. */
373                 if (tx_ol_req) {
374                         ctx = what_advctx_update(txq, tx_ol_req,
375                                 vlan_macip_lens);
376                         /* Only allocate a context descriptor if required. */
377                         new_ctx = (ctx == IGB_CTX_NUM);
378                         ctx = txq->ctx_curr;
379                         tx_last = (uint16_t) (tx_last + new_ctx);
380                 }
381                 if (tx_last >= txq->nb_tx_desc)
382                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
383
384                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
385                            " tx_first=%u tx_last=%u\n",
386                            (unsigned) txq->port_id,
387                            (unsigned) txq->queue_id,
388                            (unsigned) pkt_len,
389                            (unsigned) tx_id,
390                            (unsigned) tx_last);
391
392                 /*
393                  * Check if there are enough free descriptors in the TX ring
394                  * to transmit the next packet.
395                  * This operation is based on the two following rules:
396                  *
397                  *   1- Only check that the last needed TX descriptor can be
398                  *      allocated (by construction, if that descriptor is free,
399                  *      all intermediate ones are also free).
400                  *
401                  *      For this purpose, the index of the last TX descriptor
402                  *      used for a packet (the "last descriptor" of a packet)
403                  *      is recorded in the TX entries (the last one included)
404                  *      that are associated with all TX descriptors allocated
405                  *      for that packet.
406                  *
407                  *   2- Avoid allocating the last free TX descriptor of the
408                  *      ring, in order to never set the TDT register to the
409                  *      same value stored in parallel by the NIC in the TDH
410                  *      register, which would make the TX engine of the NIC
411                  *      enter a deadlock situation.
412                  *
413                  *      By extension, avoid allocating a free descriptor that
414                  *      belongs to the last set of free descriptors allocated
415                  *      to the same packet previously transmitted.
416                  */
417
418                 /*
419                  * The "last descriptor" of the packet that previously used
420                  * the descriptor this packet needs to allocate last (tx_last).
421                  */
422                 tx_end = sw_ring[tx_last].last_id;
423
424                 /*
425                  * The next descriptor following that "last descriptor" in the
426                  * ring.
427                  */
428                 tx_end = sw_ring[tx_end].next_id;
429
430                 /*
431                  * The "last descriptor" associated with that next descriptor.
432                  */
433                 tx_end = sw_ring[tx_end].last_id;
434
435                 /*
436                  * Check that this descriptor is free.
437                  */
438                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
439                         if (nb_tx == 0)
440                                 return (0);
441                         goto end_of_tx;
442                 }
443
444                 /*
445                  * Set common flags of all TX Data Descriptors.
446                  *
447                  * The following bits must be set in all Data Descriptors:
448                  *   - E1000_ADVTXD_DTYP_DATA
449                  *   - E1000_ADVTXD_DCMD_DEXT
450                  *
451                  * The following bits must be set in the first Data Descriptor
452                  * and are ignored in the other ones:
453                  *   - E1000_ADVTXD_DCMD_IFCS
454                  *   - E1000_ADVTXD_MAC_1588
455                  *   - E1000_ADVTXD_DCMD_VLE
456                  *
457                  * The following bits must only be set in the last Data
458                  * Descriptor:
459                  *   - E1000_TXD_CMD_EOP
460                  *
461                  * The following bits can be set in any Data Descriptor, but
462                  * are only set in the last Data Descriptor:
463                  *   - E1000_TXD_CMD_RS
464                  */
465                 cmd_type_len = txq->txd_type |
466                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
467                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
468 #if defined(RTE_LIBRTE_IEEE1588)
469                 if (ol_flags & PKT_TX_IEEE1588_TMST)
470                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
471 #endif
472                 if (tx_ol_req) {
473                         /* Setup TX Advanced context descriptor if required */
474                         if (new_ctx) {
475                                 volatile struct e1000_adv_tx_context_desc *
476                                     ctx_txd;
477
478                                 ctx_txd = (volatile struct
479                                     e1000_adv_tx_context_desc *)
480                                     &txr[tx_id];
481
482                                 txn = &sw_ring[txe->next_id];
483                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
484
485                                 if (txe->mbuf != NULL) {
486                                         rte_pktmbuf_free_seg(txe->mbuf);
487                                         txe->mbuf = NULL;
488                                 }
489
490                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
491                                     vlan_macip_lens);
492
493                                 txe->last_id = tx_last;
494                                 tx_id = txe->next_id;
495                                 txe = txn;
496                         }
497
498                         /* Setup the TX Advanced Data Descriptor */
499                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
500                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
501                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
502                 }
503
504                 m_seg = tx_pkt;
505                 do {
506                         txn = &sw_ring[txe->next_id];
507                         txd = &txr[tx_id];
508
509                         if (txe->mbuf != NULL)
510                                 rte_pktmbuf_free_seg(txe->mbuf);
511                         txe->mbuf = m_seg;
512
513                         /*
514                          * Set up transmit descriptor.
515                          */
516                         slen = (uint16_t) m_seg->pkt.data_len;
517                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
518                         txd->read.buffer_addr =
519                                 rte_cpu_to_le_64(buf_dma_addr);
520                         txd->read.cmd_type_len =
521                                 rte_cpu_to_le_32(cmd_type_len | slen);
522                         txd->read.olinfo_status =
523                                 rte_cpu_to_le_32(olinfo_status);
524                         txe->last_id = tx_last;
525                         tx_id = txe->next_id;
526                         txe = txn;
527                         m_seg = m_seg->pkt.next;
528                 } while (m_seg != NULL);
529
530                 /*
531                  * The last packet data descriptor needs End Of Packet (EOP)
532                  * and Report Status (RS).
533                  */
534                 txd->read.cmd_type_len |=
535                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
536         }
537  end_of_tx:
538         rte_wmb();
539
540         /*
541          * Set the Transmit Descriptor Tail (TDT).
542          */
543         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
544         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
545                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
546                    (unsigned) tx_id, (unsigned) nb_tx);
547         txq->tx_tail = tx_id;
548
549         return (nb_tx);
550 }
551
552 /*********************************************************************
553  *
554  *  RX functions
555  *
556  **********************************************************************/
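/*
 * The helpers below convert fields of the advanced RX descriptor
 * write-back format (hlen_type_rss, status and error bits) into the
 * corresponding PKT_RX_* flags stored in the mbuf ol_flags field.
 */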
557 static inline uint16_t
558 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
559 {
560         uint16_t pkt_flags;
561
562         static uint16_t ip_pkt_types_map[16] = {
563                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
564                 PKT_RX_IPV6_HDR, 0, 0, 0,
565                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
566                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
567         };
568
569 #if defined(RTE_LIBRTE_IEEE1588)
570         static uint32_t ip_pkt_etqf_map[8] = {
571                 0, 0, 0, PKT_RX_IEEE1588_PTP,
572                 0, 0, 0, 0,
573         };
574
575         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
576                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
577                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
578 #else
579         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
580                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
581 #endif
582         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
583                                                 0 : PKT_RX_RSS_HASH));
584 }
585
586 static inline uint16_t
587 rx_desc_status_to_pkt_flags(uint32_t rx_status)
588 {
589         uint16_t pkt_flags;
590
591         /* Check if VLAN present */
592         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
593                                                 PKT_RX_VLAN_PKT : 0);
594
595 #if defined(RTE_LIBRTE_IEEE1588)
596         if (rx_status & E1000_RXD_STAT_TMST)
597                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
598 #endif
599         return pkt_flags;
600 }
601
602 static inline uint16_t
603 rx_desc_error_to_pkt_flags(uint32_t rx_status)
604 {
605         /*
606          * Bit 30: IPE, IPv4 checksum error
607          * Bit 29: L4I, L4 integrity error
608          */
609
610         static uint16_t error_to_pkt_flags_map[4] = {
611                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
612                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
613         };
614         return error_to_pkt_flags_map[(rx_status >>
615                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
616 }
617
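/*
 * Burst receive function for the non-scattered case: each received packet
 * is expected to fit in a single mbuf data buffer. Packets spanning
 * several descriptors are handled by eth_igb_recv_scattered_pkts() below.
 */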
618 uint16_t
619 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
620                uint16_t nb_pkts)
621 {
622         struct igb_rx_queue *rxq;
623         volatile union e1000_adv_rx_desc *rx_ring;
624         volatile union e1000_adv_rx_desc *rxdp;
625         struct igb_rx_entry *sw_ring;
626         struct igb_rx_entry *rxe;
627         struct rte_mbuf *rxm;
628         struct rte_mbuf *nmb;
629         union e1000_adv_rx_desc rxd;
630         uint64_t dma_addr;
631         uint32_t staterr;
632         uint32_t hlen_type_rss;
633         uint16_t pkt_len;
634         uint16_t rx_id;
635         uint16_t nb_rx;
636         uint16_t nb_hold;
637         uint16_t pkt_flags;
638
639         nb_rx = 0;
640         nb_hold = 0;
641         rxq = rx_queue;
642         rx_id = rxq->rx_tail;
643         rx_ring = rxq->rx_ring;
644         sw_ring = rxq->sw_ring;
645         while (nb_rx < nb_pkts) {
646                 /*
647                  * The order of operations here is important as the DD status
648                  * bit must not be read after any other descriptor fields.
649                  * rx_ring and rxdp are pointing to volatile data so the order
650                  * of accesses cannot be reordered by the compiler. If they were
651                  * not volatile, they could be reordered which could lead to
652                  * using invalid descriptor fields when read from rxd.
653                  */
654                 rxdp = &rx_ring[rx_id];
655                 staterr = rxdp->wb.upper.status_error;
656                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
657                         break;
658                 rxd = *rxdp;
659
660                 /*
661                  * End of packet.
662                  *
663                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
664                  * likely to be invalid and to be dropped by the various
665                  * validation checks performed by the network stack.
666                  *
667                  * Allocate a new mbuf to replenish the RX ring descriptor.
668                  * If the allocation fails:
669                  *    - arrange for that RX descriptor to be the first one
670                  *      being parsed the next time the receive function is
671                  *      invoked [on the same queue].
672                  *
673                  *    - Stop parsing the RX ring and return immediately.
674                  *
675                  * This policy does not drop the packet received in the RX
676                  * descriptor for which the allocation of a new mbuf failed.
677                  * Thus, it allows that packet to be retrieved later if
678                  * mbufs have been freed in the meantime.
679                  * As a side effect, holding RX descriptors instead of
680                  * systematically giving them back to the NIC may lead to
681                  * RX ring exhaustion situations.
682                  * However, the NIC can gracefully prevent such situations
683                  * from happening by sending specific "back-pressure" flow control
684                  * frames to its peer(s).
685                  */
686                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
687                            "staterr=0x%x pkt_len=%u\n",
688                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
689                            (unsigned) rx_id, (unsigned) staterr,
690                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
691
692                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
693                 if (nmb == NULL) {
694                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
695                                    "queue_id=%u\n", (unsigned) rxq->port_id,
696                                    (unsigned) rxq->queue_id);
697                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
698                         break;
699                 }
700
701                 nb_hold++;
702                 rxe = &sw_ring[rx_id];
703                 rx_id++;
704                 if (rx_id == rxq->nb_rx_desc)
705                         rx_id = 0;
706
707                 /* Prefetch next mbuf while processing current one. */
708                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
709
710                 /*
711                  * When the next RX descriptor is on a cache-line boundary,
712                  * prefetch the next 4 RX descriptors and the next 8 pointers
713                  * to mbufs.
714                  */
715                 if ((rx_id & 0x3) == 0) {
716                         rte_igb_prefetch(&rx_ring[rx_id]);
717                         rte_igb_prefetch(&sw_ring[rx_id]);
718                 }
719
720                 rxm = rxe->mbuf;
721                 rxe->mbuf = nmb;
722                 dma_addr =
723                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
724                 rxdp->read.hdr_addr = dma_addr;
725                 rxdp->read.pkt_addr = dma_addr;
726
727                 /*
728                  * Initialize the returned mbuf.
729                  * 1) setup generic mbuf fields:
730                  *    - number of segments,
731                  *    - next segment,
732                  *    - packet length,
733                  *    - RX port identifier.
734                  * 2) integrate hardware offload data, if any:
735                  *    - RSS flag & hash,
736                  *    - IP checksum flag,
737                  *    - VLAN TCI, if any,
738                  *    - error flags.
739                  */
740                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
741                                       rxq->crc_len);
742                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
743                 rte_packet_prefetch(rxm->pkt.data);
744                 rxm->pkt.nb_segs = 1;
745                 rxm->pkt.next = NULL;
746                 rxm->pkt.pkt_len = pkt_len;
747                 rxm->pkt.data_len = pkt_len;
748                 rxm->pkt.in_port = rxq->port_id;
749
750                 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
751                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
752                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
753                 rxm->pkt.vlan_macip.f.vlan_tci =
754                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
755
756                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
757                 pkt_flags = (uint16_t)(pkt_flags |
758                                 rx_desc_status_to_pkt_flags(staterr));
759                 pkt_flags = (uint16_t)(pkt_flags |
760                                 rx_desc_error_to_pkt_flags(staterr));
761                 rxm->ol_flags = pkt_flags;
762
763                 /*
764                  * Store the mbuf address into the next entry of the array
765                  * of returned packets.
766                  */
767                 rx_pkts[nb_rx++] = rxm;
768         }
769         rxq->rx_tail = rx_id;
770
771         /*
772          * If the number of free RX descriptors is greater than the RX free
773          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
774          * register.
775          * Update the RDT with the value of the last processed RX descriptor
776          * minus 1, to guarantee that the RDT register is never equal to the
777          * RDH register, which creates a "full" ring situation from the
778          * hardware point of view...
779          */
780         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
781         if (nb_hold > rxq->rx_free_thresh) {
782                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
783                            "nb_hold=%u nb_rx=%u\n",
784                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
785                            (unsigned) rx_id, (unsigned) nb_hold,
786                            (unsigned) nb_rx);
787                 rx_id = (uint16_t) ((rx_id == 0) ?
788                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
789                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
790                 nb_hold = 0;
791         }
792         rxq->nb_rx_hold = nb_hold;
793         return (nb_rx);
794 }
795
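/*
 * Burst receive function for the scattered case: a packet may span several
 * RX descriptors, so mbufs are chained through pkt.next until a descriptor
 * with the EOP bit set is found. A partially assembled packet, if any, is
 * saved in the queue (pkt_first_seg/pkt_last_seg) across calls.
 */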
796 uint16_t
797 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
798                          uint16_t nb_pkts)
799 {
800         struct igb_rx_queue *rxq;
801         volatile union e1000_adv_rx_desc *rx_ring;
802         volatile union e1000_adv_rx_desc *rxdp;
803         struct igb_rx_entry *sw_ring;
804         struct igb_rx_entry *rxe;
805         struct rte_mbuf *first_seg;
806         struct rte_mbuf *last_seg;
807         struct rte_mbuf *rxm;
808         struct rte_mbuf *nmb;
809         union e1000_adv_rx_desc rxd;
810         uint64_t dma; /* Physical address of mbuf data buffer */
811         uint32_t staterr;
812         uint32_t hlen_type_rss;
813         uint16_t rx_id;
814         uint16_t nb_rx;
815         uint16_t nb_hold;
816         uint16_t data_len;
817         uint16_t pkt_flags;
818
819         nb_rx = 0;
820         nb_hold = 0;
821         rxq = rx_queue;
822         rx_id = rxq->rx_tail;
823         rx_ring = rxq->rx_ring;
824         sw_ring = rxq->sw_ring;
825
826         /*
827          * Retrieve RX context of current packet, if any.
828          */
829         first_seg = rxq->pkt_first_seg;
830         last_seg = rxq->pkt_last_seg;
831
832         while (nb_rx < nb_pkts) {
833         next_desc:
834                 /*
835                  * The order of operations here is important as the DD status
836                  * bit must not be read after any other descriptor fields.
837                  * rx_ring and rxdp are pointing to volatile data so the order
838                  * of accesses cannot be reordered by the compiler. If they were
839                  * not volatile, they could be reordered which could lead to
840                  * using invalid descriptor fields when read from rxd.
841                  */
842                 rxdp = &rx_ring[rx_id];
843                 staterr = rxdp->wb.upper.status_error;
844                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
845                         break;
846                 rxd = *rxdp;
847
848                 /*
849                  * Descriptor done.
850                  *
851                  * Allocate a new mbuf to replenish the RX ring descriptor.
852                  * If the allocation fails:
853                  *    - arrange for that RX descriptor to be the first one
854                  *      being parsed the next time the receive function is
855                  *      invoked [on the same queue].
856                  *
857                  *    - Stop parsing the RX ring and return immediately.
858                  *
859                  * This policy does not drop the packet received in the RX
860                  * descriptor for which the allocation of a new mbuf failed.
861                  * Thus, it allows that packet to be retrieved later if
862                  * mbufs have been freed in the meantime.
863                  * As a side effect, holding RX descriptors instead of
864                  * systematically giving them back to the NIC may lead to
865                  * RX ring exhaustion situations.
866                  * However, the NIC can gracefully prevent such situations
867                  * from happening by sending specific "back-pressure" flow control
868                  * frames to its peer(s).
869                  */
870                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
871                            "staterr=0x%x data_len=%u\n",
872                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
873                            (unsigned) rx_id, (unsigned) staterr,
874                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
875
876                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
877                 if (nmb == NULL) {
878                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
879                                    "queue_id=%u\n", (unsigned) rxq->port_id,
880                                    (unsigned) rxq->queue_id);
881                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
882                         break;
883                 }
884
885                 nb_hold++;
886                 rxe = &sw_ring[rx_id];
887                 rx_id++;
888                 if (rx_id == rxq->nb_rx_desc)
889                         rx_id = 0;
890
891                 /* Prefetch next mbuf while processing current one. */
892                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
893
894                 /*
895                  * When the next RX descriptor is on a cache-line boundary,
896                  * prefetch the next 4 RX descriptors and the next 8 pointers
897                  * to mbufs.
898                  */
899                 if ((rx_id & 0x3) == 0) {
900                         rte_igb_prefetch(&rx_ring[rx_id]);
901                         rte_igb_prefetch(&sw_ring[rx_id]);
902                 }
903
904                 /*
905                  * Update RX descriptor with the physical address of the new
906                  * data buffer of the new allocated mbuf.
907                  */
908                 rxm = rxe->mbuf;
909                 rxe->mbuf = nmb;
910                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
911                 rxdp->read.pkt_addr = dma;
912                 rxdp->read.hdr_addr = dma;
913
914                 /*
915                  * Set data length & data buffer address of mbuf.
916                  */
917                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
918                 rxm->pkt.data_len = data_len;
919                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
920
921                 /*
922                  * If this is the first buffer of the received packet,
923                  * set the pointer to the first mbuf of the packet and
924                  * initialize its context.
925                  * Otherwise, update the total length and the number of segments
926                  * of the current scattered packet, and update the pointer to
927                  * the last mbuf of the current packet.
928                  */
929                 if (first_seg == NULL) {
930                         first_seg = rxm;
931                         first_seg->pkt.pkt_len = data_len;
932                         first_seg->pkt.nb_segs = 1;
933                 } else {
934                         first_seg->pkt.pkt_len += data_len;
935                         first_seg->pkt.nb_segs++;
936                         last_seg->pkt.next = rxm;
937                 }
938
939                 /*
940                  * If this is not the last buffer of the received packet,
941                  * update the pointer to the last mbuf of the current scattered
942                  * packet and continue to parse the RX ring.
943                  */
944                 if (! (staterr & E1000_RXD_STAT_EOP)) {
945                         last_seg = rxm;
946                         goto next_desc;
947                 }
948
949                 /*
950                  * This is the last buffer of the received packet.
951                  * If the CRC is not stripped by the hardware:
952                  *   - Subtract the CRC length from the total packet length.
953                  *   - If the last buffer only contains the whole CRC or a part
954                  *     of it, free the mbuf associated to the last buffer.
955                  *     If part of the CRC is also contained in the previous
956                  *     mbuf, subtract the length of that CRC part from the
957                  *     data length of the previous mbuf.
958                  */
959                 rxm->pkt.next = NULL;
960                 if (unlikely(rxq->crc_len > 0)) {
961                         first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
962                         if (data_len <= ETHER_CRC_LEN) {
963                                 rte_pktmbuf_free_seg(rxm);
964                                 first_seg->pkt.nb_segs--;
965                                 last_seg->pkt.data_len = (uint16_t)
966                                         (last_seg->pkt.data_len -
967                                          (ETHER_CRC_LEN - data_len));
968                                 last_seg->pkt.next = NULL;
969                         } else
970                                 rxm->pkt.data_len =
971                                         (uint16_t) (data_len - ETHER_CRC_LEN);
972                 }
973
974                 /*
975                  * Initialize the first mbuf of the returned packet:
976                  *    - RX port identifier,
977                  *    - hardware offload data, if any:
978                  *      - RSS flag & hash,
979                  *      - IP checksum flag,
980                  *      - VLAN TCI, if any,
981                  *      - error flags.
982                  */
983                 first_seg->pkt.in_port = rxq->port_id;
984                 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
985
986                 /*
987                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
988                  * set in the pkt_flags field.
989                  */
990                 first_seg->pkt.vlan_macip.f.vlan_tci =
991                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
992                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
993                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
994                 pkt_flags = (uint16_t)(pkt_flags |
995                                 rx_desc_status_to_pkt_flags(staterr));
996                 pkt_flags = (uint16_t)(pkt_flags |
997                                 rx_desc_error_to_pkt_flags(staterr));
998                 first_seg->ol_flags = pkt_flags;
999
1000                 /* Prefetch data of first segment, if configured to do so. */
1001                 rte_packet_prefetch(first_seg->pkt.data);
1002
1003                 /*
1004                  * Store the mbuf address into the next entry of the array
1005                  * of returned packets.
1006                  */
1007                 rx_pkts[nb_rx++] = first_seg;
1008
1009                 /*
1010                  * Set up the receive context for a new packet.
1011                  */
1012                 first_seg = NULL;
1013         }
1014
1015         /*
1016          * Record index of the next RX descriptor to probe.
1017          */
1018         rxq->rx_tail = rx_id;
1019
1020         /*
1021          * Save receive context.
1022          */
1023         rxq->pkt_first_seg = first_seg;
1024         rxq->pkt_last_seg = last_seg;
1025
1026         /*
1027          * If the number of free RX descriptors is greater than the RX free
1028          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1029          * register.
1030          * Update the RDT with the value of the last processed RX descriptor
1031          * minus 1, to guarantee that the RDT register is never equal to the
1032          * RDH register, which creates a "full" ring situation from the
1033          * hardware point of view...
1034          */
1035         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1036         if (nb_hold > rxq->rx_free_thresh) {
1037                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1038                            "nb_hold=%u nb_rx=%u\n",
1039                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1040                            (unsigned) rx_id, (unsigned) nb_hold,
1041                            (unsigned) nb_rx);
1042                 rx_id = (uint16_t) ((rx_id == 0) ?
1043                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1044                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1045                 nb_hold = 0;
1046         }
1047         rxq->nb_rx_hold = nb_hold;
1048         return (nb_rx);
1049 }
1050
1051 /*
1052  * Rings setup and release.
1053  *
1054  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1055  * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
1056  * This also optimizes the cache line size effect;
1057  * the hardware supports cache line sizes of up to 128 bytes.
1058  */
1059 #define IGB_ALIGN 128
1060
1061 /*
1062  * Maximum number of Ring Descriptors.
1063  *
1064  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1065  * descriptors must satisfy the following condition:
1066  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1067  */
1068 #define IGB_MIN_RING_DESC 32
1069 #define IGB_MAX_RING_DESC 4096
1070
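/*
 * Reserve the memzone that holds a queue's descriptor ring. If a memzone
 * with this name already exists (e.g. the queue is being set up again),
 * reuse it instead of reserving a new one.
 */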
1071 static const struct rte_memzone *
1072 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1073                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1074 {
1075         char z_name[RTE_MEMZONE_NAMESIZE];
1076         const struct rte_memzone *mz;
1077
1078         rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1079                         dev->driver->pci_drv.name, ring_name,
1080                                 dev->data->port_id, queue_id);
1081         mz = rte_memzone_lookup(z_name);
1082         if (mz)
1083                 return mz;
1084
1085         return rte_memzone_reserve_aligned(z_name, ring_size,
1086                         socket_id, 0, IGB_ALIGN);
1087 }
1088
1089 static void
1090 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1091 {
1092         unsigned i;
1093
1094         if (txq->sw_ring != NULL) {
1095                 for (i = 0; i < txq->nb_tx_desc; i++) {
1096                         if (txq->sw_ring[i].mbuf != NULL) {
1097                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1098                                 txq->sw_ring[i].mbuf = NULL;
1099                         }
1100                 }
1101         }
1102 }
1103
1104 static void
1105 igb_tx_queue_release(struct igb_tx_queue *txq)
1106 {
1107         if (txq != NULL) {
1108                 igb_tx_queue_release_mbufs(txq);
1109                 rte_free(txq->sw_ring);
1110                 rte_free(txq);
1111         }
1112 }
1113
1114 void
1115 eth_igb_tx_queue_release(void *txq)
1116 {
1117         igb_tx_queue_release(txq);
1118 }
1119
1120 static void
1121 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1122 {
1123         txq->tx_head = 0;
1124         txq->tx_tail = 0;
1125         txq->ctx_curr = 0;
1126         memset((void*)&txq->ctx_cache, 0,
1127                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1128 }
1129
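/*
 * Reset a TX queue to its post-initialization state: zero the hardware
 * ring, link the software ring entries into a circular list via next_id,
 * and mark every descriptor as done (DD) so that the transmit path
 * initially sees the whole ring as free.
 */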
1130 static void
1131 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1132 {
1133         struct igb_tx_entry *txe = txq->sw_ring;
1134         uint32_t size;
1135         uint16_t i, prev;
1136         struct e1000_hw *hw;
1137
1138         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1139         size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
1140         /* Zero out HW ring memory */
1141         for (i = 0; i < size; i++) {
1142                 ((volatile char *)txq->tx_ring)[i] = 0;
1143         }
1144
1145         /* Initialize ring entries */
1146         prev = (uint16_t)(txq->nb_tx_desc - 1);
1147         for (i = 0; i < txq->nb_tx_desc; i++) {
1148                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1149
1150                 txd->wb.status = E1000_TXD_STAT_DD;
1151                 txe[i].mbuf = NULL;
1152                 txe[i].last_id = i;
1153                 txe[prev].next_id = i;
1154                 prev = i;
1155         }
1156
1157         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1158         /* 82575 specific, each tx queue will use 2 hw contexts */
1159         if (hw->mac.type == e1000_82575)
1160                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1161
1162         igb_reset_tx_queue_stat(txq);
1163 }
1164
1165 int
1166 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1167                          uint16_t queue_idx,
1168                          uint16_t nb_desc,
1169                          unsigned int socket_id,
1170                          const struct rte_eth_txconf *tx_conf)
1171 {
1172         const struct rte_memzone *tz;
1173         struct igb_tx_queue *txq;
1174         struct e1000_hw     *hw;
1175         uint32_t size;
1176
1177         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1178
1179         /*
1180          * Validate number of transmit descriptors.
1181          * It must not exceed the hardware maximum, and must be a multiple
1182          * of IGB_ALIGN.
1183          */
1184         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1185             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1186                 return -EINVAL;
1187         }
1188
1189         /*
1190          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1191          * driver.
1192          */
1193         if (tx_conf->tx_free_thresh != 0)
1194                 RTE_LOG(WARNING, PMD,
1195                         "The tx_free_thresh parameter is not "
1196                         "used for the 1G driver.\n");
1197         if (tx_conf->tx_rs_thresh != 0)
1198                 RTE_LOG(WARNING, PMD,
1199                         "The tx_rs_thresh parameter is not "
1200                         "used for the 1G driver.\n");
1201         if (tx_conf->tx_thresh.wthresh == 0)
1202                 RTE_LOG(WARNING, PMD,
1203                         "To improve 1G driver performance, consider setting "
1204                         "the TX WTHRESH value to 4, 8, or 16.\n");
1205
1206         /* Free memory prior to re-allocation if needed */
1207         if (dev->data->tx_queues[queue_idx] != NULL)
1208                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1209
1210         /* First allocate the tx queue data structure */
1211         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1212                                                         CACHE_LINE_SIZE);
1213         if (txq == NULL)
1214                 return (-ENOMEM);
1215
1216         /*
1217          * Allocate TX ring hardware descriptors. A memzone large enough to
1218          * handle the maximum ring size is allocated in order to allow for
1219          * resizing in later calls to the queue setup function.
1220          */
1221         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1222         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1223                                         size, socket_id);
1224         if (tz == NULL) {
1225                 igb_tx_queue_release(txq);
1226                 return (-ENOMEM);
1227         }
1228
1229         txq->nb_tx_desc = nb_desc;
1230         txq->pthresh = tx_conf->tx_thresh.pthresh;
1231         txq->hthresh = tx_conf->tx_thresh.hthresh;
1232         txq->wthresh = tx_conf->tx_thresh.wthresh;
1233         txq->queue_id = queue_idx;
1234         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1235                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1236         txq->port_id = dev->data->port_id;
1237
1238         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1239         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1240         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1241
1242         /* Allocate software ring */
1243         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1244                                    sizeof(struct igb_tx_entry) * nb_desc,
1245                                    CACHE_LINE_SIZE);
1246         if (txq->sw_ring == NULL) {
1247                 igb_tx_queue_release(txq);
1248                 return (-ENOMEM);
1249         }
1250         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1251                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1252
1253         igb_reset_tx_queue(txq, dev);
1254         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1255         dev->data->tx_queues[queue_idx] = txq;
1256
1257         return (0);
1258 }
1259
1260 static void
1261 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1262 {
1263         unsigned i;
1264
1265         if (rxq->sw_ring != NULL) {
1266                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1267                         if (rxq->sw_ring[i].mbuf != NULL) {
1268                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1269                                 rxq->sw_ring[i].mbuf = NULL;
1270                         }
1271                 }
1272         }
1273 }
1274
1275 static void
1276 igb_rx_queue_release(struct igb_rx_queue *rxq)
1277 {
1278         if (rxq != NULL) {
1279                 igb_rx_queue_release_mbufs(rxq);
1280                 rte_free(rxq->sw_ring);
1281                 rte_free(rxq);
1282         }
1283 }
1284
1285 void
1286 eth_igb_rx_queue_release(void *rxq)
1287 {
1288         igb_rx_queue_release(rxq);
1289 }
1290
1291 static void
1292 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1293 {
1294         unsigned size;
1295         unsigned i;
1296
1297         /* Zero out HW ring memory */
1298         size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
1299         for (i = 0; i < size; i++) {
1300                 ((volatile char *)rxq->rx_ring)[i] = 0;
1301         }
1302
1303         rxq->rx_tail = 0;
1304         rxq->pkt_first_seg = NULL;
1305         rxq->pkt_last_seg = NULL;
1306 }
1307
1308 int
1309 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1310                          uint16_t queue_idx,
1311                          uint16_t nb_desc,
1312                          unsigned int socket_id,
1313                          const struct rte_eth_rxconf *rx_conf,
1314                          struct rte_mempool *mp)
1315 {
1316         const struct rte_memzone *rz;
1317         struct igb_rx_queue *rxq;
1318         struct e1000_hw     *hw;
1319         unsigned int size;
1320
1321         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1322
1323         /*
1324          * Validate number of receive descriptors.
1325          * It must not exceed hardware maximum, and must be multiple
1326          * of IGB_ALIGN.
1327          */
1328         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1329             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1330                 return (-EINVAL);
1331         }
1332
1333         /* Free memory prior to re-allocation if needed */
1334         if (dev->data->rx_queues[queue_idx] != NULL) {
1335                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1336                 dev->data->rx_queues[queue_idx] = NULL;
1337         }
1338
1339         /* First allocate the RX queue data structure. */
1340         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1341                           CACHE_LINE_SIZE);
1342         if (rxq == NULL)
1343                 return (-ENOMEM);
1344         rxq->mb_pool = mp;
1345         rxq->nb_rx_desc = nb_desc;
1346         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1347         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1348         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1349         rxq->drop_en = rx_conf->rx_drop_en;
1350         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1351         rxq->queue_id = queue_idx;
1352         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1353                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1354         rxq->port_id = dev->data->port_id;
1355         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1356                                   ETHER_CRC_LEN);
1357
1358         /*
1359          *  Allocate RX ring hardware descriptors. A memzone large enough to
1360          *  handle the maximum ring size is allocated in order to allow for
1361          *  resizing in later calls to the queue setup function.
1362          */
1363         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1364         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1365         if (rz == NULL) {
1366                 igb_rx_queue_release(rxq);
1367                 return (-ENOMEM);
1368         }
1369         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1370         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1371         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1372         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1373
1374         /* Allocate software ring. */
1375         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1376                                    sizeof(struct igb_rx_entry) * nb_desc,
1377                                    CACHE_LINE_SIZE);
1378         if (rxq->sw_ring == NULL) {
1379                 igb_rx_queue_release(rxq);
1380                 return (-ENOMEM);
1381         }
1382         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1383                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1384
1385         dev->data->rx_queues[queue_idx] = rxq;
1386         igb_reset_rx_queue(rxq);
1387
1388         return 0;
1389 }
1390
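     /*
      * Return an estimate of the number of used (DD-set) descriptors in the
      * RX queue.  The ring is scanned in steps of IGB_RXQ_SCAN_INTERVAL
      * descriptors, so the result is a multiple of that interval.
      */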
1391 uint32_t 
1392 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1393 {
1394 #define IGB_RXQ_SCAN_INTERVAL 4
1395         volatile union e1000_adv_rx_desc *rxdp;
1396         struct igb_rx_queue *rxq;
1397         uint32_t desc = 0;
1398
1399         if (rx_queue_id >= dev->data->nb_rx_queues) {
1400                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1401                 return 0;
1402         }
1403
1404         rxq = dev->data->rx_queues[rx_queue_id];
1405         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1406
1407         while ((desc < rxq->nb_rx_desc) &&
1408                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1409                 desc += IGB_RXQ_SCAN_INTERVAL;
1410                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1411                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1412                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1413                                 desc - rxq->nb_rx_desc]);
1414         }
1415
1416         return desc;
1417 }
1418
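     /*
      * Report whether the descriptor 'offset' entries past the current RX
      * tail has been written back by the hardware (DD bit set).
      */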
1419 int
1420 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1421 {
1422         volatile union e1000_adv_rx_desc *rxdp;
1423         struct igb_rx_queue *rxq = rx_queue;
1424         uint32_t desc;
1425
1426         if (unlikely(offset >= rxq->nb_rx_desc))
1427                 return 0;
1428         desc = rxq->rx_tail + offset;
1429         if (desc >= rxq->nb_rx_desc)
1430                 desc -= rxq->nb_rx_desc;
1431
1432         rxdp = &rxq->rx_ring[desc];
1433         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1434 }
1435
1436 void
1437 igb_dev_clear_queues(struct rte_eth_dev *dev)
1438 {
1439         uint16_t i;
1440         struct igb_tx_queue *txq;
1441         struct igb_rx_queue *rxq;
1442
1443         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1444                 txq = dev->data->tx_queues[i];
1445                 if (txq != NULL) {
1446                         igb_tx_queue_release_mbufs(txq);
1447                         igb_reset_tx_queue(txq, dev);
1448                 }
1449         }
1450
1451         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1452                 rxq = dev->data->rx_queues[i];
1453                 if (rxq != NULL) {
1454                         igb_rx_queue_release_mbufs(rxq);
1455                         igb_reset_rx_queue(rxq);
1456                 }
1457         }
1458 }
1459
1460 /**
1461  * Receive Side Scaling (RSS).
1462  * See section 7.1.1.7 in the following document:
1463  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1464  *
1465  * Principles:
1466  * The source and destination IP addresses of the IP header and the source and
1467  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1468  * against a configurable random key to compute a 32-bit RSS hash result.
1469  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1470  * 128-entry redirection table (RETA).  Each RETA entry provides a 3-bit
1471  * RSS output index that selects the RX queue in which the received
1472  * packet is stored.
1473  * The following output is supplied in the RX write-back descriptor:
1474  *     - 32-bit result of the Microsoft RSS hash function,
1475  *     - 4-bit RSS type field.
1476  */
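     /*
      * Illustrative example (values are hypothetical): with 4 RX queues and
      * no shift, a packet whose 32-bit RSS hash is 0x12345678 uses its 7 LSBs
      * (0x78 & 0x7F = 120) as the RETA index; igb_rss_configure() below
      * programs RETA[120] = 120 % 4 = 0, so the packet lands in RX queue 0.
      */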
1477
1478 /*
1479  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1480  * Used as the default key.
1481  */
1482 static uint8_t rss_intel_key[40] = {
1483         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1484         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1485         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1486         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1487         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1488 };
1489
1490 static void
1491 igb_rss_disable(struct rte_eth_dev *dev)
1492 {
1493         struct e1000_hw *hw;
1494         uint32_t mrqc;
1495
1496         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1497         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1498         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1499         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1500 }
1501
1502 static void
1503 igb_rss_configure(struct rte_eth_dev *dev)
1504 {
1505         struct e1000_hw *hw;
1506         uint8_t *hash_key;
1507         uint32_t rss_key;
1508         uint32_t mrqc;
1509         uint32_t shift;
1510         uint16_t rss_hf;
1511         uint16_t i;
1512
1513         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1514
1515         rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1516         if (rss_hf == 0) { /* Disable RSS. */
1517                 igb_rss_disable(dev);
1518                 return;
1519         }
1520         hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1521         if (hash_key == NULL)
1522                 hash_key = rss_intel_key; /* Default hash key. */
1523
1524         /* Fill in RSS hash key. */
1525         for (i = 0; i < 10; i++) {
1526                 rss_key  = hash_key[(i * 4)];
1527                 rss_key |= hash_key[(i * 4) + 1] << 8;
1528                 rss_key |= hash_key[(i * 4) + 2] << 16;
1529                 rss_key |= hash_key[(i * 4) + 3] << 24;
1530                 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1531         }
1532
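             /*
              * Note: the shift of 6 applied below places the queue index in
              * the bit position expected by the 82575 for each RETA byte.
              */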
1533         /* Fill in redirection table. */
1534         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1535         for (i = 0; i < 128; i++) {
1536                 union e1000_reta {
1537                         uint32_t dword;
1538                         uint8_t  bytes[4];
1539                 } reta;
1540                 uint8_t q_idx;
1541
1542                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1543                                    i % dev->data->nb_rx_queues : 0);
1544                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1545                 if ((i & 3) == 3)
1546                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1547         }
1548
1549         /* Set configured hashing functions in MRQC register. */
1550         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1551         if (rss_hf & ETH_RSS_IPV4)
1552                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1553         if (rss_hf & ETH_RSS_IPV4_TCP)
1554                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1555         if (rss_hf & ETH_RSS_IPV6)
1556                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1557         if (rss_hf & ETH_RSS_IPV6_EX)
1558                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1559         if (rss_hf & ETH_RSS_IPV6_TCP)
1560                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1561         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1562                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1563         if (rss_hf & ETH_RSS_IPV4_UDP)
1564                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1565         if (rss_hf & ETH_RSS_IPV6_UDP)
1566                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1567         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1568                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1569         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1570 }
1571
1572 /*********************************************************************
1573  *
1574  *  Enable receive unit.
1575  *
1576  **********************************************************************/
1577
1578 static int
1579 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1580 {
1581         struct igb_rx_entry *rxe = rxq->sw_ring;
1582         uint64_t dma_addr;
1583         unsigned i;
1584
1585         /* Initialize software ring entries. */
1586         for (i = 0; i < rxq->nb_rx_desc; i++) {
1587                 volatile union e1000_adv_rx_desc *rxd;
1588                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1589
1590                 if (mbuf == NULL) {
1591                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1592                                 "queue_id=%hu\n", rxq->queue_id);
1593                         igb_rx_queue_release(rxq);
1594                         return (-ENOMEM);
1595                 }
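                     /*
                      * One-buffer advanced RX descriptor: point both the
                      * header and the packet buffer addresses at the mbuf
                      * data buffer.
                      */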
1596                 dma_addr =
1597                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1598                 rxd = &rxq->rx_ring[i];
1599                 rxd->read.hdr_addr = dma_addr;
1600                 rxd->read.pkt_addr = dma_addr;
1601                 rxe[i].mbuf = mbuf;
1602         }
1603
1604         return 0;
1605 }
1606
1607 #define E1000_MRQC_DEF_Q_SHIFT               (3)
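     /*
      * Select the multi-queue RX mode: VMDq when SR-IOV is active with 8
      * pools, otherwise RSS when more than one RX queue is configured.
      */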
1608 static int
1609 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1610 {
1611         struct e1000_hw *hw =
1612                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1613         uint32_t mrqc;
1614  
1615         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1616                 /*
1617                  * SRIOV active scheme.
1618                  * FIXME: RSS together with VMDq & SRIOV is not supported yet.
1619                  */
1620                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1621                 /* Def_Q = 011b: Def_Q ignored, default pool taken from VT_CTL.DEF_PL */
1622                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1623                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1624         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1625                 /*
1626                  * SRIOV inactive scheme.
1627                  */
1628                 if (dev->data->nb_rx_queues > 1)
1629                         igb_rss_configure(dev);
1630                 else
1631                         igb_rss_disable(dev);
1632         }
1633  
1634         return 0;
1635 }
1636  
1637 int
1638 eth_igb_rx_init(struct rte_eth_dev *dev)
1639 {
1640         struct e1000_hw     *hw;
1641         struct igb_rx_queue *rxq;
1642         struct rte_pktmbuf_pool_private *mbp_priv;
1643         uint32_t rctl;
1644         uint32_t rxcsum;
1645         uint32_t srrctl;
1646         uint16_t buf_size;
1647         uint16_t rctl_bsize;
1648         uint16_t i;
1649         int ret;
1650
1651         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1652         srrctl = 0;
1653
1654         /*
1655          * Make sure receives are disabled while setting
1656          * up the descriptor ring.
1657          */
1658         rctl = E1000_READ_REG(hw, E1000_RCTL);
1659         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1660
1661         /*
1662          * Configure support of jumbo frames, if any.
1663          */
1664         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1665                 rctl |= E1000_RCTL_LPE;
1666
1667                 /*
1668                  * Set maximum packet length by default, and might be updated
1669                  * together with enabling/disabling dual VLAN.
1670                  */
1671                 E1000_WRITE_REG(hw, E1000_RLPML,
1672                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1673                                                 VLAN_TAG_SIZE);
1674         } else
1675                 rctl &= ~E1000_RCTL_LPE;
1676
1677         /* Configure and enable each RX queue. */
1678         rctl_bsize = 0;
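             /*
              * Start with the non-scattered receive function; it is replaced
              * by eth_igb_recv_scattered_pkts below if any queue's buffers
              * are too small to hold a maximum-sized frame.
              */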
1679         dev->rx_pkt_burst = eth_igb_recv_pkts;
1680         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1681                 uint64_t bus_addr;
1682                 uint32_t rxdctl;
1683
1684                 rxq = dev->data->rx_queues[i];
1685
1686                 /* Allocate buffers for descriptor rings and set up queue */
1687                 ret = igb_alloc_rx_queue_mbufs(rxq);
1688                 if (ret)
1689                         return ret;
1690
1691                 /*
1692                  * Reset crc_len in case it was changed after queue setup by a
1693                  *  call to configure
1694                  */
1695                 rxq->crc_len =
1696                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1697                                                         0 : ETHER_CRC_LEN);
1698
1699                 bus_addr = rxq->rx_ring_phys_addr;
1700                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1701                                 rxq->nb_rx_desc *
1702                                 sizeof(union e1000_adv_rx_desc));
1703                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1704                                 (uint32_t)(bus_addr >> 32));
1705                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1706
1707                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1708
1709                 /*
1710                  * Configure RX buffer size.
1711                  */
1712                 mbp_priv = (struct rte_pktmbuf_pool_private *)
1713                         ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
1714                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1715                                        RTE_PKTMBUF_HEADROOM);
1716                 if (buf_size >= 1024) {
1717                         /*
1718                          * Configure the BSIZEPACKET field of the SRRCTL
1719                          * register of the queue.
1720                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1721                          * If this field is equal to 0b, then RCTL.BSIZE
1722                          * determines the RX packet buffer size.
1723                          */
1724                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1725                                    E1000_SRRCTL_BSIZEPKT_MASK);
1726                         buf_size = (uint16_t) ((srrctl &
1727                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1728                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1729
1730                         /* Account for two VLAN tags (dual VLAN) when checking buffer size */
1731                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1732                                                 2 * VLAN_TAG_SIZE) > buf_size){
1733                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1734                                 dev->data->scattered_rx = 1;
1735                         }
1736                 } else {
1737                         /*
1738                          * Use BSIZE field of the device RCTL register.
1739                          */
1740                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1741                                 rctl_bsize = buf_size;
1742                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1743                         dev->data->scattered_rx = 1;
1744                 }
1745
1746                 /* Drop packets when no RX descriptors are available, if enabled */
1747                 if (rxq->drop_en)
1748                         srrctl |= E1000_SRRCTL_DROP_EN;
1749
1750                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
1751
1752                 /* Enable this RX queue. */
1753                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
1754                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1755                 rxdctl &= 0xFFF00000;
1756                 rxdctl |= (rxq->pthresh & 0x1F);
1757                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1758                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1759                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
1760         }
1761
1762         /*
1763          * Setup BSIZE field of RCTL register, if needed.
1764          * Buffer sizes >= 1024 are not set up through the RCTL register,
1765          * since the code above configures the SRRCTL register of the
1766          * RX queue in that case.
1767          * All configurable sizes are:
1768          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1769          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
1770          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
1771          *  2048: rctl |= E1000_RCTL_SZ_2048;
1772          *  1024: rctl |= E1000_RCTL_SZ_1024;
1773          *   512: rctl |= E1000_RCTL_SZ_512;
1774          *   256: rctl |= E1000_RCTL_SZ_256;
1775          */
1776         if (rctl_bsize > 0) {
1777                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1778                         rctl |= E1000_RCTL_SZ_512;
1779                 else /* 256 <= buf_size < 512 - use 256 */
1780                         rctl |= E1000_RCTL_SZ_256;
1781         }
1782
1783         /*
1784          * Configure RSS if device configured with multiple RX queues.
1785          */
1786         igb_dev_mq_rx_configure(dev);
1787
1788         /*
1789          * Setup the Checksum Register.
1790          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1791          */
1792         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1793         rxcsum |= E1000_RXCSUM_PCSD;
1794
1795         /* Enable both L3/L4 rx checksum offload */
1796         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1797                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
1798         else
1799                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1800         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1801
1802         /* Setup the Receive Control Register. */
1803         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1804                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1805
1806                 /* set STRCRC bit in all queues for Powerville/Springville */
1807                 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
1808                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1809                                 rxq = dev->data->rx_queues[i];
1810                                 uint32_t dvmolr = E1000_READ_REG(hw,
1811                                         E1000_DVMOLR(rxq->reg_idx));
1812                                 dvmolr |= E1000_DVMOLR_STRCRC;
1813                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
1814                         }
1815                 }
1816         } else {
1817                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1818
1819                 /* clear STRCRC bit in all queues for Powerville/Springville */
1820                 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
1821                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1822                                 rxq = dev->data->rx_queues[i];
1823                                 uint32_t dvmolr = E1000_READ_REG(hw,
1824                                         E1000_DVMOLR(rxq->reg_idx));
1825                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
1826                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
1827                         }
1828                 }
1829         }
1830
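             /*
              * Final RCTL programming: enable the receiver, accept broadcasts,
              * disable loopback, set the RX descriptor minimum threshold to
              * half the ring and program the multicast offset from the MAC
              * setup.
              */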
1831         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1832         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1833                 E1000_RCTL_RDMTS_HALF |
1834                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1835
1836         /* Make sure VLAN Filters are off. */
1837         rctl &= ~E1000_RCTL_VFE;
1838         /* Don't store bad packets. */
1839         rctl &= ~E1000_RCTL_SBP;
1840
1841         /* Enable Receives. */
1842         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1843
1844         /*
1845          * Setup the HW Rx Head and Tail Descriptor Pointers.
1846          * This needs to be done after enable.
1847          */
1848         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1849                 rxq = dev->data->rx_queues[i];
1850                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
1851                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
1852         }
1853
1854         return 0;
1855 }
1856
1857 /*********************************************************************
1858  *
1859  *  Enable transmit unit.
1860  *
1861  **********************************************************************/
1862 void
1863 eth_igb_tx_init(struct rte_eth_dev *dev)
1864 {
1865         struct e1000_hw     *hw;
1866         struct igb_tx_queue *txq;
1867         uint32_t tctl;
1868         uint32_t txdctl;
1869         uint16_t i;
1870
1871         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1872
1873         /* Setup the Base and Length of the Tx Descriptor Rings. */
1874         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1875                 uint64_t bus_addr;
1876                 txq = dev->data->tx_queues[i];
1877                 bus_addr = txq->tx_ring_phys_addr;
1878
1879                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
1880                                 txq->nb_tx_desc *
1881                                 sizeof(union e1000_adv_tx_desc));
1882                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
1883                                 (uint32_t)(bus_addr >> 32));
1884                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
1885
1886                 /* Setup the HW Tx Head and Tail descriptor pointers. */
1887                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
1888                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
1889
1890                 /* Setup Transmit threshold registers. */
1891                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
1892                 txdctl |= txq->pthresh & 0x1F;
1893                 txdctl |= ((txq->hthresh & 0x1F) << 8);
1894                 txdctl |= ((txq->wthresh & 0x1F) << 16);
1895                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1896                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
1897         }
1898
1899         /* Program the Transmit Control Register. */
1900         tctl = E1000_READ_REG(hw, E1000_TCTL);
1901         tctl &= ~E1000_TCTL_CT;
1902         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1903                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1904
1905         e1000_config_collision_dist(hw);
1906
1907         /* This write will effectively turn on the transmit unit. */
1908         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1909 }
1910
1911 /*********************************************************************
1912  *
1913  *  Enable VF receive unit.
1914  *
1915  **********************************************************************/
1916 int
1917 eth_igbvf_rx_init(struct rte_eth_dev *dev)
1918 {
1919         struct e1000_hw     *hw;
1920         struct igb_rx_queue *rxq;
1921         struct rte_pktmbuf_pool_private *mbp_priv;
1922         uint32_t srrctl;
1923         uint16_t buf_size;
1924         uint16_t rctl_bsize;
1925         uint16_t i;
1926         int ret;
1927
1928         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1929
1930         /* Configure and enable each RX queue. */
1931         rctl_bsize = 0;
1932         dev->rx_pkt_burst = eth_igb_recv_pkts;
1933         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1934                 uint64_t bus_addr;
1935                 uint32_t rxdctl;
1936
1937                 rxq = dev->data->rx_queues[i];
1938
1939                 /* Allocate buffers for descriptor rings and set up queue */
1940                 ret = igb_alloc_rx_queue_mbufs(rxq);
1941                 if (ret)
1942                         return ret;
1943
1944                 bus_addr = rxq->rx_ring_phys_addr;
1945                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1946                                 rxq->nb_rx_desc *
1947                                 sizeof(union e1000_adv_rx_desc));
1948                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1949                                 (uint32_t)(bus_addr >> 32));
1950                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1951
1952                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1953
1954                 /*
1955                  * Configure RX buffer size.
1956                  */
1957                 mbp_priv = (struct rte_pktmbuf_pool_private *)
1958                         ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
1959                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1960                                        RTE_PKTMBUF_HEADROOM);
1961                 if (buf_size >= 1024) {
1962                         /*
1963                          * Configure the BSIZEPACKET field of the SRRCTL
1964                          * register of the queue.
1965                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1966                          * If this field is equal to 0b, then RCTL.BSIZE
1967                          * determines the RX packet buffer size.
1968                          */
1969                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1970                                    E1000_SRRCTL_BSIZEPKT_MASK);
1971                         buf_size = (uint16_t) ((srrctl &
1972                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1973                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1974
1975                         /* Account for two VLAN tags (dual VLAN) when checking buffer size */
1976                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1977                                                 2 * VLAN_TAG_SIZE) > buf_size){
1978                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1979                                 dev->data->scattered_rx = 1;
1980                         }
1981                 } else {
1982                         /*
1983                          * Use BSIZE field of the device RCTL register.
1984                          */
1985                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1986                                 rctl_bsize = buf_size;
1987                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1988                         dev->data->scattered_rx = 1;
1989                 }
1990
1991                 /* Drop packets when no RX descriptors are available, if enabled */
1992                 if (rxq->drop_en)
1993                         srrctl |= E1000_SRRCTL_DROP_EN;
1994
1995                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
1996
1997                 /* Enable this RX queue. */
1998                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
1999                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2000                 rxdctl &= 0xFFF00000;
2001                 rxdctl |= (rxq->pthresh & 0x1F);
2002                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2003                 if (hw->mac.type == e1000_82576) {
2004                         /*
2005                          * Workaround for the 82576 VF erratum: force WTHRESH
2006                          * to 1 to avoid descriptor write-back occasionally
2007                          * not being triggered.
2008                          */
2009                         rxdctl |= 0x10000;
2010                         PMD_INIT_LOG(DEBUG, "Forcing RX WTHRESH to 1\n");
2011                 }
2012                 else
2013                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2014                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2015         }
2016
2017         /*
2018          * Setup the HW Rx Head and Tail Descriptor Pointers.
2019          * This needs to be done after enable.
2020          */
2021         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2022                 rxq = dev->data->rx_queues[i];
2023                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2024                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2025         }
2026
2027         return 0;
2028 }
2029
2030 /*********************************************************************
2031  *
2032  *  Enable VF transmit unit.
2033  *
2034  **********************************************************************/
2035 void
2036 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2037 {
2038         struct e1000_hw     *hw;
2039         struct igb_tx_queue *txq;
2040         uint32_t txdctl;
2041         uint16_t i;
2042
2043         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2044
2045         /* Setup the Base and Length of the Tx Descriptor Rings. */
2046         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2047                 uint64_t bus_addr;
2048
2049                 txq = dev->data->tx_queues[i];
2050                 bus_addr = txq->tx_ring_phys_addr;
2051                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2052                                 txq->nb_tx_desc *
2053                                 sizeof(union e1000_adv_tx_desc));
2054                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2055                                 (uint32_t)(bus_addr >> 32));
2056                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2057
2058                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2059                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2060                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2061
2062                 /* Setup Transmit threshold registers. */
2063                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2064                 txdctl |= txq->pthresh & 0x1F;
2065                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2066                 if (hw->mac.type == e1000_82576) {
2067                         /*
2068                          * Workaround for the 82576 VF erratum: force WTHRESH
2069                          * to 1 to avoid descriptor write-back occasionally
2070                          * not being triggered.
2071                          */
2072                         txdctl |= 0x10000;
2073                         PMD_INIT_LOG(DEBUG, "Forcing TX WTHRESH to 1\n");
2074                 }
2075                 else
2076                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2077                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2078                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2079         }
2080
2081 }
2082