lib/librte_pmd_e1000/igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 static inline struct rte_mbuf *
77 rte_rxmbuf_alloc(struct rte_mempool *mp)
78 {
79         struct rte_mbuf *m;
80
81         m = __rte_mbuf_raw_alloc(mp);
82         __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
83         return (m);
84 }
85
86 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
87         (uint64_t) ((mb)->buf_physaddr +                   \
88                         (uint64_t) ((char *)((mb)->pkt.data) -     \
89                                 (char *)(mb)->buf_addr))
90
91 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
92         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
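
/*
 * Note (added for clarity): both macros above compute the physical (DMA)
 * address that must be programmed into a descriptor, i.e. buf_physaddr plus
 * the offset of the packet data inside the mbuf buffer.  The _DEFAULT
 * variant assumes the data starts right after the standard headroom, which
 * holds for freshly allocated mbufs used to refill the RX ring.
 */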
93
94 /**
95  * Structure associated with each descriptor of the RX ring of a RX queue.
96  */
97 struct igb_rx_entry {
98         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
99 };
100
101 /**
102  * Structure associated with each descriptor of the TX ring of a TX queue.
103  */
104 struct igb_tx_entry {
105         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
106         uint16_t next_id; /**< Index of next descriptor in ring. */
107         uint16_t last_id; /**< Index of last scattered descriptor. */
108 };
109
110 /**
111  * Structure associated with each RX queue.
112  */
113 struct igb_rx_queue {
114         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
115         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
116         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
117         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
118         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
119         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
120         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
121         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
122         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
123         uint16_t            rx_tail;    /**< current value of RDT register. */
124         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
125         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
126         uint16_t            queue_id;   /**< RX queue index. */
127         uint16_t            reg_idx;    /**< RX queue register index. */
128         uint8_t             port_id;    /**< Device port identifier. */
129         uint8_t             pthresh;    /**< Prefetch threshold register. */
130         uint8_t             hthresh;    /**< Host threshold register. */
131         uint8_t             wthresh;    /**< Write-back threshold register. */
132         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
133         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
134 };
135
136 /**
137  * Hardware context number
138  */
139 enum igb_advctx_num {
140         IGB_CTX_0    = 0, /**< CTX0    */
141         IGB_CTX_1    = 1, /**< CTX1    */
142         IGB_CTX_NUM  = 2, /**< CTX_NUM */
143 };
144
145 /**
146  * Structure to check if a new context needs to be built
147  */
148 struct igb_advctx_info {
149         uint16_t flags;           /**< ol_flags related to context build. */
150         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
151         union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
152 };
153
154 /**
155  * Structure associated with each TX queue.
156  */
157 struct igb_tx_queue {
158         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
159         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
160         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
161         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
162         uint32_t               txd_type;      /**< Device-specific TXD type */
163         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
164         uint16_t               tx_tail; /**< Current value of TDT register. */
165         uint16_t               tx_head;
166         /**< Index of first used TX descriptor. */
167         uint16_t               queue_id; /**< TX queue index. */
168         uint16_t               reg_idx;  /**< TX queue register index. */
169         uint8_t                port_id;  /**< Device port identifier. */
170         uint8_t                pthresh;  /**< Prefetch threshold register. */
171         uint8_t                hthresh;  /**< Host threshold register. */
172         uint8_t                wthresh;  /**< Write-back threshold register. */
173         uint32_t               ctx_curr;
174         /**< Currently used hardware context. */
175         uint32_t               ctx_start;
176         /**< Start context position for transmit queue. */
177         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
178         /**< Hardware context history.*/
179 };
180
181 #if 1
182 #define RTE_PMD_USE_PREFETCH
183 #endif
184
185 #ifdef RTE_PMD_USE_PREFETCH
186 #define rte_igb_prefetch(p)     rte_prefetch0(p)
187 #else
188 #define rte_igb_prefetch(p)     do {} while(0)
189 #endif
190
191 #ifdef RTE_PMD_PACKET_PREFETCH
192 #define rte_packet_prefetch(p) rte_prefetch1(p)
193 #else
194 #define rte_packet_prefetch(p)  do {} while(0)
195 #endif
196
197 /*
198  * Macro for VMDq feature for 1 GbE NIC.
199  */
200 #define E1000_VMOLR_SIZE                        (8)
201
202 /*********************************************************************
203  *
204  *  TX function
205  *
206  **********************************************************************/
207
208 /*
209  * Advanced context descriptors are almost the same between igb and ixgbe.
210  * This is kept as a separate function to leave room for optimization here;
211  * rework is required to go with the pre-defined values.
212  */
213
214 static inline void
215 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
216                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
217                 uint16_t ol_flags, uint32_t vlan_macip_lens)
218 {
219         uint32_t type_tucmd_mlhl;
220         uint32_t mss_l4len_idx;
221         uint32_t ctx_idx, ctx_curr;
222         uint32_t cmp_mask;
223
224         ctx_curr = txq->ctx_curr;
225         ctx_idx = ctx_curr + txq->ctx_start;
226
227         cmp_mask = 0;
228         type_tucmd_mlhl = 0;
229
230         if (ol_flags & PKT_TX_VLAN_PKT) {
231                 cmp_mask |= TX_VLAN_CMP_MASK;
232         }
233
234         if (ol_flags & PKT_TX_IP_CKSUM) {
235                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
236                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
237         }
238
239         /* Specify which HW CTX to upload. */
240         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
241         switch (ol_flags & PKT_TX_L4_MASK) {
242         case PKT_TX_UDP_CKSUM:
243                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
244                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
245                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
246                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
247                 break;
248         case PKT_TX_TCP_CKSUM:
249                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
250                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
251                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
252                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
253                 break;
254         case PKT_TX_SCTP_CKSUM:
255                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
256                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
257                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
258                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
259                 break;
260         default:
261                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
262                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
263                 break;
264         }
265
266         txq->ctx_cache[ctx_curr].flags           = ol_flags;
267         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
268         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
269                 vlan_macip_lens & cmp_mask;
270
271         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
272         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
273         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
274         ctx_txd->seqnum_seed     = 0;
275 }
276
277 /*
278  * Check which hardware context can be used. Use the existing match
279  * or create a new context descriptor.
280  */
281 static inline uint32_t
282 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
283                 uint32_t vlan_macip_lens)
284 {
285         /* If match with the current context */
286         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
287                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
288                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
289                         return txq->ctx_curr;
290         }
291
292         /* If match with the other context */
293         txq->ctx_curr ^= 1;
294         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
295                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
296                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
297                         return txq->ctx_curr;
298         }
299
300         /* Mismatch: a new context descriptor must be built */
301         return (IGB_CTX_NUM);
302 }
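
/*
 * Illustration (not driver code): with the two-slot cache used above, a
 * queue that alternates between two offload layouts, e.g. VLAN-tagged TCP
 * checksum packets and plain IPv4 checksum packets, ends up with one layout
 * per slot, and what_advctx_update() keeps returning a cache hit, so no new
 * context descriptors are written.  A third distinct layout evicts one slot
 * and makes the function return IGB_CTX_NUM, i.e. "build a new context".
 */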
303
304 static inline uint32_t
305 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
306 {
307         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
308         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
309         uint32_t tmp;
310
311         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
312         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
313         return tmp;
314 }
315
316 static inline uint32_t
317 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
318 {
319         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
320         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
321 }
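
/*
 * Worked example (illustrative only): for a packet with
 * ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and no VLAN tag,
 * tx_desc_cksum_flags_to_olinfo() returns
 * E1000_ADVTXD_POPTS_IXSM | E1000_ADVTXD_POPTS_TXSM and
 * tx_desc_vlan_flags_to_cmdtype() returns 0.  The boolean expressions index
 * the two-entry tables directly, which keeps the hot TX path branch-free.
 */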
322
323 uint16_t
324 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
325                uint16_t nb_pkts)
326 {
327         struct igb_tx_queue *txq;
328         struct igb_tx_entry *sw_ring;
329         struct igb_tx_entry *txe, *txn;
330         volatile union e1000_adv_tx_desc *txr;
331         volatile union e1000_adv_tx_desc *txd;
332         struct rte_mbuf     *tx_pkt;
333         struct rte_mbuf     *m_seg;
334         uint64_t buf_dma_addr;
335         uint32_t olinfo_status;
336         uint32_t cmd_type_len;
337         uint32_t pkt_len;
338         uint16_t slen;
339         uint16_t ol_flags;
340         uint16_t tx_end;
341         uint16_t tx_id;
342         uint16_t tx_last;
343         uint16_t nb_tx;
344         uint16_t tx_ol_req;
345         uint32_t new_ctx = 0;
346         uint32_t ctx = 0;
347         uint32_t vlan_macip_lens;
348
349         txq = tx_queue;
350         sw_ring = txq->sw_ring;
351         txr     = txq->tx_ring;
352         tx_id   = txq->tx_tail;
353         txe = &sw_ring[tx_id];
354
355         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
356                 tx_pkt = *tx_pkts++;
357                 pkt_len = tx_pkt->pkt.pkt_len;
358
359                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
360
361                 /*
362                  * The number of descriptors that must be allocated for a
363                  * packet is the number of segments of that packet, plus 1
364                  * Context Descriptor for the VLAN Tag Identifier, if any.
365                  * Determine the last TX descriptor to allocate in the TX ring
366                  * for the packet, starting from the current position (tx_id)
367                  * in the ring.
368                  */
369                 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
370
371                 ol_flags = tx_pkt->ol_flags;
372                 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
373                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
374
375                 /* Check whether a Context Descriptor needs to be built. */
376                 if (tx_ol_req) {
377                         ctx = what_advctx_update(txq, tx_ol_req,
378                                 vlan_macip_lens);
379                         /* Only allocate a context descriptor if required */
380                         new_ctx = (ctx == IGB_CTX_NUM);
381                         ctx = txq->ctx_curr;
382                         tx_last = (uint16_t) (tx_last + new_ctx);
383                 }
384                 if (tx_last >= txq->nb_tx_desc)
385                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
386
387                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
388                            " tx_first=%u tx_last=%u\n",
389                            (unsigned) txq->port_id,
390                            (unsigned) txq->queue_id,
391                            (unsigned) pkt_len,
392                            (unsigned) tx_id,
393                            (unsigned) tx_last);
394
395                 /*
396                  * Check if there are enough free descriptors in the TX ring
397                  * to transmit the next packet.
398                  * This operation is based on the two following rules:
399                  *
400                  *   1- Only check that the last needed TX descriptor can be
401                  *      allocated (by construction, if that descriptor is free,
402                  *      all intermediate ones are also free).
403                  *
404                  *      For this purpose, the index of the last TX descriptor
405                  *      used for a packet (the "last descriptor" of a packet)
406                  *      is recorded in the TX entries (the last one included)
407                  *      that are associated with all TX descriptors allocated
408                  *      for that packet.
409                  *
410                  *   2- Avoid allocating the last free TX descriptor of the
411                  *      ring, so that the TDT register is never set to the
412                  *      same value stored in parallel by the NIC in the TDH
413                  *      register, which would make the TX engine of the NIC
414                  *      enter a deadlock situation.
415                  *
416                  *      By extension, avoid allocating a free descriptor that
417                  *      belongs to the last set of free descriptors allocated
418                  *      to the same previously transmitted packet.
419                  */
420
421                 /*
422                  * The "last descriptor" of the packet, if any, that previously
423                  * used the last descriptor we plan to allocate.
424                  */
425                 tx_end = sw_ring[tx_last].last_id;
426
427                 /*
428                  * The next descriptor following that "last descriptor" in the
429                  * ring.
430                  */
431                 tx_end = sw_ring[tx_end].next_id;
432
433                 /*
434                  * The "last descriptor" associated with that next descriptor.
435                  */
436                 tx_end = sw_ring[tx_end].last_id;
437
438                 /*
439                  * Check that this descriptor is free.
440                  */
441                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
442                         if (nb_tx == 0)
443                                 return (0);
444                         goto end_of_tx;
445                 }
446
447                 /*
448                  * Set common flags of all TX Data Descriptors.
449                  *
450                  * The following bits must be set in all Data Descriptors:
451                  *   - E1000_ADVTXD_DTYP_DATA
452                  *   - E1000_ADVTXD_DCMD_DEXT
453                  *
454                  * The following bits must be set in the first Data Descriptor
455                  * and are ignored in the other ones:
456                  *   - E1000_ADVTXD_DCMD_IFCS
457                  *   - E1000_ADVTXD_MAC_1588
458                  *   - E1000_ADVTXD_DCMD_VLE
459                  *
460                  * The following bits must only be set in the last Data
461                  * Descriptor:
462                  *   - E1000_TXD_CMD_EOP
463                  *
464                  * The following bits can be set in any Data Descriptor, but
465                  * are only set in the last Data Descriptor:
466                  *   - E1000_TXD_CMD_RS
467                  */
468                 cmd_type_len = txq->txd_type |
469                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
470                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
471 #if defined(RTE_LIBRTE_IEEE1588)
472                 if (ol_flags & PKT_TX_IEEE1588_TMST)
473                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
474 #endif
475                 if (tx_ol_req) {
476                         /* Setup TX Advanced context descriptor if required */
477                         if (new_ctx) {
478                                 volatile struct e1000_adv_tx_context_desc *
479                                     ctx_txd;
480
481                                 ctx_txd = (volatile struct
482                                     e1000_adv_tx_context_desc *)
483                                     &txr[tx_id];
484
485                                 txn = &sw_ring[txe->next_id];
486                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
487
488                                 if (txe->mbuf != NULL) {
489                                         rte_pktmbuf_free_seg(txe->mbuf);
490                                         txe->mbuf = NULL;
491                                 }
492
493                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
494                                     vlan_macip_lens);
495
496                                 txe->last_id = tx_last;
497                                 tx_id = txe->next_id;
498                                 txe = txn;
499                         }
500
501                         /* Setup the TX Advanced Data Descriptor */
502                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
503                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
504                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
505                 }
506
507                 m_seg = tx_pkt;
508                 do {
509                         txn = &sw_ring[txe->next_id];
510                         txd = &txr[tx_id];
511
512                         if (txe->mbuf != NULL)
513                                 rte_pktmbuf_free_seg(txe->mbuf);
514                         txe->mbuf = m_seg;
515
516                         /*
517                          * Set up transmit descriptor.
518                          */
519                         slen = (uint16_t) m_seg->pkt.data_len;
520                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
521                         txd->read.buffer_addr =
522                                 rte_cpu_to_le_64(buf_dma_addr);
523                         txd->read.cmd_type_len =
524                                 rte_cpu_to_le_32(cmd_type_len | slen);
525                         txd->read.olinfo_status =
526                                 rte_cpu_to_le_32(olinfo_status);
527                         txe->last_id = tx_last;
528                         tx_id = txe->next_id;
529                         txe = txn;
530                         m_seg = m_seg->pkt.next;
531                 } while (m_seg != NULL);
532
533                 /*
534                  * The last packet data descriptor needs End Of Packet (EOP)
535                  * and Report Status (RS).
536                  */
537                 txd->read.cmd_type_len |=
538                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
539         }
540  end_of_tx:
541         rte_wmb();
542
543         /*
544          * Set the Transmit Descriptor Tail (TDT).
545          */
546         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
547         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
548                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
549                    (unsigned) tx_id, (unsigned) nb_tx);
550         txq->tx_tail = tx_id;
551
552         return (nb_tx);
553 }
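
/*
 * Usage sketch (application side, illustrative only; port_id, queue_id and
 * pkts are assumed to be set up by the application): this function is
 * installed as dev->tx_pkt_burst and is reached through the burst TX API:
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *     while (sent < nb_pkts)
 *             rte_pktmbuf_free(pkts[sent++]);
 *
 * Fewer packets than requested may be accepted when the ring runs out of
 * free descriptors, as detected by the DD status check above.
 */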
554
555 /*********************************************************************
556  *
557  *  RX functions
558  *
559  **********************************************************************/
560 static inline uint16_t
561 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
562 {
563         uint16_t pkt_flags;
564
565         static uint16_t ip_pkt_types_map[16] = {
566                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
567                 PKT_RX_IPV6_HDR, 0, 0, 0,
568                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
569                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
570         };
571
572 #if defined(RTE_LIBRTE_IEEE1588)
573         static uint32_t ip_pkt_etqf_map[8] = {
574                 0, 0, 0, PKT_RX_IEEE1588_PTP,
575                 0, 0, 0, 0,
576         };
577
578         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
579                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
580                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
581 #else
582         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
583                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
584 #endif
585         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
586                                                 0 : PKT_RX_RSS_HASH));
587 }
588
589 static inline uint16_t
590 rx_desc_status_to_pkt_flags(uint32_t rx_status)
591 {
592         uint16_t pkt_flags;
593
594         /* Check if VLAN present */
595         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
596                                                 PKT_RX_VLAN_PKT : 0);
597
598 #if defined(RTE_LIBRTE_IEEE1588)
599         if (rx_status & E1000_RXD_STAT_TMST)
600                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
601 #endif
602         return pkt_flags;
603 }
604
605 static inline uint16_t
606 rx_desc_error_to_pkt_flags(uint32_t rx_status)
607 {
608         /*
609          * Bit 30: IPE, IPv4 checksum error
610          * Bit 29: L4I, L4 integrity error
611          */
612
613         static uint16_t error_to_pkt_flags_map[4] = {
614                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
615                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
616         };
617         return error_to_pkt_flags_map[(rx_status >>
618                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
619 }
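
/*
 * The two error bits extracted above form a 2-bit index into
 * error_to_pkt_flags_map[]:
 *     00 -> 0
 *     01 -> PKT_RX_L4_CKSUM_BAD
 *     10 -> PKT_RX_IP_CKSUM_BAD
 *     11 -> PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
 */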
620
621 uint16_t
622 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
623                uint16_t nb_pkts)
624 {
625         struct igb_rx_queue *rxq;
626         volatile union e1000_adv_rx_desc *rx_ring;
627         volatile union e1000_adv_rx_desc *rxdp;
628         struct igb_rx_entry *sw_ring;
629         struct igb_rx_entry *rxe;
630         struct rte_mbuf *rxm;
631         struct rte_mbuf *nmb;
632         union e1000_adv_rx_desc rxd;
633         uint64_t dma_addr;
634         uint32_t staterr;
635         uint32_t hlen_type_rss;
636         uint16_t pkt_len;
637         uint16_t rx_id;
638         uint16_t nb_rx;
639         uint16_t nb_hold;
640         uint16_t pkt_flags;
641
642         nb_rx = 0;
643         nb_hold = 0;
644         rxq = rx_queue;
645         rx_id = rxq->rx_tail;
646         rx_ring = rxq->rx_ring;
647         sw_ring = rxq->sw_ring;
648         while (nb_rx < nb_pkts) {
649                 /*
650                  * The order of operations here is important as the DD status
651                  * bit must not be read after any other descriptor fields.
652                  * rx_ring and rxdp are pointing to volatile data so the order
653                  * of accesses cannot be reordered by the compiler. If they were
654                  * not volatile, they could be reordered which could lead to
655                  * using invalid descriptor fields when read from rxd.
656                  */
657                 rxdp = &rx_ring[rx_id];
658                 staterr = rxdp->wb.upper.status_error;
659                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
660                         break;
661                 rxd = *rxdp;
662
663                 /*
664                  * End of packet.
665                  *
666                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
667                  * likely to be invalid and to be dropped by the various
668                  * validation checks performed by the network stack.
669                  *
670                  * Allocate a new mbuf to replenish the RX ring descriptor.
671                  * If the allocation fails:
672                  *    - arrange for that RX descriptor to be the first one
673                  *      being parsed the next time the receive function is
674                  *      invoked [on the same queue].
675                  *
676                  *    - Stop parsing the RX ring and return immediately.
677                  *
678                  * This policy does not drop the packet received in the RX
679                  * descriptor for which the allocation of a new mbuf failed.
680                  * Thus, it allows that packet to be retrieved later if
681                  * mbufs have been freed in the meantime.
682                  * As a side effect, holding RX descriptors instead of
683                  * systematically giving them back to the NIC may lead to
684                  * RX ring exhaustion situations.
685                  * However, the NIC can gracefully prevent such situations
686                  * from happening by sending specific "back-pressure" flow
687                  * control frames to its peer(s).
688                  */
689                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
690                            "staterr=0x%x pkt_len=%u\n",
691                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
692                            (unsigned) rx_id, (unsigned) staterr,
693                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
694
695                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
696                 if (nmb == NULL) {
697                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
698                                    "queue_id=%u\n", (unsigned) rxq->port_id,
699                                    (unsigned) rxq->queue_id);
700                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
701                         break;
702                 }
703
704                 nb_hold++;
705                 rxe = &sw_ring[rx_id];
706                 rx_id++;
707                 if (rx_id == rxq->nb_rx_desc)
708                         rx_id = 0;
709
710                 /* Prefetch next mbuf while processing current one. */
711                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
712
713                 /*
714                  * When next RX descriptor is on a cache-line boundary,
715                  * prefetch the next 4 RX descriptors and the next 8 pointers
716                  * to mbufs.
717                  */
718                 if ((rx_id & 0x3) == 0) {
719                         rte_igb_prefetch(&rx_ring[rx_id]);
720                         rte_igb_prefetch(&sw_ring[rx_id]);
721                 }
722
723                 rxm = rxe->mbuf;
724                 rxe->mbuf = nmb;
725                 dma_addr =
726                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
727                 rxdp->read.hdr_addr = dma_addr;
728                 rxdp->read.pkt_addr = dma_addr;
729
730                 /*
731                  * Initialize the returned mbuf.
732                  * 1) setup generic mbuf fields:
733                  *    - number of segments,
734                  *    - next segment,
735                  *    - packet length,
736                  *    - RX port identifier.
737                  * 2) integrate hardware offload data, if any:
738                  *    - RSS flag & hash,
739                  *    - IP checksum flag,
740                  *    - VLAN TCI, if any,
741                  *    - error flags.
742                  */
743                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
744                                       rxq->crc_len);
745                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
746                 rte_packet_prefetch(rxm->pkt.data);
747                 rxm->pkt.nb_segs = 1;
748                 rxm->pkt.next = NULL;
749                 rxm->pkt.pkt_len = pkt_len;
750                 rxm->pkt.data_len = pkt_len;
751                 rxm->pkt.in_port = rxq->port_id;
752
753                 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
754                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
755                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
756                 rxm->pkt.vlan_macip.f.vlan_tci =
757                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
758
759                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
760                 pkt_flags = (uint16_t)(pkt_flags |
761                                 rx_desc_status_to_pkt_flags(staterr));
762                 pkt_flags = (uint16_t)(pkt_flags |
763                                 rx_desc_error_to_pkt_flags(staterr));
764                 rxm->ol_flags = pkt_flags;
765
766                 /*
767                  * Store the mbuf address into the next entry of the array
768                  * of returned packets.
769                  */
770                 rx_pkts[nb_rx++] = rxm;
771         }
772         rxq->rx_tail = rx_id;
773
774         /*
775          * If the number of free RX descriptors is greater than the RX free
776          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
777          * register.
778          * Update the RDT with the value of the last processed RX descriptor
779          * minus 1, to guarantee that the RDT register is never equal to the
780          * RDH register, which creates a "full" ring situation from the
781          * hardware point of view...
782          */
783         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
784         if (nb_hold > rxq->rx_free_thresh) {
785                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
786                            "nb_hold=%u nb_rx=%u\n",
787                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
788                            (unsigned) rx_id, (unsigned) nb_hold,
789                            (unsigned) nb_rx);
790                 rx_id = (uint16_t) ((rx_id == 0) ?
791                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
792                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
793                 nb_hold = 0;
794         }
795         rxq->nb_rx_hold = nb_hold;
796         return (nb_rx);
797 }
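
/*
 * Usage sketch (application side, illustrative only; port_id and queue_id
 * are assumed to be configured by the application): this function is
 * installed as dev->rx_pkt_burst and is typically driven by a poll loop:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t i, n;
 *
 *     n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *     for (i = 0; i < n; i++) {
 *             // process pkts[i], then release it
 *             rte_pktmbuf_free(pkts[i]);
 *     }
 *
 * A return value smaller than the requested burst only means that fewer
 * descriptors had their DD bit set at the time of the call.
 */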
798
799 uint16_t
800 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
801                          uint16_t nb_pkts)
802 {
803         struct igb_rx_queue *rxq;
804         volatile union e1000_adv_rx_desc *rx_ring;
805         volatile union e1000_adv_rx_desc *rxdp;
806         struct igb_rx_entry *sw_ring;
807         struct igb_rx_entry *rxe;
808         struct rte_mbuf *first_seg;
809         struct rte_mbuf *last_seg;
810         struct rte_mbuf *rxm;
811         struct rte_mbuf *nmb;
812         union e1000_adv_rx_desc rxd;
813         uint64_t dma; /* Physical address of mbuf data buffer */
814         uint32_t staterr;
815         uint32_t hlen_type_rss;
816         uint16_t rx_id;
817         uint16_t nb_rx;
818         uint16_t nb_hold;
819         uint16_t data_len;
820         uint16_t pkt_flags;
821
822         nb_rx = 0;
823         nb_hold = 0;
824         rxq = rx_queue;
825         rx_id = rxq->rx_tail;
826         rx_ring = rxq->rx_ring;
827         sw_ring = rxq->sw_ring;
828
829         /*
830          * Retrieve RX context of current packet, if any.
831          */
832         first_seg = rxq->pkt_first_seg;
833         last_seg = rxq->pkt_last_seg;
834
835         while (nb_rx < nb_pkts) {
836         next_desc:
837                 /*
838                  * The order of operations here is important as the DD status
839                  * bit must not be read after any other descriptor fields.
840                  * rx_ring and rxdp are pointing to volatile data so the order
841                  * of accesses cannot be reordered by the compiler. If they were
842                  * not volatile, they could be reordered which could lead to
843                  * using invalid descriptor fields when read from rxd.
844                  */
845                 rxdp = &rx_ring[rx_id];
846                 staterr = rxdp->wb.upper.status_error;
847                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
848                         break;
849                 rxd = *rxdp;
850
851                 /*
852                  * Descriptor done.
853                  *
854                  * Allocate a new mbuf to replenish the RX ring descriptor.
855                  * If the allocation fails:
856                  *    - arrange for that RX descriptor to be the first one
857                  *      being parsed the next time the receive function is
858                  *      invoked [on the same queue].
859                  *
860                  *    - Stop parsing the RX ring and return immediately.
861                  *
862                  * This policy does not drop the packet received in the RX
863                  * descriptor for which the allocation of a new mbuf failed.
864                  * Thus, it allows that packet to be retrieved later if
865                  * mbufs have been freed in the meantime.
866                  * As a side effect, holding RX descriptors instead of
867                  * systematically giving them back to the NIC may lead to
868                  * RX ring exhaustion situations.
869                  * However, the NIC can gracefully prevent such situations
870                  * from happening by sending specific "back-pressure" flow
871                  * control frames to its peer(s).
872                  */
873                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
874                            "staterr=0x%x data_len=%u\n",
875                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876                            (unsigned) rx_id, (unsigned) staterr,
877                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
878
879                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
880                 if (nmb == NULL) {
881                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882                                    "queue_id=%u\n", (unsigned) rxq->port_id,
883                                    (unsigned) rxq->queue_id);
884                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
885                         break;
886                 }
887
888                 nb_hold++;
889                 rxe = &sw_ring[rx_id];
890                 rx_id++;
891                 if (rx_id == rxq->nb_rx_desc)
892                         rx_id = 0;
893
894                 /* Prefetch next mbuf while processing current one. */
895                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
896
897                 /*
898                  * When next RX descriptor is on a cache-line boundary,
899                  * prefetch the next 4 RX descriptors and the next 8 pointers
900                  * to mbufs.
901                  */
902                 if ((rx_id & 0x3) == 0) {
903                         rte_igb_prefetch(&rx_ring[rx_id]);
904                         rte_igb_prefetch(&sw_ring[rx_id]);
905                 }
906
907                 /*
908                  * Update RX descriptor with the physical address of the new
909                  * data buffer of the new allocated mbuf.
910                  */
911                 rxm = rxe->mbuf;
912                 rxe->mbuf = nmb;
913                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
914                 rxdp->read.pkt_addr = dma;
915                 rxdp->read.hdr_addr = dma;
916
917                 /*
918                  * Set data length & data buffer address of mbuf.
919                  */
920                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
921                 rxm->pkt.data_len = data_len;
922                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
923
924                 /*
925                  * If this is the first buffer of the received packet,
926                  * set the pointer to the first mbuf of the packet and
927                  * initialize its context.
928                  * Otherwise, update the total length and the number of segments
929                  * of the current scattered packet, and update the pointer to
930                  * the last mbuf of the current packet.
931                  */
932                 if (first_seg == NULL) {
933                         first_seg = rxm;
934                         first_seg->pkt.pkt_len = data_len;
935                         first_seg->pkt.nb_segs = 1;
936                 } else {
937                         first_seg->pkt.pkt_len += data_len;
938                         first_seg->pkt.nb_segs++;
939                         last_seg->pkt.next = rxm;
940                 }
941
942                 /*
943                  * If this is not the last buffer of the received packet,
944                  * update the pointer to the last mbuf of the current scattered
945                  * packet and continue to parse the RX ring.
946                  */
947                 if (! (staterr & E1000_RXD_STAT_EOP)) {
948                         last_seg = rxm;
949                         goto next_desc;
950                 }
951
952                 /*
953                  * This is the last buffer of the received packet.
954                  * If the CRC is not stripped by the hardware:
955                  *   - Subtract the CRC length from the total packet length.
956                  *   - If the last buffer only contains the whole CRC or a part
957                  *     of it, free the mbuf associated to the last buffer.
958                  *     If part of the CRC is also contained in the previous
959                  *     mbuf, subtract the length of that CRC part from the
960                  *     data length of the previous mbuf.
961                  */
962                 rxm->pkt.next = NULL;
963                 if (unlikely(rxq->crc_len > 0)) {
964                         first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
965                         if (data_len <= ETHER_CRC_LEN) {
966                                 rte_pktmbuf_free_seg(rxm);
967                                 first_seg->pkt.nb_segs--;
968                                 last_seg->pkt.data_len = (uint16_t)
969                                         (last_seg->pkt.data_len -
970                                          (ETHER_CRC_LEN - data_len));
971                                 last_seg->pkt.next = NULL;
972                         } else
973                                 rxm->pkt.data_len =
974                                         (uint16_t) (data_len - ETHER_CRC_LEN);
975                 }
976
977                 /*
978                  * Initialize the first mbuf of the returned packet:
979                  *    - RX port identifier,
980                  *    - hardware offload data, if any:
981                  *      - RSS flag & hash,
982                  *      - IP checksum flag,
983                  *      - VLAN TCI, if any,
984                  *      - error flags.
985                  */
986                 first_seg->pkt.in_port = rxq->port_id;
987                 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
988
989                 /*
990                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
991                  * set in the pkt_flags field.
992                  */
993                 first_seg->pkt.vlan_macip.f.vlan_tci =
994                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
995                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
996                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
997                 pkt_flags = (uint16_t)(pkt_flags |
998                                 rx_desc_status_to_pkt_flags(staterr));
999                 pkt_flags = (uint16_t)(pkt_flags |
1000                                 rx_desc_error_to_pkt_flags(staterr));
1001                 first_seg->ol_flags = pkt_flags;
1002
1003                 /* Prefetch data of first segment, if configured to do so. */
1004                 rte_packet_prefetch(first_seg->pkt.data);
1005
1006                 /*
1007                  * Store the mbuf address into the next entry of the array
1008                  * of returned packets.
1009                  */
1010                 rx_pkts[nb_rx++] = first_seg;
1011
1012                 /*
1013                  * Set up the receive context for a new packet.
1014                  */
1015                 first_seg = NULL;
1016         }
1017
1018         /*
1019          * Record index of the next RX descriptor to probe.
1020          */
1021         rxq->rx_tail = rx_id;
1022
1023         /*
1024          * Save receive context.
1025          */
1026         rxq->pkt_first_seg = first_seg;
1027         rxq->pkt_last_seg = last_seg;
1028
1029         /*
1030          * If the number of free RX descriptors is greater than the RX free
1031          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1032          * register.
1033          * Update the RDT with the value of the last processed RX descriptor
1034          * minus 1, to guarantee that the RDT register is never equal to the
1035          * RDH register, which creates a "full" ring situation from the
1036          * hardware point of view...
1037          */
1038         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1039         if (nb_hold > rxq->rx_free_thresh) {
1040                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1041                            "nb_hold=%u nb_rx=%u\n",
1042                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1043                            (unsigned) rx_id, (unsigned) nb_hold,
1044                            (unsigned) nb_rx);
1045                 rx_id = (uint16_t) ((rx_id == 0) ?
1046                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1047                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1048                 nb_hold = 0;
1049         }
1050         rxq->nb_rx_hold = nb_hold;
1051         return (nb_rx);
1052 }
1053
1054 /*
1055  * Rings setup and release.
1056  *
1057  * TDBA/RDBA only need to be aligned on a 16-byte boundary, but TDLEN/RDLEN
1058  * must be a multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte
1059  * boundary instead.  This also optimizes the cache line size effect, as the
1060  * hardware supports cache line sizes up to 128 bytes.
1061  */
1062 #define IGB_ALIGN 128
1063
1064 /*
1065  * Maximum number of Ring Descriptors.
1066  *
1067  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1068  * descriptors must meet the following condition:
1069  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1070  */
1071 #define IGB_MIN_RING_DESC 32
1072 #define IGB_MAX_RING_DESC 4096
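
/*
 * Example: both union e1000_adv_rx_desc and union e1000_adv_tx_desc are
 * 16 bytes, so the "multiple of 128 bytes" rule means the descriptor count
 * must be a multiple of 8.  512 or 1024 descriptors pass the
 * (nb_desc * desc_size) % IGB_ALIGN check in the queue setup functions,
 * while e.g. 500 would be rejected.
 */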
1073
1074 static const struct rte_memzone *
1075 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1076                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1077 {
1078         char z_name[RTE_MEMZONE_NAMESIZE];
1079         const struct rte_memzone *mz;
1080
1081         rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1082                         dev->driver->pci_drv.name, ring_name,
1083                                 dev->data->port_id, queue_id);
1084         mz = rte_memzone_lookup(z_name);
1085         if (mz)
1086                 return mz;
1087
1088 #ifdef RTE_LIBRTE_XEN_DOM0
1089         return rte_memzone_reserve_bounded(z_name, ring_size,
1090                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1091 #else
1092         return rte_memzone_reserve_aligned(z_name, ring_size,
1093                         socket_id, 0, IGB_ALIGN);
1094 #endif
1095 }
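
/*
 * Note: if a memzone with the same name already exists (for instance when a
 * queue is set up again after a reconfiguration), the existing zone is
 * looked up and reused, since memzones cannot be freed once reserved.
 */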
1096
1097 static void
1098 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1099 {
1100         unsigned i;
1101
1102         if (txq->sw_ring != NULL) {
1103                 for (i = 0; i < txq->nb_tx_desc; i++) {
1104                         if (txq->sw_ring[i].mbuf != NULL) {
1105                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1106                                 txq->sw_ring[i].mbuf = NULL;
1107                         }
1108                 }
1109         }
1110 }
1111
1112 static void
1113 igb_tx_queue_release(struct igb_tx_queue *txq)
1114 {
1115         if (txq != NULL) {
1116                 igb_tx_queue_release_mbufs(txq);
1117                 rte_free(txq->sw_ring);
1118                 rte_free(txq);
1119         }
1120 }
1121
1122 void
1123 eth_igb_tx_queue_release(void *txq)
1124 {
1125         igb_tx_queue_release(txq);
1126 }
1127
1128 static void
1129 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1130 {
1131         txq->tx_head = 0;
1132         txq->tx_tail = 0;
1133         txq->ctx_curr = 0;
1134         memset((void*)&txq->ctx_cache, 0,
1135                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1136 }
1137
1138 static void
1139 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1140 {
1141         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1142                         .buffer_addr = 0}};
1143         struct igb_tx_entry *txe = txq->sw_ring;
1144         uint16_t i, prev;
1145         struct e1000_hw *hw;
1146
1147         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148         /* Zero out HW ring memory */
1149         for (i = 0; i < txq->nb_tx_desc; i++) {
1150                 txq->tx_ring[i] = zeroed_desc;
1151         }
1152
1153         /* Initialize ring entries */
1154         prev = (uint16_t)(txq->nb_tx_desc - 1);
1155         for (i = 0; i < txq->nb_tx_desc; i++) {
1156                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1157
1158                 txd->wb.status = E1000_TXD_STAT_DD;
1159                 txe[i].mbuf = NULL;
1160                 txe[i].last_id = i;
1161                 txe[prev].next_id = i;
1162                 prev = i;
1163         }
1164
1165         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1166         /* 82575 specific, each tx queue will use 2 hw contexts */
1167         if (hw->mac.type == e1000_82575)
1168                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1169
1170         igb_reset_tx_queue_stat(txq);
1171 }
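
/*
 * After this reset, every software ring entry is linked to its successor
 * through next_id (the last entry wraps back to index 0) and every hardware
 * descriptor reports DD, so the transmit function sees a completely free
 * ring on its first invocation.
 */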
1172
1173 int
1174 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1175                          uint16_t queue_idx,
1176                          uint16_t nb_desc,
1177                          unsigned int socket_id,
1178                          const struct rte_eth_txconf *tx_conf)
1179 {
1180         const struct rte_memzone *tz;
1181         struct igb_tx_queue *txq;
1182         struct e1000_hw     *hw;
1183         uint32_t size;
1184
1185         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186
1187         /*
1188          * Validate number of transmit descriptors.
1189          * It must not exceed hardware maximum, and must be multiple
1190          * It must not exceed the hardware maximum, and the resulting ring
1191          * size in bytes must be a multiple of IGB_ALIGN.
1192         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1193             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1194                 return -EINVAL;
1195         }
1196
1197         /*
1198          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1199          * driver.
1200          */
1201         if (tx_conf->tx_free_thresh != 0)
1202                 RTE_LOG(WARNING, PMD,
1203                         "The tx_free_thresh parameter is not "
1204                         "used for the 1G driver.\n");
1205         if (tx_conf->tx_rs_thresh != 0)
1206                 RTE_LOG(WARNING, PMD,
1207                         "The tx_rs_thresh parameter is not "
1208                         "used for the 1G driver.\n");
1209         if (tx_conf->tx_thresh.wthresh == 0)
1210                 RTE_LOG(WARNING, PMD,
1211                         "To improve 1G driver performance, consider setting "
1212                         "the TX WTHRESH value to 4, 8, or 16.\n");
1213
1214         /* Free memory prior to re-allocation if needed */
1215         if (dev->data->tx_queues[queue_idx] != NULL)
1216                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1217
1218         /* First allocate the tx queue data structure */
1219         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1220                                                         CACHE_LINE_SIZE);
1221         if (txq == NULL)
1222                 return (-ENOMEM);
1223
1224         /*
1225          * Allocate TX ring hardware descriptors. A memzone large enough to
1226          * handle the maximum ring size is allocated in order to allow for
1227          * resizing in later calls to the queue setup function.
1228          */
1229         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1230         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1231                                         size, socket_id);
1232         if (tz == NULL) {
1233                 igb_tx_queue_release(txq);
1234                 return (-ENOMEM);
1235         }
1236
1237         txq->nb_tx_desc = nb_desc;
1238         txq->pthresh = tx_conf->tx_thresh.pthresh;
1239         txq->hthresh = tx_conf->tx_thresh.hthresh;
1240         txq->wthresh = tx_conf->tx_thresh.wthresh;
1241         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1242                 txq->wthresh = 1;
1243         txq->queue_id = queue_idx;
1244         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1245                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1246         txq->port_id = dev->data->port_id;
1247
1248         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1249 #ifndef RTE_LIBRTE_XEN_DOM0
1250         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1251 #else
1252         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1253 #endif
1254         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1255         /* Allocate software ring */
1256         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1257                                    sizeof(struct igb_tx_entry) * nb_desc,
1258                                    CACHE_LINE_SIZE);
1259         if (txq->sw_ring == NULL) {
1260                 igb_tx_queue_release(txq);
1261                 return (-ENOMEM);
1262         }
1263         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1264                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1265
1266         igb_reset_tx_queue(txq, dev);
1267         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1268         dev->data->tx_queues[queue_idx] = txq;
1269
1270         return (0);
1271 }
1272
1273 static void
1274 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1275 {
1276         unsigned i;
1277
1278         if (rxq->sw_ring != NULL) {
1279                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1280                         if (rxq->sw_ring[i].mbuf != NULL) {
1281                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1282                                 rxq->sw_ring[i].mbuf = NULL;
1283                         }
1284                 }
1285         }
1286 }
1287
1288 static void
1289 igb_rx_queue_release(struct igb_rx_queue *rxq)
1290 {
1291         if (rxq != NULL) {
1292                 igb_rx_queue_release_mbufs(rxq);
1293                 rte_free(rxq->sw_ring);
1294                 rte_free(rxq);
1295         }
1296 }
1297
1298 void
1299 eth_igb_rx_queue_release(void *rxq)
1300 {
1301         igb_rx_queue_release(rxq);
1302 }
1303
1304 static void
1305 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1306 {
1307         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1308                         .pkt_addr = 0}};
1309         unsigned i;
1310
1311         /* Zero out HW ring memory */
1312         for (i = 0; i < rxq->nb_rx_desc; i++) {
1313                 rxq->rx_ring[i] = zeroed_desc;
1314         }
1315
1316         rxq->rx_tail = 0;
1317         rxq->pkt_first_seg = NULL;
1318         rxq->pkt_last_seg = NULL;
1319 }
1320
1321 int
1322 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1323                          uint16_t queue_idx,
1324                          uint16_t nb_desc,
1325                          unsigned int socket_id,
1326                          const struct rte_eth_rxconf *rx_conf,
1327                          struct rte_mempool *mp)
1328 {
1329         const struct rte_memzone *rz;
1330         struct igb_rx_queue *rxq;
1331         struct e1000_hw     *hw;
1332         unsigned int size;
1333
1334         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1335
1336         /*
1337          * Validate number of receive descriptors.
1338          * It must not exceed hardware maximum, and must be multiple
1339          * of IGB_ALIGN.
1340          */
1341         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1342             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1343                 return (-EINVAL);
1344         }
1345
1346         /* Free memory prior to re-allocation if needed */
1347         if (dev->data->rx_queues[queue_idx] != NULL) {
1348                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1349                 dev->data->rx_queues[queue_idx] = NULL;
1350         }
1351
1352         /* First allocate the RX queue data structure. */
1353         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1354                           CACHE_LINE_SIZE);
1355         if (rxq == NULL)
1356                 return (-ENOMEM);
1357         rxq->mb_pool = mp;
1358         rxq->nb_rx_desc = nb_desc;
1359         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1360         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1361         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1362         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1363                 rxq->wthresh = 1;
1364         rxq->drop_en = rx_conf->rx_drop_en;
1365         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1366         rxq->queue_id = queue_idx;
1367         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1368                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1369         rxq->port_id = dev->data->port_id;
1370         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1371                                   ETHER_CRC_LEN);
1372
1373         /*
1374          *  Allocate RX ring hardware descriptors. A memzone large enough to
1375          *  handle the maximum ring size is allocated in order to allow for
1376          *  resizing in later calls to the queue setup function.
1377          */
1378         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1379         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1380         if (rz == NULL) {
1381                 igb_rx_queue_release(rxq);
1382                 return (-ENOMEM);
1383         }
1384         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1385         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1386 #ifndef RTE_LIBRTE_XEN_DOM0
1387         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1388 #else
1389         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); 
1390 #endif 
1391         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1392
1393         /* Allocate software ring. */
1394         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1395                                    sizeof(struct igb_rx_entry) * nb_desc,
1396                                    CACHE_LINE_SIZE);
1397         if (rxq->sw_ring == NULL) {
1398                 igb_rx_queue_release(rxq);
1399                 return (-ENOMEM);
1400         }
1401         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1402                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1403
1404         dev->data->rx_queues[queue_idx] = rxq;
1405         igb_reset_rx_queue(rxq);
1406
1407         return 0;
1408 }
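     /*
      * Usage sketch (illustrative only, not part of the driver): the setup
      * function above is invoked through the generic ethdev API, e.g.
      *
      *     ret = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
      *                                  &rx_conf, mbuf_pool);
      *
      * where "rx_conf" and "mbuf_pool" are assumed to have been prepared by
      * the application; nb_desc (256 here) must keep the descriptor ring a
      * multiple of IGB_ALIGN bytes and lie within [IGB_MIN_RING_DESC,
      * IGB_MAX_RING_DESC], as checked at the top of this function.
      */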
1409
1410 uint32_t 
1411 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1412 {
1413 #define IGB_RXQ_SCAN_INTERVAL 4
1414         volatile union e1000_adv_rx_desc *rxdp;
1415         struct igb_rx_queue *rxq;
1416         uint32_t desc = 0;
1417
1418         if (rx_queue_id >= dev->data->nb_rx_queues) {
1419                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1420                 return 0;
1421         }
1422
1423         rxq = dev->data->rx_queues[rx_queue_id];
1424         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1425
1426         while ((desc < rxq->nb_rx_desc) &&
1427                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1428                 desc += IGB_RXQ_SCAN_INTERVAL;
1429                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1430                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1431                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1432                                 desc - rxq->nb_rx_desc]);
1433         }
1434
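             /*
              * Note: the ring is scanned in steps of IGB_RXQ_SCAN_INTERVAL,
              * so the count returned below is an approximation with a
              * granularity of 4 descriptors.
              */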
1435         return desc;
1436 }
1437
1438 int
1439 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1440 {
1441         volatile union e1000_adv_rx_desc *rxdp;
1442         struct igb_rx_queue *rxq = rx_queue;
1443         uint32_t desc;
1444
1445         if (unlikely(offset >= rxq->nb_rx_desc))
1446                 return 0;
1447         desc = rxq->rx_tail + offset;
1448         if (desc >= rxq->nb_rx_desc)
1449                 desc -= rxq->nb_rx_desc;
1450
1451         rxdp = &rxq->rx_ring[desc];
1452         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1453 }
1454
1455 void
1456 igb_dev_clear_queues(struct rte_eth_dev *dev)
1457 {
1458         uint16_t i;
1459         struct igb_tx_queue *txq;
1460         struct igb_rx_queue *rxq;
1461
1462         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1463                 txq = dev->data->tx_queues[i];
1464                 if (txq != NULL) {
1465                         igb_tx_queue_release_mbufs(txq);
1466                         igb_reset_tx_queue(txq, dev);
1467                 }
1468         }
1469
1470         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1471                 rxq = dev->data->rx_queues[i];
1472                 if (rxq != NULL) {
1473                         igb_rx_queue_release_mbufs(rxq);
1474                         igb_reset_rx_queue(rxq);
1475                 }
1476         }
1477 }
1478
1479 /**
1480  * Receive Side Scaling (RSS).
1481  * See section 7.1.1.7 in the following document:
1482  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1483  *
1484  * Principles:
1485  * The source and destination IP addresses of the IP header and the source and
1486  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1487  * against a configurable random key to compute a 32-bit RSS hash result.
1488  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1489  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1490  * RSS output index which is used as the RX queue index where to store the
1491  * received packets.
1492  * The following output is supplied in the RX write-back descriptor:
1493  *     - 32-bit result of the Microsoft RSS hash function,
1494  *     - 4-bit RSS type field.
1495  */
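     /*
      * Worked example (illustrative only): a packet whose 32-bit RSS hash is
      * 0x2A4D1EF3 has 0x73 (115) as its seven LSBs, so its RX queue index is
      * taken from RETA entry 115.  With four RX queues and the round-robin
      * RETA filling done in igb_rss_configure() below, that entry holds
      * queue 115 % 4 = 3.
      */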
1496
1497 /*
1498  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1499  * Used as the default key.
1500  */
1501 static uint8_t rss_intel_key[40] = {
1502         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1503         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1504         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1505         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1506         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1507 };
1508
1509 static void
1510 igb_rss_disable(struct rte_eth_dev *dev)
1511 {
1512         struct e1000_hw *hw;
1513         uint32_t mrqc;
1514
1515         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1517         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1518         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1519 }
1520
1521 static void
1522 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1523 {
1524         uint8_t  *hash_key;
1525         uint32_t rss_key;
1526         uint32_t mrqc;
1527         uint16_t rss_hf;
1528         uint16_t i;
1529
1530         hash_key = rss_conf->rss_key;
1531         if (hash_key != NULL) {
1532                 /* Fill in RSS hash key */
1533                 for (i = 0; i < 10; i++) {
1534                         rss_key  = hash_key[(i * 4)];
1535                         rss_key |= hash_key[(i * 4) + 1] << 8;
1536                         rss_key |= hash_key[(i * 4) + 2] << 16;
1537                         rss_key |= hash_key[(i * 4) + 3] << 24;
1538                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1539                 }
1540         }
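             /*
              * For illustration: the loop above packs key bytes little-endian,
              * so with the default rss_intel_key the first four bytes
              * 0x6D, 0x5A, 0x56, 0xDA are written to RSSRK(0) as 0xDA565A6D.
              */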
1541
1542         /* Set configured hashing protocols in MRQC register */
1543         rss_hf = rss_conf->rss_hf;
1544         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1545         if (rss_hf & ETH_RSS_IPV4)
1546                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1547         if (rss_hf & ETH_RSS_IPV4_TCP)
1548                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1549         if (rss_hf & ETH_RSS_IPV6)
1550                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1551         if (rss_hf & ETH_RSS_IPV6_EX)
1552                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1553         if (rss_hf & ETH_RSS_IPV6_TCP)
1554                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1555         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1556                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1557         if (rss_hf & ETH_RSS_IPV4_UDP)
1558                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1559         if (rss_hf & ETH_RSS_IPV6_UDP)
1560                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1561         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1562                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1563         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1564 }
1565
1566 int
1567 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1568                         struct rte_eth_rss_conf *rss_conf)
1569 {
1570         struct e1000_hw *hw;
1571         uint32_t mrqc;
1572         uint16_t rss_hf;
1573
1574         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1575
1576         /*
1577          * Before changing anything, first check that the update RSS operation
1578          * does not attempt to disable RSS, if RSS was enabled at
1579          * initialization time, or does not attempt to enable RSS, if RSS was
1580          * disabled at initialization time.
1581          */
1582         rss_hf = rss_conf->rss_hf;
1583         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1584         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1585                 if (rss_hf != 0) /* Enable RSS */
1586                         return -(EINVAL);
1587                 return 0; /* Nothing to do */
1588         }
1589         /* RSS enabled */
1590         if (rss_hf == 0) /* Disable RSS */
1591                 return -(EINVAL);
1592         igb_hw_rss_hash_set(hw, rss_conf);
1593         return 0;
1594 }
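     /*
      * Usage sketch (illustrative only, not part of the driver): at runtime
      * an application updates the hash configuration through the generic API,
      * for example
      *
      *     struct rte_eth_rss_conf conf = {
      *             .rss_key = NULL,   /* keep the currently programmed key */
      *             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
      *     };
      *     ret = rte_eth_dev_rss_hash_update(port_id, &conf);
      *
      * which lands in eth_igb_rss_hash_update() above and fails with -EINVAL
      * if it would toggle RSS on or off relative to the initial configuration.
      */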
1595
1596 static void
1597 igb_rss_configure(struct rte_eth_dev *dev)
1598 {
1599         struct rte_eth_rss_conf rss_conf;
1600         struct e1000_hw *hw;
1601         uint32_t shift;
1602         uint16_t i;
1603
1604         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1605
1606         /* Fill in redirection table. */
1607         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1608         for (i = 0; i < 128; i++) {
1609                 union e1000_reta {
1610                         uint32_t dword;
1611                         uint8_t  bytes[4];
1612                 } reta;
1613                 uint8_t q_idx;
1614
1615                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1616                                    i % dev->data->nb_rx_queues : 0);
1617                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1618                 if ((i & 3) == 3)
1619                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1620         }
1621
1622         /*
1623          * Configure the RSS key and the RSS protocols used to compute
1624          * the RSS hash of input packets.
1625          */
1626         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1627         if (rss_conf.rss_hf == 0) {
1628                 igb_rss_disable(dev);
1629                 return;
1630         }
1631         if (rss_conf.rss_key == NULL)
1632                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1633         igb_hw_rss_hash_set(hw, &rss_conf);
1634 }
1635
1636 /*
1637  * Check whether the mac type supports VMDq.
1638  * Return 1 if it does, otherwise return 0.
1639  */
1640 static int
1641 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1642 {
1643         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1644         
1645         switch (hw->mac.type) { 
1646         case e1000_82576: 
1647         case e1000_82580: 
1648         case e1000_i350: 
1649                 return 1;
1650         case e1000_82540: 
1651         case e1000_82541: 
1652         case e1000_82542: 
1653         case e1000_82543: 
1654         case e1000_82544: 
1655         case e1000_82545: 
1656         case e1000_82546: 
1657         case e1000_82547: 
1658         case e1000_82571: 
1659         case e1000_82572: 
1660         case e1000_82573: 
1661         case e1000_82574: 
1662         case e1000_82583: 
1663         case e1000_i210: 
1664         case e1000_i211: 
1665         default:
1666                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1667                 return 0;
1668         }
1669 }
1670
1671 static int
1672 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1673 {
1674         struct rte_eth_vmdq_rx_conf *cfg;
1675         struct e1000_hw *hw;
1676         uint32_t mrqc, vt_ctl, vmolr, rctl;
1677         int i;
1678  
1679         PMD_INIT_LOG(DEBUG, ">>");
1680         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1681         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1682
1683         /* Check whether the mac type supports VMDq; a return value of 0 means it does not */
1684         if (igb_is_vmdq_supported(dev) == 0)
1685                 return -1;
1686
1687         igb_rss_disable(dev);
1688         
1689         /* RCTL: enable VLAN filter */
1690         rctl = E1000_READ_REG(hw, E1000_RCTL);
1691         rctl |= E1000_RCTL_VFE;
1692         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1693
1694         /* MRQC: enable vmdq */
1695         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1696         mrqc |= E1000_MRQC_ENABLE_VMDQ; 
1697         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1698  
1699         /* VTCTL:  pool selection according to VLAN tag */
1700         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1701         if (cfg->enable_default_pool) 
1702                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1703         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1704         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1705         
1706         /*
1707          * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1708          * Both 82576 and 82580 support it.
1709          */
1710         if (hw->mac.type != e1000_i350) {
1711                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1712                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1713                         vmolr |= E1000_VMOLR_STRVLAN;
1714                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1715                 }
1716         }
1717
1718         /* VFTA - enable all vlan filters */
1719         for (i = 0; i < IGB_VFTA_SIZE; i++) 
1720                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1721         
1722         /* VFRE: enable RX for 8 pools; both 82576 and i350 support it */
1723         if (hw->mac.type != e1000_82580)
1724                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1725  
1726         /*
1727          * RAH/RAL - allow pools to read specific mac addresses
1728          * In this case, all pools should be able to read from mac addr 0
1729          */
1730         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1731         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1732
1733         /* VLVF: set up filters for vlan tags as configured */
1734         for (i = 0; i < cfg->nb_pool_maps; i++) {
1735                 /* set vlan id in VF register and set the valid bit */
1736                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1737                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1738                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1739                         E1000_VLVF_POOLSEL_MASK)));
1740         }
1741
1742         E1000_WRITE_FLUSH(hw);
1743         
1744         return 0;
1745 }
1746
1747
1748 /*********************************************************************
1749  *
1750  *  Enable receive unit.
1751  *
1752  **********************************************************************/
1753
1754 static int
1755 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1756 {
1757         struct igb_rx_entry *rxe = rxq->sw_ring;
1758         uint64_t dma_addr;
1759         unsigned i;
1760
1761         /* Initialize software ring entries. */
1762         for (i = 0; i < rxq->nb_rx_desc; i++) {
1763                 volatile union e1000_adv_rx_desc *rxd;
1764                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1765
1766                 if (mbuf == NULL) {
1767                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1768                                 "queue_id=%hu\n", rxq->queue_id);
1769                         igb_rx_queue_release(rxq);
1770                         return (-ENOMEM);
1771                 }
1772                 dma_addr =
1773                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1774                 rxd = &rxq->rx_ring[i];
1775                 rxd->read.hdr_addr = dma_addr;
1776                 rxd->read.pkt_addr = dma_addr;
1777                 rxe[i].mbuf = mbuf;
1778         }
1779
1780         return 0;
1781 }
1782
1783 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1784 static int
1785 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1786 {
1787         struct e1000_hw *hw =
1788                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789         uint32_t mrqc;
1790  
1791         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1792                 /*
1793                  * SR-IOV active scheme.
1794                  * FIXME: add support for RSS together with VMDq & SR-IOV.
1795                  */
1796                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1797                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1798                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1799                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1800         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1801                 /*
1802                  * SR-IOV inactive scheme.
1803                  */
1804                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1805                         case ETH_MQ_RX_RSS:
1806                                 igb_rss_configure(dev);
1807                                 break;
1808                         case ETH_MQ_RX_VMDQ_ONLY:
1809                                 /* Configure general VMDq-only RX parameters */
1810                                 igb_vmdq_rx_hw_configure(dev);
1811                                 break;
1812                         case ETH_MQ_RX_NONE:
1813                         /* If mq_mode is none, disable RSS. */
1814                         default:
1815                                 igb_rss_disable(dev);
1816                                 break;
1817                 }
1818         }
1819  
1820         return 0;
1821 }
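     /*
      * Configuration sketch (illustrative only): the RSS branch above is
      * taken when no SR-IOV pools are active and the application configured
      * the port with something like
      *
      *     struct rte_eth_conf port_conf = {
      *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
      *             .rx_adv_conf = { .rss_conf = { .rss_key = NULL,
      *                                            .rss_hf  = ETH_RSS_IPV4 } },
      *     };
      *
      * before calling rte_eth_dev_configure() and starting the port.
      */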
1822  
1823 int
1824 eth_igb_rx_init(struct rte_eth_dev *dev)
1825 {
1826         struct e1000_hw     *hw;
1827         struct igb_rx_queue *rxq;
1828         struct rte_pktmbuf_pool_private *mbp_priv;
1829         uint32_t rctl;
1830         uint32_t rxcsum;
1831         uint32_t srrctl;
1832         uint16_t buf_size;
1833         uint16_t rctl_bsize;
1834         uint16_t i;
1835         int ret;
1836
1837         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1838         srrctl = 0;
1839
1840         /*
1841          * Make sure receives are disabled while setting
1842          * up the descriptor ring.
1843          */
1844         rctl = E1000_READ_REG(hw, E1000_RCTL);
1845         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1846
1847         /*
1848          * Configure support of jumbo frames, if any.
1849          */
1850         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1851                 rctl |= E1000_RCTL_LPE;
1852
1853                 /*
1854                  * Set the maximum packet length by default; it may be updated
1855                  * later when dual VLAN is enabled or disabled.
1856                  */
1857                 E1000_WRITE_REG(hw, E1000_RLPML,
1858                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1859                                                 VLAN_TAG_SIZE);
1860         } else
1861                 rctl &= ~E1000_RCTL_LPE;
1862
1863         /* Configure and enable each RX queue. */
1864         rctl_bsize = 0;
1865         dev->rx_pkt_burst = eth_igb_recv_pkts;
1866         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1867                 uint64_t bus_addr;
1868                 uint32_t rxdctl;
1869
1870                 rxq = dev->data->rx_queues[i];
1871
1872                 /* Allocate buffers for descriptor rings and set up queue */
1873                 ret = igb_alloc_rx_queue_mbufs(rxq);
1874                 if (ret)
1875                         return ret;
1876
1877                 /*
1878                  * Reset crc_len in case it was changed after queue setup by a
1879                  *  call to configure
1880                  */
1881                 rxq->crc_len =
1882                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1883                                                         0 : ETHER_CRC_LEN);
1884
1885                 bus_addr = rxq->rx_ring_phys_addr;
1886                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1887                                 rxq->nb_rx_desc *
1888                                 sizeof(union e1000_adv_rx_desc));
1889                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1890                                 (uint32_t)(bus_addr >> 32));
1891                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1892
1893                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1894
1895                 /*
1896                  * Configure RX buffer size.
1897                  */
1898                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1899                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1900                                        RTE_PKTMBUF_HEADROOM);
1901                 if (buf_size >= 1024) {
1902                         /*
1903                          * Configure the BSIZEPACKET field of the SRRCTL
1904                          * register of the queue.
1905                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1906                          * If this field is equal to 0b, then RCTL.BSIZE
1907                          * determines the RX packet buffer size.
1908                          */
1909                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1910                                    E1000_SRRCTL_BSIZEPKT_MASK);
1911                         buf_size = (uint16_t) ((srrctl &
1912                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1913                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
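                             /*
                              * Example (assuming the common 2048-byte mbuf
                              * data room and 128-byte RTE_PKTMBUF_HEADROOM):
                              * buf_size starts at 1920, 1920 >> 10 gives
                              * BSIZEPACKET = 1, so the effective buffer size
                              * rounds down to 1024 bytes and larger frames
                              * fall back to scattered RX below.
                              */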
1914
1915                         /* Add the dual VLAN tag length to support dual VLAN frames */
1916                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1917                                                 2 * VLAN_TAG_SIZE) > buf_size){
1918                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1919                                 dev->data->scattered_rx = 1;
1920                         }
1921                 } else {
1922                         /*
1923                          * Use BSIZE field of the device RCTL register.
1924                          */
1925                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1926                                 rctl_bsize = buf_size;
1927                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1928                         dev->data->scattered_rx = 1;
1929                 }
1930
1931                 /* Set if packets are dropped when no descriptors available */
1932                 if (rxq->drop_en)
1933                         srrctl |= E1000_SRRCTL_DROP_EN;
1934
1935                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
1936
1937                 /* Enable this RX queue. */
1938                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
1939                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1940                 rxdctl &= 0xFFF00000;
1941                 rxdctl |= (rxq->pthresh & 0x1F);
1942                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1943                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1944                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
1945         }
1946
1947         /*
1948          * Setup BSIZE field of RCTL register, if needed.
1949          * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
1950          * register, since the code above configures the SRRCTL register of
1951          * the RX queue in such a case.
1952          * All configurable sizes are:
1953          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1954          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
1955          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
1956          *  2048: rctl |= E1000_RCTL_SZ_2048;
1957          *  1024: rctl |= E1000_RCTL_SZ_1024;
1958          *   512: rctl |= E1000_RCTL_SZ_512;
1959          *   256: rctl |= E1000_RCTL_SZ_256;
1960          */
1961         if (rctl_bsize > 0) {
1962                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1963                         rctl |= E1000_RCTL_SZ_512;
1964                 else /* 256 <= buf_size < 512 - use 256 */
1965                         rctl |= E1000_RCTL_SZ_256;
1966         }
1967
1968         /*
1969          * Configure RSS if device configured with multiple RX queues.
1970          */
1971         igb_dev_mq_rx_configure(dev);
1972
1973         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
1974         rctl |= E1000_READ_REG(hw, E1000_RCTL);
1975
1976         /*
1977          * Setup the Checksum Register.
1978          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1979          */
1980         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1981         rxcsum |= E1000_RXCSUM_PCSD;
1982
1983         /* Enable both L3/L4 rx checksum offload */
1984         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1985                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
1986         else
1987                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1988         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1989
1990         /* Setup the Receive Control Register. */
1991         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1992                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1993
1994                 /* set STRCRC bit in all queues */
1995                 if (hw->mac.type == e1000_i350 ||
1996                     hw->mac.type == e1000_i210 ||
1997                     hw->mac.type == e1000_i211 ||
1998                     hw->mac.type == e1000_i354) {
1999                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2000                                 rxq = dev->data->rx_queues[i];
2001                                 uint32_t dvmolr = E1000_READ_REG(hw,
2002                                         E1000_DVMOLR(rxq->reg_idx));
2003                                 dvmolr |= E1000_DVMOLR_STRCRC;
2004                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2005                         }
2006                 }
2007         } else {
2008                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2009
2010                 /* clear STRCRC bit in all queues */
2011                 if (hw->mac.type == e1000_i350 ||
2012                     hw->mac.type == e1000_i210 ||
2013                     hw->mac.type == e1000_i211 ||
2014                     hw->mac.type == e1000_i354) {
2015                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2016                                 rxq = dev->data->rx_queues[i];
2017                                 uint32_t dvmolr = E1000_READ_REG(hw,
2018                                         E1000_DVMOLR(rxq->reg_idx));
2019                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2020                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2021                         }
2022                 }
2023         }
2024
2025         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2026         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2027                 E1000_RCTL_RDMTS_HALF |
2028                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2029
2030         /* Make sure VLAN Filters are off. */
2031         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2032                 rctl &= ~E1000_RCTL_VFE;
2033         /* Don't store bad packets. */
2034         rctl &= ~E1000_RCTL_SBP;
2035
2036         /* Enable Receives. */
2037         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2038
2039         /*
2040          * Setup the HW Rx Head and Tail Descriptor Pointers.
2041          * This needs to be done after enable.
2042          */
2043         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2044                 rxq = dev->data->rx_queues[i];
2045                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2046                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2047         }
2048
2049         return 0;
2050 }
2051
2052 /*********************************************************************
2053  *
2054  *  Enable transmit unit.
2055  *
2056  **********************************************************************/
2057 void
2058 eth_igb_tx_init(struct rte_eth_dev *dev)
2059 {
2060         struct e1000_hw     *hw;
2061         struct igb_tx_queue *txq;
2062         uint32_t tctl;
2063         uint32_t txdctl;
2064         uint16_t i;
2065
2066         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2067
2068         /* Setup the Base and Length of the Tx Descriptor Rings. */
2069         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2070                 uint64_t bus_addr;
2071                 txq = dev->data->tx_queues[i];
2072                 bus_addr = txq->tx_ring_phys_addr;
2073
2074                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2075                                 txq->nb_tx_desc *
2076                                 sizeof(union e1000_adv_tx_desc));
2077                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2078                                 (uint32_t)(bus_addr >> 32));
2079                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2080
2081                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2082                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2083                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2084
2085                 /* Setup Transmit threshold registers. */
2086                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2087                 txdctl |= txq->pthresh & 0x1F;
2088                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2089                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2090                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2091                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2092         }
2093
2094         /* Program the Transmit Control Register. */
2095         tctl = E1000_READ_REG(hw, E1000_TCTL);
2096         tctl &= ~E1000_TCTL_CT;
2097         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2098                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2099
2100         e1000_config_collision_dist(hw);
2101
2102         /* This write will effectively turn on the transmit unit. */
2103         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2104 }
2105
2106 /*********************************************************************
2107  *
2108  *  Enable VF receive unit.
2109  *
2110  **********************************************************************/
2111 int
2112 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2113 {
2114         struct e1000_hw     *hw;
2115         struct igb_rx_queue *rxq;
2116         struct rte_pktmbuf_pool_private *mbp_priv;
2117         uint32_t srrctl;
2118         uint16_t buf_size;
2119         uint16_t rctl_bsize;
2120         uint16_t i;
2121         int ret;
2122
2123         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2124
2125         /* setup MTU */
2126         e1000_rlpml_set_vf(hw,
2127                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2128                 VLAN_TAG_SIZE));
2129
2130         /* Configure and enable each RX queue. */
2131         rctl_bsize = 0;
2132         dev->rx_pkt_burst = eth_igb_recv_pkts;
2133         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2134                 uint64_t bus_addr;
2135                 uint32_t rxdctl;
2136
2137                 rxq = dev->data->rx_queues[i];
2138
2139                 /* Allocate buffers for descriptor rings and set up queue */
2140                 ret = igb_alloc_rx_queue_mbufs(rxq);
2141                 if (ret)
2142                         return ret;
2143
2144                 bus_addr = rxq->rx_ring_phys_addr;
2145                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2146                                 rxq->nb_rx_desc *
2147                                 sizeof(union e1000_adv_rx_desc));
2148                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2149                                 (uint32_t)(bus_addr >> 32));
2150                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2151
2152                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2153
2154                 /*
2155                  * Configure RX buffer size.
2156                  */
2157                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2158                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2159                                        RTE_PKTMBUF_HEADROOM);
2160                 if (buf_size >= 1024) {
2161                         /*
2162                          * Configure the BSIZEPACKET field of the SRRCTL
2163                          * register of the queue.
2164                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2165                          * If this field is equal to 0b, then RCTL.BSIZE
2166                          * determines the RX packet buffer size.
2167                          */
2168                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2169                                    E1000_SRRCTL_BSIZEPKT_MASK);
2170                         buf_size = (uint16_t) ((srrctl &
2171                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2172                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2173
2174                         /* Add the dual VLAN tag length to support dual VLAN frames */
2175                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2176                                                 2 * VLAN_TAG_SIZE) > buf_size){
2177                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2178                                 dev->data->scattered_rx = 1;
2179                         }
2180                 } else {
2181                         /*
2182                          * Use BSIZE field of the device RCTL register.
2183                          */
2184                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2185                                 rctl_bsize = buf_size;
2186                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2187                         dev->data->scattered_rx = 1;
2188                 }
2189
2190                 /* Set if packets are dropped when no descriptors available */
2191                 if (rxq->drop_en)
2192                         srrctl |= E1000_SRRCTL_DROP_EN;
2193
2194                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2195
2196                 /* Enable this RX queue. */
2197                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2198                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2199                 rxdctl &= 0xFFF00000;
2200                 rxdctl |= (rxq->pthresh & 0x1F);
2201                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2202                 if (hw->mac.type == e1000_vfadapt) {
2203                         /*
2204                          * Workaround for the 82576 VF erratum:
2205                          * force WTHRESH to 1 to avoid descriptor
2206                          * write-back sometimes not being triggered.
2207                          */
2208                         rxdctl |= 0x10000;
2209                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
2210                 }
2211                 else
2212                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2213                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2214         }
2215
2216         /*
2217          * Setup the HW Rx Head and Tail Descriptor Pointers.
2218          * This needs to be done after enable.
2219          */
2220         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2221                 rxq = dev->data->rx_queues[i];
2222                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2223                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2224         }
2225
2226         return 0;
2227 }
2228
2229 /*********************************************************************
2230  *
2231  *  Enable VF transmit unit.
2232  *
2233  **********************************************************************/
2234 void
2235 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2236 {
2237         struct e1000_hw     *hw;
2238         struct igb_tx_queue *txq;
2239         uint32_t txdctl;
2240         uint16_t i;
2241
2242         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2243
2244         /* Setup the Base and Length of the Tx Descriptor Rings. */
2245         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2246                 uint64_t bus_addr;
2247
2248                 txq = dev->data->tx_queues[i];
2249                 bus_addr = txq->tx_ring_phys_addr;
2250                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2251                                 txq->nb_tx_desc *
2252                                 sizeof(union e1000_adv_tx_desc));
2253                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2254                                 (uint32_t)(bus_addr >> 32));
2255                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2256
2257                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2258                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2259                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2260
2261                 /* Setup Transmit threshold registers. */
2262                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2263                 txdctl |= txq->pthresh & 0x1F;
2264                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2265                 if (hw->mac.type == e1000_vfadapt) {
2266                         /*
2267                          * Workaround for the 82576 VF erratum:
2268                          * force WTHRESH to 1 to avoid descriptor
2269                          * write-back sometimes not being triggered.
2270                          */
2271                         txdctl |= 0x10000; 
2272                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
2273                 }
2274                 else
2275                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2276                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2277                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2278         }
2279
2280 }
2281