dpdk.git: lib/librte_pmd_e1000/igb_rxtx.c (commit d4a803edbd125abe49de68b4855398116416e509)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 #define IGB_RSS_OFFLOAD_ALL ( \
77                 ETH_RSS_IPV4 | \
78                 ETH_RSS_IPV4_TCP | \
79                 ETH_RSS_IPV6 | \
80                 ETH_RSS_IPV6_EX | \
81                 ETH_RSS_IPV6_TCP | \
82                 ETH_RSS_IPV6_TCP_EX | \
83                 ETH_RSS_IPV4_UDP | \
84                 ETH_RSS_IPV6_UDP | \
85                 ETH_RSS_IPV6_UDP_EX)
86
87 static inline struct rte_mbuf *
88 rte_rxmbuf_alloc(struct rte_mempool *mp)
89 {
90         struct rte_mbuf *m;
91
92         m = __rte_mbuf_raw_alloc(mp);
93         __rte_mbuf_sanity_check_raw(m, 0);
94         return (m);
95 }
96
97 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
98         (uint64_t) ((mb)->buf_physaddr +                   \
99                         (uint64_t) ((char *)((mb)->data) -     \
100                                 (char *)(mb)->buf_addr))
101
102 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
103         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
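
/*
 * Illustrative example (not driver code): with an mbuf whose data buffer starts
 * at physical address 0x100000 and the default RTE_PKTMBUF_HEADROOM of 128
 * bytes, RTE_MBUF_DATA_DMA_ADDR_DEFAULT() yields 0x100080, i.e. the DMA address
 * of the first payload byte while the data pointer is still at its reset
 * position.  RTE_MBUF_DATA_DMA_ADDR() derives the same value from the current
 * data pointer, so it also tracks any rte_pktmbuf_adj()/rte_pktmbuf_prepend()
 * adjustments made by the application.
 */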
104
105 /**
106  * Structure associated with each descriptor of the RX ring of a RX queue.
107  */
108 struct igb_rx_entry {
109         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
110 };
111
112 /**
113  * Structure associated with each descriptor of the TX ring of a TX queue.
114  */
115 struct igb_tx_entry {
116         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
117         uint16_t next_id; /**< Index of next descriptor in ring. */
118         uint16_t last_id; /**< Index of last scattered descriptor. */
119 };
120
121 /**
122  * Structure associated with each RX queue.
123  */
124 struct igb_rx_queue {
125         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
126         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
127         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
128         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
129         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
130         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
131         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
132         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
133         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
134         uint16_t            rx_tail;    /**< current value of RDT register. */
135         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
136         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
137         uint16_t            queue_id;   /**< RX queue index. */
138         uint16_t            reg_idx;    /**< RX queue register index. */
139         uint8_t             port_id;    /**< Device port identifier. */
140         uint8_t             pthresh;    /**< Prefetch threshold register. */
141         uint8_t             hthresh;    /**< Host threshold register. */
142         uint8_t             wthresh;    /**< Write-back threshold register. */
143         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
144         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
145 };
146
147 /**
148  * Hardware context number
149  */
150 enum igb_advctx_num {
151         IGB_CTX_0    = 0, /**< CTX0    */
152         IGB_CTX_1    = 1, /**< CTX1    */
153         IGB_CTX_NUM  = 2, /**< CTX_NUM */
154 };
155
156 /** Offload features */
157 union igb_vlan_macip {
158         uint32_t data;
159         struct {
160                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
161                 uint16_t vlan_tci;
162                 /**< VLAN Tag Control Identifier (CPU order). */
163         } f;
164 };
165
166 /*
167  * Compare mask for vlan_macip_lens.data;
168  * must be kept in sync with the igb_vlan_macip.f layout.
169  */
170 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
171 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
172 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
173 /** MAC+IP  length. */
174 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
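
/*
 * Worked example (illustrative only, assuming the union igb_vlan_macip field
 * layout above on a little-endian CPU): for an untagged IPv4/TCP packet with a
 * 14-byte Ethernet header and a 20-byte IP header,
 * l2_l3_len = (14 << 9) | 20 = 0x1C14 and vlan_tci = 0, so
 * vlan_macip_lens.data = 0x00001C14.  TX_MACIP_LEN_CMP_MASK selects the low
 * 16 bits of that value, while TX_VLAN_CMP_MASK selects the VLAN TCI held in
 * the upper 16 bits.
 */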
175
176 /**
177  * Structure used to check whether a new context descriptor needs to be built.
178  */
179 struct igb_advctx_info {
180         uint16_t flags;           /**< ol_flags related to context build. */
181         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
182         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
183 };
184
185 /**
186  * Structure associated with each TX queue.
187  */
188 struct igb_tx_queue {
189         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
190         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
191         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
192         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
193         uint32_t               txd_type;      /**< Device-specific TXD type */
194         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
195         uint16_t               tx_tail; /**< Current value of TDT register. */
196         uint16_t               tx_head;
197         /**< Index of first used TX descriptor. */
198         uint16_t               queue_id; /**< TX queue index. */
199         uint16_t               reg_idx;  /**< TX queue register index. */
200         uint8_t                port_id;  /**< Device port identifier. */
201         uint8_t                pthresh;  /**< Prefetch threshold register. */
202         uint8_t                hthresh;  /**< Host threshold register. */
203         uint8_t                wthresh;  /**< Write-back threshold register. */
204         uint32_t               ctx_curr;
205         /**< Index of the hardware context currently in use. */
206         uint32_t               ctx_start;
207         /**< Start context position for transmit queue. */
208         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
209         /**< Hardware context history.*/
210 };
211
212 #if 1
213 #define RTE_PMD_USE_PREFETCH
214 #endif
215
216 #ifdef RTE_PMD_USE_PREFETCH
217 #define rte_igb_prefetch(p)     rte_prefetch0(p)
218 #else
219 #define rte_igb_prefetch(p)     do {} while(0)
220 #endif
221
222 #ifdef RTE_PMD_PACKET_PREFETCH
223 #define rte_packet_prefetch(p) rte_prefetch1(p)
224 #else
225 #define rte_packet_prefetch(p)  do {} while(0)
226 #endif
227
228 /*
229  * Macro for VMDq feature for 1 GbE NIC.
230  */
231 #define E1000_VMOLR_SIZE                        (8)
232
233 /*********************************************************************
234  *
235  *  TX function
236  *
237  **********************************************************************/
238
239 /*
240  * Advanced context descriptors are almost the same between igb and ixgbe.
241  * This is kept as a separate function, leaving room for optimization here;
242  * rework is required to use the pre-defined values.
243  */
244
245 static inline void
246 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
247                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
248                 uint16_t ol_flags, uint32_t vlan_macip_lens)
249 {
250         uint32_t type_tucmd_mlhl;
251         uint32_t mss_l4len_idx;
252         uint32_t ctx_idx, ctx_curr;
253         uint32_t cmp_mask;
254
255         ctx_curr = txq->ctx_curr;
256         ctx_idx = ctx_curr + txq->ctx_start;
257
258         cmp_mask = 0;
259         type_tucmd_mlhl = 0;
260
261         if (ol_flags & PKT_TX_VLAN_PKT) {
262                 cmp_mask |= TX_VLAN_CMP_MASK;
263         }
264
265         if (ol_flags & PKT_TX_IP_CKSUM) {
266                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
267                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
268         }
269
270         /* Specify which HW CTX to upload. */
271         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
272         switch (ol_flags & PKT_TX_L4_MASK) {
273         case PKT_TX_UDP_CKSUM:
274                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
275                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
276                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
277                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
278                 break;
279         case PKT_TX_TCP_CKSUM:
280                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
281                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
282                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
283                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
284                 break;
285         case PKT_TX_SCTP_CKSUM:
286                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
287                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
289                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
290                 break;
291         default:
292                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
293                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
294                 break;
295         }
296
297         txq->ctx_cache[ctx_curr].flags           = ol_flags;
298         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
299         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
300                 vlan_macip_lens & cmp_mask;
301
302         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
303         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
304         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
305         ctx_txd->seqnum_seed     = 0;
306 }
307
308 /*
309  * Check which hardware context can be used. Use the existing match
310  * or create a new context descriptor.
311  */
312 static inline uint32_t
313 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
314                 uint32_t vlan_macip_lens)
315 {
316         /* If match with the current context */
317         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320                         return txq->ctx_curr;
321         }
322
323         /* If match with the second context */
324         txq->ctx_curr ^= 1;
325         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
326                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
327                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
328                         return txq->ctx_curr;
329         }
330
331         /* Mismatch: a new context must be built in the slot now selected */
332         return (IGB_CTX_NUM);
333 }
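
/*
 * Caller's contract (illustrative sketch, mirroring the code in
 * eth_igb_xmit_pkts()): a return value below IGB_CTX_NUM means the offload
 * parameters match one of the two cached contexts and no context descriptor
 * has to be emitted; IGB_CTX_NUM means the transmit path must program a new
 * context into the slot now selected by txq->ctx_curr, e.g.:
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens.data);
 *     new_ctx = (ctx == IGB_CTX_NUM);
 */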
334
335 static inline uint32_t
336 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
337 {
338         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
339         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
340         uint32_t tmp;
341
342         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
343         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
344         return tmp;
345 }
346
347 static inline uint32_t
348 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
349 {
350         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
351         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
352 }
353
354 uint16_t
355 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
356                uint16_t nb_pkts)
357 {
358         struct igb_tx_queue *txq;
359         struct igb_tx_entry *sw_ring;
360         struct igb_tx_entry *txe, *txn;
361         volatile union e1000_adv_tx_desc *txr;
362         volatile union e1000_adv_tx_desc *txd;
363         struct rte_mbuf     *tx_pkt;
364         struct rte_mbuf     *m_seg;
365         union igb_vlan_macip vlan_macip_lens;
366         uint64_t buf_dma_addr;
367         uint32_t olinfo_status;
368         uint32_t cmd_type_len;
369         uint32_t pkt_len;
370         uint16_t slen;
371         uint16_t ol_flags;
372         uint16_t tx_end;
373         uint16_t tx_id;
374         uint16_t tx_last;
375         uint16_t nb_tx;
376         uint16_t tx_ol_req;
377         uint32_t new_ctx = 0;
378         uint32_t ctx = 0;
379
380         txq = tx_queue;
381         sw_ring = txq->sw_ring;
382         txr     = txq->tx_ring;
383         tx_id   = txq->tx_tail;
384         txe = &sw_ring[tx_id];
385
386         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
387                 tx_pkt = *tx_pkts++;
388                 pkt_len = tx_pkt->pkt_len;
389
390                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
391
392                 /*
393                  * The number of descriptors that must be allocated for a
394                  * packet is the number of segments of that packet, plus 1
395                  * Context Descriptor for the VLAN Tag Identifier, if any.
396                  * Determine the last TX descriptor to allocate in the TX ring
397                  * for the packet, starting from the current position (tx_id)
398                  * in the ring.
399                  */
400                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
401
402                 ol_flags = tx_pkt->ol_flags;
403                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
404                 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
405                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
406
407                 /* Check whether a Context Descriptor needs to be built. */
408                 if (tx_ol_req) {
409                         ctx = what_advctx_update(txq, tx_ol_req,
410                                 vlan_macip_lens.data);
411                         /* Only allocate a context descriptor if required. */
412                         new_ctx = (ctx == IGB_CTX_NUM);
413                         ctx = txq->ctx_curr;
414                         tx_last = (uint16_t) (tx_last + new_ctx);
415                 }
416                 if (tx_last >= txq->nb_tx_desc)
417                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
418
419                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
420                            " tx_first=%u tx_last=%u\n",
421                            (unsigned) txq->port_id,
422                            (unsigned) txq->queue_id,
423                            (unsigned) pkt_len,
424                            (unsigned) tx_id,
425                            (unsigned) tx_last);
426
427                 /*
428                  * Check if there are enough free descriptors in the TX ring
429                  * to transmit the next packet.
430                  * This operation is based on the two following rules:
431                  *
432                  *   1- Only check that the last needed TX descriptor can be
433                  *      allocated (by construction, if that descriptor is free,
434                  *      all intermediate ones are also free).
435                  *
436                  *      For this purpose, the index of the last TX descriptor
437                  *      used for a packet (the "last descriptor" of a packet)
438                  *      is recorded in the TX entries (the last one included)
439                  *      that are associated with all TX descriptors allocated
440                  *      for that packet.
441                  *
442                  *   2- Avoid allocating the last free TX descriptor of the
443                  *      ring, so that the TDT register is never set to the
444                  *      same value stored in parallel by the NIC in the TDH
445                  *      register, which would make the TX engine of the NIC
446                  *      enter a deadlock situation.
447                  *
448                  *      By extension, avoid allocating a free descriptor that
449                  *      belongs to the last set of free descriptors allocated
450                  *      to the same previously transmitted packet.
451                  */
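
                /*
                 * Worked example (illustrative): with a 512-entry ring,
                 * tx_id = 510 and a 4-segment packet that also needs a
                 * context descriptor, tx_last wraps to
                 * (510 + 4 + 1 - 1) % 512 = 2, and the DD check below is
                 * performed on the "last descriptor" chain recorded at
                 * entry 2 of the software ring.
                 */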
452
453                 /*
454                  * The "last descriptor" of the packet, if any, that previously
455                  * used the descriptor slot we now want to allocate as our last one.
456                  */
457                 tx_end = sw_ring[tx_last].last_id;
458
459                 /*
460                  * The next descriptor following that "last descriptor" in the
461                  * ring.
462                  */
463                 tx_end = sw_ring[tx_end].next_id;
464
465                 /*
466                  * The "last descriptor" associated with that next descriptor.
467                  */
468                 tx_end = sw_ring[tx_end].last_id;
469
470                 /*
471                  * Check that this descriptor is free.
472                  */
473                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
474                         if (nb_tx == 0)
475                                 return (0);
476                         goto end_of_tx;
477                 }
478
479                 /*
480                  * Set common flags of all TX Data Descriptors.
481                  *
482                  * The following bits must be set in all Data Descriptors:
483                  *   - E1000_ADVTXD_DTYP_DATA
484                  *   - E1000_ADVTXD_DCMD_DEXT
485                  *
486                  * The following bits must be set in the first Data Descriptor
487                  * and are ignored in the other ones:
488                  *   - E1000_ADVTXD_DCMD_IFCS
489                  *   - E1000_ADVTXD_MAC_1588
490                  *   - E1000_ADVTXD_DCMD_VLE
491                  *
492                  * The following bits must only be set in the last Data
493                  * Descriptor:
494                  *   - E1000_TXD_CMD_EOP
495                  *
496                  * The following bits can be set in any Data Descriptor, but
497                  * are only set in the last Data Descriptor:
498                  *   - E1000_TXD_CMD_RS
499                  */
500                 cmd_type_len = txq->txd_type |
501                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
502                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
503 #if defined(RTE_LIBRTE_IEEE1588)
504                 if (ol_flags & PKT_TX_IEEE1588_TMST)
505                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
506 #endif
507                 if (tx_ol_req) {
508                         /* Setup TX Advanced context descriptor if required */
509                         if (new_ctx) {
510                                 volatile struct e1000_adv_tx_context_desc *
511                                     ctx_txd;
512
513                                 ctx_txd = (volatile struct
514                                     e1000_adv_tx_context_desc *)
515                                     &txr[tx_id];
516
517                                 txn = &sw_ring[txe->next_id];
518                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
519
520                                 if (txe->mbuf != NULL) {
521                                         rte_pktmbuf_free_seg(txe->mbuf);
522                                         txe->mbuf = NULL;
523                                 }
524
525                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
526                                     vlan_macip_lens.data);
527
528                                 txe->last_id = tx_last;
529                                 tx_id = txe->next_id;
530                                 txe = txn;
531                         }
532
533                         /* Setup the TX Advanced Data Descriptor */
534                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
535                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
536                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
537                 }
538
539                 m_seg = tx_pkt;
540                 do {
541                         txn = &sw_ring[txe->next_id];
542                         txd = &txr[tx_id];
543
544                         if (txe->mbuf != NULL)
545                                 rte_pktmbuf_free_seg(txe->mbuf);
546                         txe->mbuf = m_seg;
547
548                         /*
549                          * Set up transmit descriptor.
550                          */
551                         slen = (uint16_t) m_seg->data_len;
552                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
553                         txd->read.buffer_addr =
554                                 rte_cpu_to_le_64(buf_dma_addr);
555                         txd->read.cmd_type_len =
556                                 rte_cpu_to_le_32(cmd_type_len | slen);
557                         txd->read.olinfo_status =
558                                 rte_cpu_to_le_32(olinfo_status);
559                         txe->last_id = tx_last;
560                         tx_id = txe->next_id;
561                         txe = txn;
562                         m_seg = m_seg->next;
563                 } while (m_seg != NULL);
564
565                 /*
566                  * The last packet data descriptor needs End Of Packet (EOP)
567                  * and Report Status (RS).
568                  */
569                 txd->read.cmd_type_len |=
570                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
571         }
572  end_of_tx:
573         rte_wmb();
574
575         /*
576          * Set the Transmit Descriptor Tail (TDT).
577          */
578         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
579         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
580                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
581                    (unsigned) tx_id, (unsigned) nb_tx);
582         txq->tx_tail = tx_id;
583
584         return (nb_tx);
585 }
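
/*
 * Usage note (illustrative): applications normally reach this function
 * indirectly through rte_eth_tx_burst() once eth_igb_tx_queue_setup() has
 * installed it as dev->tx_pkt_burst, e.g.:
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * "sent" may be smaller than nb_pkts when the ring runs out of free
 * descriptors, in which case the caller is expected to retry or drop the
 * remaining mbufs.
 */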
586
587 /*********************************************************************
588  *
589  *  RX functions
590  *
591  **********************************************************************/
592 static inline uint16_t
593 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
594 {
595         uint16_t pkt_flags;
596
597         static uint16_t ip_pkt_types_map[16] = {
598                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
599                 PKT_RX_IPV6_HDR, 0, 0, 0,
600                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
601                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
602         };
603
604 #if defined(RTE_LIBRTE_IEEE1588)
605         static uint32_t ip_pkt_etqf_map[8] = {
606                 0, 0, 0, PKT_RX_IEEE1588_PTP,
607                 0, 0, 0, 0,
608         };
609
610         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
611                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
612                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
613 #else
614         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
615                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
616 #endif
617         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
618                                                 0 : PKT_RX_RSS_HASH));
619 }
620
621 static inline uint16_t
622 rx_desc_status_to_pkt_flags(uint32_t rx_status)
623 {
624         uint16_t pkt_flags;
625
626         /* Check if VLAN present */
627         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
628                                                 PKT_RX_VLAN_PKT : 0);
629
630 #if defined(RTE_LIBRTE_IEEE1588)
631         if (rx_status & E1000_RXD_STAT_TMST)
632                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
633 #endif
634         return pkt_flags;
635 }
636
637 static inline uint16_t
638 rx_desc_error_to_pkt_flags(uint32_t rx_status)
639 {
640         /*
641          * Bit 30: IPE, IPv4 checksum error
642          * Bit 29: L4I, L4 integrity error
643          */
644
645         static uint16_t error_to_pkt_flags_map[4] = {
646                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
647                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
648         };
649         return error_to_pkt_flags_map[(rx_status >>
650                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
651 }
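
/*
 * Example (illustrative): a status/error word with only the IP checksum error
 * bit set maps to PKT_RX_IP_CKSUM_BAD, only the L4 error bit set maps to
 * PKT_RX_L4_CKSUM_BAD, and both set yield the OR of the two flags; every other
 * bit is masked off by the two-bit table lookup above.
 */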
652
653 uint16_t
654 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
655                uint16_t nb_pkts)
656 {
657         struct igb_rx_queue *rxq;
658         volatile union e1000_adv_rx_desc *rx_ring;
659         volatile union e1000_adv_rx_desc *rxdp;
660         struct igb_rx_entry *sw_ring;
661         struct igb_rx_entry *rxe;
662         struct rte_mbuf *rxm;
663         struct rte_mbuf *nmb;
664         union e1000_adv_rx_desc rxd;
665         uint64_t dma_addr;
666         uint32_t staterr;
667         uint32_t hlen_type_rss;
668         uint16_t pkt_len;
669         uint16_t rx_id;
670         uint16_t nb_rx;
671         uint16_t nb_hold;
672         uint16_t pkt_flags;
673
674         nb_rx = 0;
675         nb_hold = 0;
676         rxq = rx_queue;
677         rx_id = rxq->rx_tail;
678         rx_ring = rxq->rx_ring;
679         sw_ring = rxq->sw_ring;
680         while (nb_rx < nb_pkts) {
681                 /*
682                  * The order of operations here is important as the DD status
683                  * bit must not be read after any other descriptor fields.
684                  * rx_ring and rxdp are pointing to volatile data so the order
685                  * of accesses cannot be reordered by the compiler. If they were
686                  * not volatile, they could be reordered which could lead to
687                  * using invalid descriptor fields when read from rxd.
688                  */
689                 rxdp = &rx_ring[rx_id];
690                 staterr = rxdp->wb.upper.status_error;
691                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
692                         break;
693                 rxd = *rxdp;
694
695                 /*
696                  * End of packet.
697                  *
698                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
699                  * likely to be invalid and to be dropped by the various
700                  * validation checks performed by the network stack.
701                  *
702                  * Allocate a new mbuf to replenish the RX ring descriptor.
703                  * If the allocation fails:
704                  *    - arrange for that RX descriptor to be the first one
705                  *      being parsed the next time the receive function is
706                  *      invoked [on the same queue].
707                  *
708                  *    - Stop parsing the RX ring and return immediately.
709                  *
710                  * This policy does not drop the packet received in the RX
711                  * descriptor for which the allocation of a new mbuf failed.
712                  * Thus, it allows that packet to be retrieved later, once
713                  * mbufs have been freed in the meantime.
714                  * As a side effect, holding RX descriptors instead of
715                  * systematically giving them back to the NIC may lead to
716                  * RX ring exhaustion situations.
717                  * However, the NIC can gracefully prevent such situations
718                  * by sending specific "back-pressure" flow control frames
719                  * to its peer(s).
720                  */
721                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
722                            "staterr=0x%x pkt_len=%u\n",
723                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
724                            (unsigned) rx_id, (unsigned) staterr,
725                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
726
727                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
728                 if (nmb == NULL) {
729                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
730                                    "queue_id=%u\n", (unsigned) rxq->port_id,
731                                    (unsigned) rxq->queue_id);
732                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
733                         break;
734                 }
735
736                 nb_hold++;
737                 rxe = &sw_ring[rx_id];
738                 rx_id++;
739                 if (rx_id == rxq->nb_rx_desc)
740                         rx_id = 0;
741
742                 /* Prefetch next mbuf while processing current one. */
743                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
744
745                 /*
746                  * When next RX descriptor is on a cache-line boundary,
747                  * prefetch the next 4 RX descriptors and the next 8 pointers
748                  * to mbufs.
749                  */
750                 if ((rx_id & 0x3) == 0) {
751                         rte_igb_prefetch(&rx_ring[rx_id]);
752                         rte_igb_prefetch(&sw_ring[rx_id]);
753                 }
754
755                 rxm = rxe->mbuf;
756                 rxe->mbuf = nmb;
757                 dma_addr =
758                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
759                 rxdp->read.hdr_addr = dma_addr;
760                 rxdp->read.pkt_addr = dma_addr;
761
762                 /*
763                  * Initialize the returned mbuf.
764                  * 1) setup generic mbuf fields:
765                  *    - number of segments,
766                  *    - next segment,
767                  *    - packet length,
768                  *    - RX port identifier.
769                  * 2) integrate hardware offload data, if any:
770                  *    - RSS flag & hash,
771                  *    - IP checksum flag,
772                  *    - VLAN TCI, if any,
773                  *    - error flags.
774                  */
775                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
776                                       rxq->crc_len);
777                 rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
778                 rte_packet_prefetch(rxm->data);
779                 rxm->nb_segs = 1;
780                 rxm->next = NULL;
781                 rxm->pkt_len = pkt_len;
782                 rxm->data_len = pkt_len;
783                 rxm->port = rxq->port_id;
784
785                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
786                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
787                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
788                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
789
790                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
791                 pkt_flags = (uint16_t)(pkt_flags |
792                                 rx_desc_status_to_pkt_flags(staterr));
793                 pkt_flags = (uint16_t)(pkt_flags |
794                                 rx_desc_error_to_pkt_flags(staterr));
795                 rxm->ol_flags = pkt_flags;
796
797                 /*
798                  * Store the mbuf address into the next entry of the array
799                  * of returned packets.
800                  */
801                 rx_pkts[nb_rx++] = rxm;
802         }
803         rxq->rx_tail = rx_id;
804
805         /*
806          * If the number of free RX descriptors is greater than the RX free
807          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
808          * register.
809          * Update the RDT with the value of the last processed RX descriptor
810          * minus 1, to guarantee that the RDT register is never equal to the
811          * RDH register, which creates a "full" ring situation from the
812          * hardware point of view...
813          */
814         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
815         if (nb_hold > rxq->rx_free_thresh) {
816                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
817                            "nb_hold=%u nb_rx=%u\n",
818                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819                            (unsigned) rx_id, (unsigned) nb_hold,
820                            (unsigned) nb_rx);
821                 rx_id = (uint16_t) ((rx_id == 0) ?
822                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
823                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
824                 nb_hold = 0;
825         }
826         rxq->nb_rx_hold = nb_hold;
827         return (nb_rx);
828 }
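
/*
 * Usage note (illustrative): this is the non-scattered receive path typically
 * installed as dev->rx_pkt_burst when RX scatter is not needed, and it is
 * reached through rte_eth_rx_burst(), e.g.:
 *
 *     struct rte_mbuf *burst[32];
 *     uint16_t nb = rte_eth_rx_burst(port_id, queue_id, burst, 32);
 *
 * Each returned mbuf carries a complete frame in a single segment; frames
 * larger than the mbuf data room require eth_igb_recv_scattered_pkts() below.
 */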
829
830 uint16_t
831 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
832                          uint16_t nb_pkts)
833 {
834         struct igb_rx_queue *rxq;
835         volatile union e1000_adv_rx_desc *rx_ring;
836         volatile union e1000_adv_rx_desc *rxdp;
837         struct igb_rx_entry *sw_ring;
838         struct igb_rx_entry *rxe;
839         struct rte_mbuf *first_seg;
840         struct rte_mbuf *last_seg;
841         struct rte_mbuf *rxm;
842         struct rte_mbuf *nmb;
843         union e1000_adv_rx_desc rxd;
844         uint64_t dma; /* Physical address of mbuf data buffer */
845         uint32_t staterr;
846         uint32_t hlen_type_rss;
847         uint16_t rx_id;
848         uint16_t nb_rx;
849         uint16_t nb_hold;
850         uint16_t data_len;
851         uint16_t pkt_flags;
852
853         nb_rx = 0;
854         nb_hold = 0;
855         rxq = rx_queue;
856         rx_id = rxq->rx_tail;
857         rx_ring = rxq->rx_ring;
858         sw_ring = rxq->sw_ring;
859
860         /*
861          * Retrieve RX context of current packet, if any.
862          */
863         first_seg = rxq->pkt_first_seg;
864         last_seg = rxq->pkt_last_seg;
865
866         while (nb_rx < nb_pkts) {
867         next_desc:
868                 /*
869                  * The order of operations here is important as the DD status
870                  * bit must not be read after any other descriptor fields.
871                  * rx_ring and rxdp are pointing to volatile data so the order
872                  * of accesses cannot be reordered by the compiler. If they were
873                  * not volatile, they could be reordered which could lead to
874                  * using invalid descriptor fields when read from rxd.
875                  */
876                 rxdp = &rx_ring[rx_id];
877                 staterr = rxdp->wb.upper.status_error;
878                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
879                         break;
880                 rxd = *rxdp;
881
882                 /*
883                  * Descriptor done.
884                  *
885                  * Allocate a new mbuf to replenish the RX ring descriptor.
886                  * If the allocation fails:
887                  *    - arrange for that RX descriptor to be the first one
888                  *      being parsed the next time the receive function is
889                  *      invoked [on the same queue].
890                  *
891                  *    - Stop parsing the RX ring and return immediately.
892                  *
893                  * This policy does not drop the packet received in the RX
894                  * descriptor for which the allocation of a new mbuf failed.
895                  * Thus, it allows that packet to be retrieved later, once
896                  * mbufs have been freed in the meantime.
897                  * As a side effect, holding RX descriptors instead of
898                  * systematically giving them back to the NIC may lead to
899                  * RX ring exhaustion situations.
900                  * However, the NIC can gracefully prevent such situations
901                  * by sending specific "back-pressure" flow control frames
902                  * to its peer(s).
903                  */
904                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
905                            "staterr=0x%x data_len=%u\n",
906                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
907                            (unsigned) rx_id, (unsigned) staterr,
908                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
909
910                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
911                 if (nmb == NULL) {
912                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
913                                    "queue_id=%u\n", (unsigned) rxq->port_id,
914                                    (unsigned) rxq->queue_id);
915                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
916                         break;
917                 }
918
919                 nb_hold++;
920                 rxe = &sw_ring[rx_id];
921                 rx_id++;
922                 if (rx_id == rxq->nb_rx_desc)
923                         rx_id = 0;
924
925                 /* Prefetch next mbuf while processing current one. */
926                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
927
928                 /*
929                  * When next RX descriptor is on a cache-line boundary,
930                  * prefetch the next 4 RX descriptors and the next 8 pointers
931                  * to mbufs.
932                  */
933                 if ((rx_id & 0x3) == 0) {
934                         rte_igb_prefetch(&rx_ring[rx_id]);
935                         rte_igb_prefetch(&sw_ring[rx_id]);
936                 }
937
938                 /*
939                  * Update RX descriptor with the physical address of the new
940                  * data buffer of the new allocated mbuf.
941                  */
942                 rxm = rxe->mbuf;
943                 rxe->mbuf = nmb;
944                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
945                 rxdp->read.pkt_addr = dma;
946                 rxdp->read.hdr_addr = dma;
947
948                 /*
949                  * Set data length & data buffer address of mbuf.
950                  */
951                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
952                 rxm->data_len = data_len;
953                 rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
954
955                 /*
956                  * If this is the first buffer of the received packet,
957                  * set the pointer to the first mbuf of the packet and
958                  * initialize its context.
959                  * Otherwise, update the total length and the number of segments
960                  * of the current scattered packet, and update the pointer to
961                  * the last mbuf of the current packet.
962                  */
963                 if (first_seg == NULL) {
964                         first_seg = rxm;
965                         first_seg->pkt_len = data_len;
966                         first_seg->nb_segs = 1;
967                 } else {
968                         first_seg->pkt_len += data_len;
969                         first_seg->nb_segs++;
970                         last_seg->next = rxm;
971                 }
972
973                 /*
974                  * If this is not the last buffer of the received packet,
975                  * update the pointer to the last mbuf of the current scattered
976                  * packet and continue to parse the RX ring.
977                  */
978                 if (! (staterr & E1000_RXD_STAT_EOP)) {
979                         last_seg = rxm;
980                         goto next_desc;
981                 }
982
983                 /*
984                  * This is the last buffer of the received packet.
985                  * If the CRC is not stripped by the hardware:
986                  *   - Subtract the CRC length from the total packet length.
987                  *   - If the last buffer only contains the whole CRC or a part
988                  *     of it, free the mbuf associated to the last buffer.
989                  *     If part of the CRC is also contained in the previous
990                  *     mbuf, subtract the length of that CRC part from the
991                  *     data length of the previous mbuf.
992                  */
993                 rxm->next = NULL;
994                 if (unlikely(rxq->crc_len > 0)) {
995                         first_seg->pkt_len -= ETHER_CRC_LEN;
996                         if (data_len <= ETHER_CRC_LEN) {
997                                 rte_pktmbuf_free_seg(rxm);
998                                 first_seg->nb_segs--;
999                                 last_seg->data_len = (uint16_t)
1000                                         (last_seg->data_len -
1001                                          (ETHER_CRC_LEN - data_len));
1002                                 last_seg->next = NULL;
1003                         } else
1004                                 rxm->data_len =
1005                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1006                 }
1007
1008                 /*
1009                  * Initialize the first mbuf of the returned packet:
1010                  *    - RX port identifier,
1011                  *    - hardware offload data, if any:
1012                  *      - RSS flag & hash,
1013                  *      - IP checksum flag,
1014                  *      - VLAN TCI, if any,
1015                  *      - error flags.
1016                  */
1017                 first_seg->port = rxq->port_id;
1018                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1019
1020                 /*
1021                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1022                  * set in the pkt_flags field.
1023                  */
1024                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1025                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1026                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1027                 pkt_flags = (uint16_t)(pkt_flags |
1028                                 rx_desc_status_to_pkt_flags(staterr));
1029                 pkt_flags = (uint16_t)(pkt_flags |
1030                                 rx_desc_error_to_pkt_flags(staterr));
1031                 first_seg->ol_flags = pkt_flags;
1032
1033                 /* Prefetch data of first segment, if configured to do so. */
1034                 rte_packet_prefetch(first_seg->data);
1035
1036                 /*
1037                  * Store the mbuf address into the next entry of the array
1038                  * of returned packets.
1039                  */
1040                 rx_pkts[nb_rx++] = first_seg;
1041
1042                 /*
1043                  * Setup receipt context for a new packet.
1044                  */
1045                 first_seg = NULL;
1046         }
1047
1048         /*
1049          * Record index of the next RX descriptor to probe.
1050          */
1051         rxq->rx_tail = rx_id;
1052
1053         /*
1054          * Save receive context.
1055          */
1056         rxq->pkt_first_seg = first_seg;
1057         rxq->pkt_last_seg = last_seg;
1058
1059         /*
1060          * If the number of free RX descriptors is greater than the RX free
1061          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1062          * register.
1063          * Update the RDT with the value of the last processed RX descriptor
1064          * minus 1, to guarantee that the RDT register is never equal to the
1065          * RDH register, which creates a "full" ring situation from the
1066          * hardware point of view...
1067          */
1068         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1069         if (nb_hold > rxq->rx_free_thresh) {
1070                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1071                            "nb_hold=%u nb_rx=%u\n",
1072                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1073                            (unsigned) rx_id, (unsigned) nb_hold,
1074                            (unsigned) nb_rx);
1075                 rx_id = (uint16_t) ((rx_id == 0) ?
1076                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1077                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1078                 nb_hold = 0;
1079         }
1080         rxq->nb_rx_hold = nb_hold;
1081         return (nb_rx);
1082 }
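
/*
 * Note: unlike eth_igb_recv_pkts(), the function above chains one mbuf per RX
 * descriptor until a descriptor with the EOP bit is seen, so a returned packet
 * may consist of several segments (first_seg->nb_segs > 1) whenever the frame
 * is larger than the per-descriptor buffer size.
 */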
1083
1084 /*
1085  * Rings setup and release.
1086  *
1087  * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
1088  * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
1089  * This also optimizes the cache line size effect, as the hardware supports
1090  * cache line sizes of up to 128 bytes.
1091  */
1092 #define IGB_ALIGN 128
1093
1094 /*
1095  * Maximum number of Ring Descriptors.
1096  *
1097  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1098  * descriptors must meet the following condition:
1099  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1100  */
1101 #define IGB_MIN_RING_DESC 32
1102 #define IGB_MAX_RING_DESC 4096
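
/*
 * Sizing example (illustrative): each advanced descriptor is 16 bytes, so any
 * descriptor count that is a multiple of 8 keeps the ring length a multiple of
 * 128 bytes; e.g. 512 descriptors (between IGB_MIN_RING_DESC and
 * IGB_MAX_RING_DESC) occupy 512 * 16 = 8192 bytes and therefore satisfy the
 * alignment rule above.
 */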
1103
1104 static const struct rte_memzone *
1105 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1106                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1107 {
1108         char z_name[RTE_MEMZONE_NAMESIZE];
1109         const struct rte_memzone *mz;
1110
1111         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1112                         dev->driver->pci_drv.name, ring_name,
1113                                 dev->data->port_id, queue_id);
1114         mz = rte_memzone_lookup(z_name);
1115         if (mz)
1116                 return mz;
1117
1118 #ifdef RTE_LIBRTE_XEN_DOM0
1119         return rte_memzone_reserve_bounded(z_name, ring_size,
1120                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1121 #else
1122         return rte_memzone_reserve_aligned(z_name, ring_size,
1123                         socket_id, 0, IGB_ALIGN);
1124 #endif
1125 }
1126
1127 static void
1128 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1129 {
1130         unsigned i;
1131
1132         if (txq->sw_ring != NULL) {
1133                 for (i = 0; i < txq->nb_tx_desc; i++) {
1134                         if (txq->sw_ring[i].mbuf != NULL) {
1135                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1136                                 txq->sw_ring[i].mbuf = NULL;
1137                         }
1138                 }
1139         }
1140 }
1141
1142 static void
1143 igb_tx_queue_release(struct igb_tx_queue *txq)
1144 {
1145         if (txq != NULL) {
1146                 igb_tx_queue_release_mbufs(txq);
1147                 rte_free(txq->sw_ring);
1148                 rte_free(txq);
1149         }
1150 }
1151
1152 void
1153 eth_igb_tx_queue_release(void *txq)
1154 {
1155         igb_tx_queue_release(txq);
1156 }
1157
1158 static void
1159 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1160 {
1161         txq->tx_head = 0;
1162         txq->tx_tail = 0;
1163         txq->ctx_curr = 0;
1164         memset((void*)&txq->ctx_cache, 0,
1165                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1166 }
1167
1168 static void
1169 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1170 {
1171         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1172                         .buffer_addr = 0}};
1173         struct igb_tx_entry *txe = txq->sw_ring;
1174         uint16_t i, prev;
1175         struct e1000_hw *hw;
1176
1177         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1178         /* Zero out HW ring memory */
1179         for (i = 0; i < txq->nb_tx_desc; i++) {
1180                 txq->tx_ring[i] = zeroed_desc;
1181         }
1182
1183         /* Initialize ring entries */
1184         prev = (uint16_t)(txq->nb_tx_desc - 1);
1185         for (i = 0; i < txq->nb_tx_desc; i++) {
1186                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1187
1188                 txd->wb.status = E1000_TXD_STAT_DD;
1189                 txe[i].mbuf = NULL;
1190                 txe[i].last_id = i;
1191                 txe[prev].next_id = i;
1192                 prev = i;
1193         }
1194
1195         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1196         /* 82575 specific, each tx queue will use 2 hw contexts */
1197         if (hw->mac.type == e1000_82575)
1198                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1199
1200         igb_reset_tx_queue_stat(txq);
1201 }
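
/*
 * Note: every descriptor is initialized with the DD (Descriptor Done) bit set
 * so that the free-descriptor check in eth_igb_xmit_pkts() sees the whole ring
 * as available on the first transmit after a queue reset.
 */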
1202
1203 int
1204 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1205                          uint16_t queue_idx,
1206                          uint16_t nb_desc,
1207                          unsigned int socket_id,
1208                          const struct rte_eth_txconf *tx_conf)
1209 {
1210         const struct rte_memzone *tz;
1211         struct igb_tx_queue *txq;
1212         struct e1000_hw     *hw;
1213         uint32_t size;
1214
1215         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1216
1217         /*
1218          * Validate number of transmit descriptors.
1219          * It must not exceed the hardware maximum or fall below the minimum,
1220          * and the resulting ring size in bytes must be a multiple of IGB_ALIGN.
1221          */
1222         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1223             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1224                 return -EINVAL;
1225         }
1226
1227         /*
1228          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1229          * driver.
1230          */
1231         if (tx_conf->tx_free_thresh != 0)
1232                 RTE_LOG(WARNING, PMD,
1233                         "The tx_free_thresh parameter is not "
1234                         "used for the 1G driver.\n");
1235         if (tx_conf->tx_rs_thresh != 0)
1236                 RTE_LOG(WARNING, PMD,
1237                         "The tx_rs_thresh parameter is not "
1238                         "used for the 1G driver.\n");
1239         if (tx_conf->tx_thresh.wthresh == 0)
1240                 RTE_LOG(WARNING, PMD,
1241                         "To improve 1G driver performance, consider setting "
1242                         "the TX WTHRESH value to 4, 8, or 16.\n");
1243
1244         /* Free memory prior to re-allocation if needed */
1245         if (dev->data->tx_queues[queue_idx] != NULL) {
1246                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1247                 dev->data->tx_queues[queue_idx] = NULL;
1248         }
1249
1250         /* First allocate the tx queue data structure */
1251         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1252                                                         CACHE_LINE_SIZE);
1253         if (txq == NULL)
1254                 return (-ENOMEM);
1255
1256         /*
1257          * Allocate TX ring hardware descriptors. A memzone large enough to
1258          * handle the maximum ring size is allocated in order to allow for
1259          * resizing in later calls to the queue setup function.
1260          */
1261         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1262         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1263                                         size, socket_id);
1264         if (tz == NULL) {
1265                 igb_tx_queue_release(txq);
1266                 return (-ENOMEM);
1267         }
1268
1269         txq->nb_tx_desc = nb_desc;
1270         txq->pthresh = tx_conf->tx_thresh.pthresh;
1271         txq->hthresh = tx_conf->tx_thresh.hthresh;
1272         txq->wthresh = tx_conf->tx_thresh.wthresh;
1273         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1274                 txq->wthresh = 1;
1275         txq->queue_id = queue_idx;
1276         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1277                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1278         txq->port_id = dev->data->port_id;
1279
1280         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1281 #ifndef RTE_LIBRTE_XEN_DOM0
1282         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1283 #else
1284         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1285 #endif
1286         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1287         /* Allocate software ring */
1288         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1289                                    sizeof(struct igb_tx_entry) * nb_desc,
1290                                    CACHE_LINE_SIZE);
1291         if (txq->sw_ring == NULL) {
1292                 igb_tx_queue_release(txq);
1293                 return (-ENOMEM);
1294         }
1295         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1296                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1297
1298         igb_reset_tx_queue(txq, dev);
1299         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1300         dev->data->tx_queues[queue_idx] = txq;
1301
1302         return (0);
1303 }
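
/*
 * Usage sketch (illustrative): this routine is normally invoked through the
 * generic rte_eth_tx_queue_setup() API, e.g.
 *
 *     rte_eth_tx_queue_setup(port_id, queue_idx, 512, rte_socket_id(),
 *                            &tx_conf);
 *
 * where a descriptor count such as 512 satisfies the IGB_MIN_RING_DESC,
 * IGB_MAX_RING_DESC and IGB_ALIGN constraints checked above.
 */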
1304
1305 static void
1306 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1307 {
1308         unsigned i;
1309
1310         if (rxq->sw_ring != NULL) {
1311                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1312                         if (rxq->sw_ring[i].mbuf != NULL) {
1313                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1314                                 rxq->sw_ring[i].mbuf = NULL;
1315                         }
1316                 }
1317         }
1318 }
1319
1320 static void
1321 igb_rx_queue_release(struct igb_rx_queue *rxq)
1322 {
1323         if (rxq != NULL) {
1324                 igb_rx_queue_release_mbufs(rxq);
1325                 rte_free(rxq->sw_ring);
1326                 rte_free(rxq);
1327         }
1328 }
1329
1330 void
1331 eth_igb_rx_queue_release(void *rxq)
1332 {
1333         igb_rx_queue_release(rxq);
1334 }
1335
1336 static void
1337 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1338 {
1339         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1340                         .pkt_addr = 0}};
1341         unsigned i;
1342
1343         /* Zero out HW ring memory */
1344         for (i = 0; i < rxq->nb_rx_desc; i++) {
1345                 rxq->rx_ring[i] = zeroed_desc;
1346         }
1347
1348         rxq->rx_tail = 0;
1349         rxq->pkt_first_seg = NULL;
1350         rxq->pkt_last_seg = NULL;
1351 }
1352
1353 int
1354 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1355                          uint16_t queue_idx,
1356                          uint16_t nb_desc,
1357                          unsigned int socket_id,
1358                          const struct rte_eth_rxconf *rx_conf,
1359                          struct rte_mempool *mp)
1360 {
1361         const struct rte_memzone *rz;
1362         struct igb_rx_queue *rxq;
1363         struct e1000_hw     *hw;
1364         unsigned int size;
1365
1366         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1367
1368         /*
1369          * Validate number of receive descriptors.
1370          * It must not exceed the hardware maximum, and it must be a
1371          * multiple of IGB_ALIGN.
1372          */
1373         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1374             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1375                 return (-EINVAL);
1376         }
1377
1378         /* Free memory prior to re-allocation if needed */
1379         if (dev->data->rx_queues[queue_idx] != NULL) {
1380                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1381                 dev->data->rx_queues[queue_idx] = NULL;
1382         }
1383
1384         /* First allocate the RX queue data structure. */
1385         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1386                           CACHE_LINE_SIZE);
1387         if (rxq == NULL)
1388                 return (-ENOMEM);
1389         rxq->mb_pool = mp;
1390         rxq->nb_rx_desc = nb_desc;
1391         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1392         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1393         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1394         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1395                 rxq->wthresh = 1;
1396         rxq->drop_en = rx_conf->rx_drop_en;
1397         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1398         rxq->queue_id = queue_idx;
1399         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1400                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1401         rxq->port_id = dev->data->port_id;
1402         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1403                                   ETHER_CRC_LEN);
1404
1405         /*
1406          *  Allocate RX ring hardware descriptors. A memzone large enough to
1407          *  handle the maximum ring size is allocated in order to allow for
1408          *  resizing in later calls to the queue setup function.
1409          */
1410         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1411         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1412         if (rz == NULL) {
1413                 igb_rx_queue_release(rxq);
1414                 return (-ENOMEM);
1415         }
1416         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1417         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1418 #ifndef RTE_LIBRTE_XEN_DOM0
1419         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1420 #else
1421         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1422 #endif
1423         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1424
1425         /* Allocate software ring. */
1426         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1427                                    sizeof(struct igb_rx_entry) * nb_desc,
1428                                    CACHE_LINE_SIZE);
1429         if (rxq->sw_ring == NULL) {
1430                 igb_rx_queue_release(rxq);
1431                 return (-ENOMEM);
1432         }
1433         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1434                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1435
1436         dev->data->rx_queues[queue_idx] = rxq;
1437         igb_reset_rx_queue(rxq);
1438
1439         return 0;
1440 }
1441
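/*
 * Illustrative sketch (hypothetical, not part of this driver): one RX
 * queue created through the generic ethdev API.  The mempool is assumed
 * to be a pktmbuf pool holding at least nb_desc mbufs for this queue;
 * port id, queue id, descriptor count and thresholds are example values
 * chosen to satisfy the IGB_ALIGN / ring-size checks above.
 */
static int
example_igb_setup_rx_queue(uint8_t port_id, unsigned int socket_id,
			   struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rx_conf;

	memset(&rx_conf, 0, sizeof(rx_conf));
	rx_conf.rx_thresh.pthresh = 8;
	rx_conf.rx_thresh.hthresh = 8;
	rx_conf.rx_thresh.wthresh = 4;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;

	return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
				      &rx_conf, mb_pool);
}
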
1442 uint32_t
1443 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1444 {
1445 #define IGB_RXQ_SCAN_INTERVAL 4
1446         volatile union e1000_adv_rx_desc *rxdp;
1447         struct igb_rx_queue *rxq;
1448         uint32_t desc = 0;
1449
1450         if (rx_queue_id >= dev->data->nb_rx_queues) {
1451                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1452                 return 0;
1453         }
1454
1455         rxq = dev->data->rx_queues[rx_queue_id];
1456         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1457
1458         while ((desc < rxq->nb_rx_desc) &&
1459                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1460                 desc += IGB_RXQ_SCAN_INTERVAL;
1461                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1462                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1463                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1464                                 desc - rxq->nb_rx_desc]);
1465         }
1466
1467         return desc;
1468 }
1469
1470 int
1471 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1472 {
1473         volatile union e1000_adv_rx_desc *rxdp;
1474         struct igb_rx_queue *rxq = rx_queue;
1475         uint32_t desc;
1476
1477         if (unlikely(offset >= rxq->nb_rx_desc))
1478                 return 0;
1479         desc = rxq->rx_tail + offset;
1480         if (desc >= rxq->nb_rx_desc)
1481                 desc -= rxq->nb_rx_desc;
1482
1483         rxdp = &rxq->rx_ring[desc];
1484         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1485 }
1486
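/*
 * Illustrative sketch (hypothetical, not part of this driver): polling RX
 * ring occupancy from an application through the generic ethdev wrappers
 * that end up in the two handlers above.  The offset of 64 is an assumed
 * example value.
 */
static void
example_igb_poll_rx_fill_level(uint8_t port_id, uint16_t queue_id)
{
	/* Approximate number of received descriptors with the DD bit set. */
	uint32_t filled = rte_eth_rx_queue_count(port_id, queue_id);

	/* Check whether the descriptor 64 entries past the software tail is done. */
	int done = rte_eth_rx_descriptor_done(port_id, queue_id, 64);

	printf("rxq %u: ~%u descriptors done; offset 64 is %s\n",
	       (unsigned)queue_id, (unsigned)filled,
	       done ? "done" : "pending");
}
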
1487 void
1488 igb_dev_clear_queues(struct rte_eth_dev *dev)
1489 {
1490         uint16_t i;
1491         struct igb_tx_queue *txq;
1492         struct igb_rx_queue *rxq;
1493
1494         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1495                 txq = dev->data->tx_queues[i];
1496                 if (txq != NULL) {
1497                         igb_tx_queue_release_mbufs(txq);
1498                         igb_reset_tx_queue(txq, dev);
1499                 }
1500         }
1501
1502         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1503                 rxq = dev->data->rx_queues[i];
1504                 if (rxq != NULL) {
1505                         igb_rx_queue_release_mbufs(rxq);
1506                         igb_reset_rx_queue(rxq);
1507                 }
1508         }
1509 }
1510
1511 /**
1512  * Receive Side Scaling (RSS).
1513  * See section 7.1.1.7 in the following document:
1514  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1515  *
1516  * Principles:
1517  * The source and destination IP addresses of the IP header and the source and
1518  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1519  * against a configurable random key to compute a 32-bit RSS hash result.
1520  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1521  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1522  * RSS output index, which is used as the index of the RX queue in which to
1523  * store the received packets.
1524  * The following output is supplied in the RX write-back descriptor:
1525  *     - 32-bit result of the Microsoft RSS hash function,
1526  *     - 4-bit RSS type field.
1527  */
1528
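/*
 * Illustrative software model (not part of the driver) of the queue
 * selection described above: the 7 least significant bits of the 32-bit
 * RSS hash index one of the 128 RETA entries, and the 3-bit value stored
 * there is the RX queue index used for the packet.
 */
static inline uint8_t
example_rss_queue_from_hash(uint32_t rss_hash, const uint8_t reta[128])
{
	return (uint8_t)(reta[rss_hash & 0x7F] & 0x07);
}
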
1529 /*
1530  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1531  * Used as the default key.
1532  */
1533 static uint8_t rss_intel_key[40] = {
1534         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1535         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1536         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1537         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1538         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1539 };
1540
1541 static void
1542 igb_rss_disable(struct rte_eth_dev *dev)
1543 {
1544         struct e1000_hw *hw;
1545         uint32_t mrqc;
1546
1547         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1548         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1549         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1550         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1551 }
1552
1553 static void
1554 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1555 {
1556         uint8_t  *hash_key;
1557         uint32_t rss_key;
1558         uint32_t mrqc;
1559         uint64_t rss_hf;
1560         uint16_t i;
1561
1562         hash_key = rss_conf->rss_key;
1563         if (hash_key != NULL) {
1564                 /* Fill in RSS hash key */
1565                 for (i = 0; i < 10; i++) {
1566                         rss_key  = hash_key[(i * 4)];
1567                         rss_key |= hash_key[(i * 4) + 1] << 8;
1568                         rss_key |= hash_key[(i * 4) + 2] << 16;
1569                         rss_key |= hash_key[(i * 4) + 3] << 24;
1570                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1571                 }
1572         }
1573
1574         /* Set configured hashing protocols in MRQC register */
1575         rss_hf = rss_conf->rss_hf;
1576         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1577         if (rss_hf & ETH_RSS_IPV4)
1578                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1579         if (rss_hf & ETH_RSS_IPV4_TCP)
1580                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1581         if (rss_hf & ETH_RSS_IPV6)
1582                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1583         if (rss_hf & ETH_RSS_IPV6_EX)
1584                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1585         if (rss_hf & ETH_RSS_IPV6_TCP)
1586                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1587         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1588                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1589         if (rss_hf & ETH_RSS_IPV4_UDP)
1590                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1591         if (rss_hf & ETH_RSS_IPV6_UDP)
1592                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1593         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1594                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1595         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1596 }
1597
1598 int
1599 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1600                         struct rte_eth_rss_conf *rss_conf)
1601 {
1602         struct e1000_hw *hw;
1603         uint32_t mrqc;
1604         uint64_t rss_hf;
1605
1606         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1607
1608         /*
1609          * Before changing anything, check that the requested update does
1610          * not attempt to disable RSS if RSS was enabled at
1611          * initialization time, or to enable RSS if RSS was
1612          * disabled at initialization time.
1613          */
1614         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1615         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1616         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1617                 if (rss_hf != 0) /* Enable RSS */
1618                         return -(EINVAL);
1619                 return 0; /* Nothing to do */
1620         }
1621         /* RSS enabled */
1622         if (rss_hf == 0) /* Disable RSS */
1623                 return -(EINVAL);
1624         igb_hw_rss_hash_set(hw, rss_conf);
1625         return 0;
1626 }
1627
1628 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1629                               struct rte_eth_rss_conf *rss_conf)
1630 {
1631         struct e1000_hw *hw;
1632         uint8_t *hash_key;
1633         uint32_t rss_key;
1634         uint32_t mrqc;
1635         uint64_t rss_hf;
1636         uint16_t i;
1637
1638         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639         hash_key = rss_conf->rss_key;
1640         if (hash_key != NULL) {
1641                 /* Return RSS hash key */
1642                 for (i = 0; i < 10; i++) {
1643                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1644                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1645                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1646                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1647                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1648                 }
1649         }
1650
1651         /* Get RSS functions configured in MRQC register */
1652         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1653         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1654                 rss_conf->rss_hf = 0;
1655                 return 0;
1656         }
1657         rss_hf = 0;
1658         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1659                 rss_hf |= ETH_RSS_IPV4;
1660         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1661                 rss_hf |= ETH_RSS_IPV4_TCP;
1662         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1663                 rss_hf |= ETH_RSS_IPV6;
1664         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1665                 rss_hf |= ETH_RSS_IPV6_EX;
1666         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1667                 rss_hf |= ETH_RSS_IPV6_TCP;
1668         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1669                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1670         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1671                 rss_hf |= ETH_RSS_IPV4_UDP;
1672         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1673                 rss_hf |= ETH_RSS_IPV6_UDP;
1674         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1675                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1676         rss_conf->rss_hf = rss_hf;
1677         return 0;
1678 }
1679
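/*
 * Illustrative sketch (hypothetical, not part of this driver): runtime use
 * of the two handlers above through the generic ethdev RSS calls.  The
 * selected hash functions are example values; note that the update handler
 * rejects a zero rss_hf while RSS is enabled.
 */
static int
example_igb_retarget_rss(uint8_t port_id)
{
	struct rte_eth_rss_conf conf;
	uint8_t key[40];

	memset(&conf, 0, sizeof(conf));
	conf.rss_key = key; /* 40-byte buffer, filled by the get handler */

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) != 0)
		return -1;

	/* Keep the programmed key, but hash on IPv4 and IPv4/TCP only. */
	conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP;
	return rte_eth_dev_rss_hash_update(port_id, &conf);
}
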
1680 static void
1681 igb_rss_configure(struct rte_eth_dev *dev)
1682 {
1683         struct rte_eth_rss_conf rss_conf;
1684         struct e1000_hw *hw;
1685         uint32_t shift;
1686         uint16_t i;
1687
1688         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1689
1690         /* Fill in redirection table. */
1691         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1692         for (i = 0; i < 128; i++) {
1693                 union e1000_reta {
1694                         uint32_t dword;
1695                         uint8_t  bytes[4];
1696                 } reta;
1697                 uint8_t q_idx;
1698
1699                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1700                                    i % dev->data->nb_rx_queues : 0);
1701                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1702                 if ((i & 3) == 3)
1703                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1704         }
1705
1706         /*
1707          * Configure the RSS key and the RSS protocols used to compute
1708          * the RSS hash of input packets.
1709          */
1710         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1711         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1712                 igb_rss_disable(dev);
1713                 return;
1714         }
1715         if (rss_conf.rss_key == NULL)
1716                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1717         igb_hw_rss_hash_set(hw, &rss_conf);
1718 }
1719
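/*
 * Illustrative sketch (hypothetical, not part of this driver): the device
 * configuration that makes igb_rss_configure() above take effect.  RSS is
 * requested in rte_eth_conf before rte_eth_dev_configure(); the queue
 * counts and hash functions are example values.
 */
static int
example_igb_configure_rss(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	/* NULL key: igb_rss_configure() falls back to rss_intel_key[]. */
	conf.rx_adv_conf.rss_conf.rss_key = NULL;
	conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;

	/* Spread over 4 RX queues; a single TX queue for simplicity. */
	return rte_eth_dev_configure(port_id, 4, 1, &conf);
}
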
1720 /*
1721  * Check whether the MAC type supports VMDq.
1722  * Return 1 if it does, otherwise return 0.
1723  */
1724 static int
1725 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1726 {
1727         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1728
1729         switch (hw->mac.type) {
1730         case e1000_82576:
1731         case e1000_82580:
1732         case e1000_i350:
1733                 return 1;
1734         case e1000_82540:
1735         case e1000_82541:
1736         case e1000_82542:
1737         case e1000_82543:
1738         case e1000_82544:
1739         case e1000_82545:
1740         case e1000_82546:
1741         case e1000_82547:
1742         case e1000_82571:
1743         case e1000_82572:
1744         case e1000_82573:
1745         case e1000_82574:
1746         case e1000_82583:
1747         case e1000_i210:
1748         case e1000_i211:
1749         default:
1750                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1751                 return 0;
1752         }
1753 }
1754
1755 static int
1756 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1757 {
1758         struct rte_eth_vmdq_rx_conf *cfg;
1759         struct e1000_hw *hw;
1760         uint32_t mrqc, vt_ctl, vmolr, rctl;
1761         int i;
1762
1763         PMD_INIT_LOG(DEBUG, ">>");
1764         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1765         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1766
1767         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1768         if (igb_is_vmdq_supported(dev) == 0)
1769                 return -1;
1770
1771         igb_rss_disable(dev);
1772
1773         /* RCTL: enable VLAN filter */
1774         rctl = E1000_READ_REG(hw, E1000_RCTL);
1775         rctl |= E1000_RCTL_VFE;
1776         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1777
1778         /* MRQC: enable vmdq */
1779         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1780         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1781         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1782
1783         /* VTCTL:  pool selection according to VLAN tag */
1784         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1785         if (cfg->enable_default_pool)
1786                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1787         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1788         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1789
1790         /*
1791          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1792          * Both 82576 and 82580 support it.
1793          */
1794         if (hw->mac.type != e1000_i350) {
1795                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1796                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1797                         vmolr |= E1000_VMOLR_STRVLAN;
1798                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1799                 }
1800         }
1801
1802         /* VFTA - enable all vlan filters */
1803         for (i = 0; i < IGB_VFTA_SIZE; i++)
1804                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1805
1806         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1807         if (hw->mac.type != e1000_82580)
1808                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1809
1810         /*
1811          * RAH/RAL - allow pools to read specific mac addresses
1812          * In this case, all pools should be able to read from mac addr 0
1813          */
1814         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1815         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1816
1817         /* VLVF: set up filters for vlan tags as configured */
1818         for (i = 0; i < cfg->nb_pool_maps; i++) {
1819                 /* set vlan id in VF register and set the valid bit */
1820                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1821                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1822                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1823                         E1000_VLVF_POOLSEL_MASK)));
1824         }
1825
1826         E1000_WRITE_FLUSH(hw);
1827
1828         return 0;
1829 }
1830
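/*
 * Illustrative sketch (hypothetical, not part of this driver): the VMDq
 * pool mapping consumed by igb_vmdq_rx_hw_configure() above from
 * dev_conf.rx_adv_conf.vmdq_rx_conf.  Only the fields read by that
 * function are filled in; VLAN ids, pool masks and queue counts are
 * example values.
 */
static int
example_igb_configure_vmdq(uint8_t port_id)
{
	struct rte_eth_conf conf;
	struct rte_eth_vmdq_rx_conf *vmdq;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;

	vmdq = &conf.rx_adv_conf.vmdq_rx_conf;
	vmdq->enable_default_pool = 1;
	vmdq->default_pool = 0;
	vmdq->nb_pool_maps = 2;
	vmdq->pool_map[0].vlan_id = 100;
	vmdq->pool_map[0].pools = 1ULL << 0; /* VLAN 100 -> pool 0 */
	vmdq->pool_map[1].vlan_id = 200;
	vmdq->pool_map[1].pools = 1ULL << 1; /* VLAN 200 -> pool 1 */

	return rte_eth_dev_configure(port_id, 8, 1, &conf);
}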
1831
1832 /*********************************************************************
1833  *
1834  *  Enable receive unit.
1835  *
1836  **********************************************************************/
1837
1838 static int
1839 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1840 {
1841         struct igb_rx_entry *rxe = rxq->sw_ring;
1842         uint64_t dma_addr;
1843         unsigned i;
1844
1845         /* Initialize software ring entries. */
1846         for (i = 0; i < rxq->nb_rx_desc; i++) {
1847                 volatile union e1000_adv_rx_desc *rxd;
1848                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1849
1850                 if (mbuf == NULL) {
1851                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1852                                 "queue_id=%hu\n", rxq->queue_id);
1853                         return (-ENOMEM);
1854                 }
1855                 dma_addr =
1856                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1857                 rxd = &rxq->rx_ring[i];
1858                 rxd->read.hdr_addr = dma_addr;
1859                 rxd->read.pkt_addr = dma_addr;
1860                 rxe[i].mbuf = mbuf;
1861         }
1862
1863         return 0;
1864 }
1865
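/*
 * Illustrative sketch (hypothetical, not part of this driver): a pktmbuf
 * pool able to feed the per-descriptor allocation above.  The pool has to
 * hold at least nb_rx_desc mbufs per RX queue plus whatever the
 * application keeps in flight; the element count, data room and cache
 * size below are example values.
 */
static struct rte_mempool *
example_igb_create_mbuf_pool(unsigned int nb_mbuf, int socket_id)
{
	/* 2 KB of data room keeps SRRCTL.BSIZEPACKET usable (>= 1024). */
	const unsigned int mbuf_size = 2048 + sizeof(struct rte_mbuf) +
				       RTE_PKTMBUF_HEADROOM;

	return rte_mempool_create("igb_example_pool", nb_mbuf, mbuf_size,
				  32, sizeof(struct rte_pktmbuf_pool_private),
				  rte_pktmbuf_pool_init, NULL,
				  rte_pktmbuf_init, NULL,
				  socket_id, 0);
}
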
1866 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1867 static int
1868 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1869 {
1870         struct e1000_hw *hw =
1871                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1872         uint32_t mrqc;
1873
1874         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1875                 /*
1876                  * SRIOV active scheme
1877                  * FIXME: add support for RSS together with VMDq & SR-IOV
1878                  */
1879                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1880                 /* Def_Q field = 011b: ignore it, use the default pool from VT_CTL.DEF_PL */
1881                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1882                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1883         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1884                 /*
1885                  * SRIOV inactive scheme
1886                  */
1887                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1888                         case ETH_MQ_RX_RSS:
1889                                 igb_rss_configure(dev);
1890                                 break;
1891                         case ETH_MQ_RX_VMDQ_ONLY:
1892                                 /*Configure general VMDQ only RX parameters*/
1893                                 igb_vmdq_rx_hw_configure(dev);
1894                                 break;
1895                         case ETH_MQ_RX_NONE:
1896                                 /* if mq_mode is none, disable RSS. */
1897                         default:
1898                                 igb_rss_disable(dev);
1899                                 break;
1900                 }
1901         }
1902
1903         return 0;
1904 }
1905
1906 int
1907 eth_igb_rx_init(struct rte_eth_dev *dev)
1908 {
1909         struct e1000_hw     *hw;
1910         struct igb_rx_queue *rxq;
1911         struct rte_pktmbuf_pool_private *mbp_priv;
1912         uint32_t rctl;
1913         uint32_t rxcsum;
1914         uint32_t srrctl;
1915         uint16_t buf_size;
1916         uint16_t rctl_bsize;
1917         uint16_t i;
1918         int ret;
1919
1920         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1921         srrctl = 0;
1922
1923         /*
1924          * Make sure receives are disabled while setting
1925          * up the descriptor ring.
1926          */
1927         rctl = E1000_READ_REG(hw, E1000_RCTL);
1928         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1929
1930         /*
1931          * Configure support of jumbo frames, if any.
1932          */
1933         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1934                 rctl |= E1000_RCTL_LPE;
1935
1936                 /*
1937                  * Set the maximum packet length by default; it may be updated
1938                  * later when dual VLAN is enabled or disabled.
1939                  */
1940                 E1000_WRITE_REG(hw, E1000_RLPML,
1941                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1942                                                 VLAN_TAG_SIZE);
1943         } else
1944                 rctl &= ~E1000_RCTL_LPE;
1945
1946         /* Configure and enable each RX queue. */
1947         rctl_bsize = 0;
1948         dev->rx_pkt_burst = eth_igb_recv_pkts;
1949         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1950                 uint64_t bus_addr;
1951                 uint32_t rxdctl;
1952
1953                 rxq = dev->data->rx_queues[i];
1954
1955                 /* Allocate buffers for descriptor rings and set up queue */
1956                 ret = igb_alloc_rx_queue_mbufs(rxq);
1957                 if (ret)
1958                         return ret;
1959
1960                 /*
1961                  * Reset crc_len in case it was changed after queue setup by a
1962                  *  call to configure
1963                  */
1964                 rxq->crc_len =
1965                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1966                                                         0 : ETHER_CRC_LEN);
1967
1968                 bus_addr = rxq->rx_ring_phys_addr;
1969                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1970                                 rxq->nb_rx_desc *
1971                                 sizeof(union e1000_adv_rx_desc));
1972                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1973                                 (uint32_t)(bus_addr >> 32));
1974                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1975
1976                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1977
1978                 /*
1979                  * Configure RX buffer size.
1980                  */
1981                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1982                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1983                                        RTE_PKTMBUF_HEADROOM);
1984                 if (buf_size >= 1024) {
1985                         /*
1986                          * Configure the BSIZEPACKET field of the SRRCTL
1987                          * register of the queue.
1988                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1989                          * If this field is equal to 0b, then RCTL.BSIZE
1990                          * determines the RX packet buffer size.
1991                          */
1992                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1993                                    E1000_SRRCTL_BSIZEPKT_MASK);
1994                         buf_size = (uint16_t) ((srrctl &
1995                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1996                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1997
1998                         /* Add the dual VLAN header length to support dual VLAN */
1999                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2000                                                 2 * VLAN_TAG_SIZE) > buf_size){
2001                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2002                                 dev->data->scattered_rx = 1;
2003                         }
2004                 } else {
2005                         /*
2006                          * Use BSIZE field of the device RCTL register.
2007                          */
2008                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2009                                 rctl_bsize = buf_size;
2010                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2011                         dev->data->scattered_rx = 1;
2012                 }
2013
2014                 /* Set if packets are dropped when no descriptors available */
2015                 if (rxq->drop_en)
2016                         srrctl |= E1000_SRRCTL_DROP_EN;
2017
2018                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2019
2020                 /* Enable this RX queue. */
2021                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2022                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2023                 rxdctl &= 0xFFF00000;
2024                 rxdctl |= (rxq->pthresh & 0x1F);
2025                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2026                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2027                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2028         }
2029
2030         if (dev->data->dev_conf.rxmode.enable_scatter) {
2031                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2032                 dev->data->scattered_rx = 1;
2033         }
2034
2035         /*
2036          * Set up the BSIZE field of the RCTL register, if needed.
2037          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2038          * register, since the code above configures the SRRCTL register of
2039          * the RX queue in such a case.
2040          * All configurable sizes are:
2041          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2042          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2043          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2044          *  2048: rctl |= E1000_RCTL_SZ_2048;
2045          *  1024: rctl |= E1000_RCTL_SZ_1024;
2046          *   512: rctl |= E1000_RCTL_SZ_512;
2047          *   256: rctl |= E1000_RCTL_SZ_256;
2048          */
2049         if (rctl_bsize > 0) {
2050                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2051                         rctl |= E1000_RCTL_SZ_512;
2052                 else /* 256 <= buf_size < 512 - use 256 */
2053                         rctl |= E1000_RCTL_SZ_256;
2054         }
2055
2056         /*
2057          * Configure RSS if device configured with multiple RX queues.
2058          */
2059         igb_dev_mq_rx_configure(dev);
2060
2061         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2062         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2063
2064         /*
2065          * Setup the Checksum Register.
2066          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2067          */
2068         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2069         rxcsum |= E1000_RXCSUM_PCSD;
2070
2071         /* Enable both L3/L4 rx checksum offload */
2072         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2073                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2074         else
2075                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2076         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2077
2078         /* Setup the Receive Control Register. */
2079         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2080                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2081
2082                 /* set STRCRC bit in all queues */
2083                 if (hw->mac.type == e1000_i350 ||
2084                     hw->mac.type == e1000_i210 ||
2085                     hw->mac.type == e1000_i211 ||
2086                     hw->mac.type == e1000_i354) {
2087                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2088                                 rxq = dev->data->rx_queues[i];
2089                                 uint32_t dvmolr = E1000_READ_REG(hw,
2090                                         E1000_DVMOLR(rxq->reg_idx));
2091                                 dvmolr |= E1000_DVMOLR_STRCRC;
2092                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2093                         }
2094                 }
2095         } else {
2096                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2097
2098                 /* clear STRCRC bit in all queues */
2099                 if (hw->mac.type == e1000_i350 ||
2100                     hw->mac.type == e1000_i210 ||
2101                     hw->mac.type == e1000_i211 ||
2102                     hw->mac.type == e1000_i354) {
2103                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2104                                 rxq = dev->data->rx_queues[i];
2105                                 uint32_t dvmolr = E1000_READ_REG(hw,
2106                                         E1000_DVMOLR(rxq->reg_idx));
2107                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2108                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2109                         }
2110                 }
2111         }
2112
2113         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2114         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2115                 E1000_RCTL_RDMTS_HALF |
2116                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2117
2118         /* Make sure VLAN Filters are off. */
2119         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2120                 rctl &= ~E1000_RCTL_VFE;
2121         /* Don't store bad packets. */
2122         rctl &= ~E1000_RCTL_SBP;
2123
2124         /* Enable Receives. */
2125         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2126
2127         /*
2128          * Setup the HW Rx Head and Tail Descriptor Pointers.
2129          * This needs to be done after enable.
2130          */
2131         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2132                 rxq = dev->data->rx_queues[i];
2133                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2134                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2135         }
2136
2137         return 0;
2138 }
2139
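/*
 * Illustrative sketch (hypothetical, not part of this driver): the rxmode
 * settings consumed by eth_igb_rx_init() above.  The values are examples;
 * max_rx_pkt_len is only applied when jumbo_frame is set.
 */
static void
example_igb_fill_rxmode(struct rte_eth_conf *conf)
{
	conf->rxmode.jumbo_frame = 1;       /* sets RCTL.LPE and programs RLPML */
	conf->rxmode.max_rx_pkt_len = 9018; /* example jumbo frame length */
	conf->rxmode.hw_strip_crc = 1;      /* sets RCTL.SECRC (and DVMOLR.STRCRC on i350/i210/i211/i354) */
	conf->rxmode.hw_ip_checksum = 1;    /* sets RXCSUM.IPOFL and TUOFL */
	conf->rxmode.enable_scatter = 0;    /* set to 1 to force scattered RX */
}
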
2140 /*********************************************************************
2141  *
2142  *  Enable transmit unit.
2143  *
2144  **********************************************************************/
2145 void
2146 eth_igb_tx_init(struct rte_eth_dev *dev)
2147 {
2148         struct e1000_hw     *hw;
2149         struct igb_tx_queue *txq;
2150         uint32_t tctl;
2151         uint32_t txdctl;
2152         uint16_t i;
2153
2154         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155
2156         /* Setup the Base and Length of the Tx Descriptor Rings. */
2157         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2158                 uint64_t bus_addr;
2159                 txq = dev->data->tx_queues[i];
2160                 bus_addr = txq->tx_ring_phys_addr;
2161
2162                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2163                                 txq->nb_tx_desc *
2164                                 sizeof(union e1000_adv_tx_desc));
2165                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2166                                 (uint32_t)(bus_addr >> 32));
2167                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2168
2169                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2170                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2171                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2172
2173                 /* Setup Transmit threshold registers. */
2174                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2175                 txdctl |= txq->pthresh & 0x1F;
2176                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2177                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2178                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2179                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2180         }
2181
2182         /* Program the Transmit Control Register. */
2183         tctl = E1000_READ_REG(hw, E1000_TCTL);
2184         tctl &= ~E1000_TCTL_CT;
2185         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2186                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2187
2188         e1000_config_collision_dist(hw);
2189
2190         /* This write will effectively turn on the transmit unit. */
2191         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2192 }
2193
2194 /*********************************************************************
2195  *
2196  *  Enable VF receive unit.
2197  *
2198  **********************************************************************/
2199 int
2200 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2201 {
2202         struct e1000_hw     *hw;
2203         struct igb_rx_queue *rxq;
2204         struct rte_pktmbuf_pool_private *mbp_priv;
2205         uint32_t srrctl;
2206         uint16_t buf_size;
2207         uint16_t rctl_bsize;
2208         uint16_t i;
2209         int ret;
2210
2211         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2212
2213         /* setup MTU */
2214         e1000_rlpml_set_vf(hw,
2215                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2216                 VLAN_TAG_SIZE));
2217
2218         /* Configure and enable each RX queue. */
2219         rctl_bsize = 0;
2220         dev->rx_pkt_burst = eth_igb_recv_pkts;
2221         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2222                 uint64_t bus_addr;
2223                 uint32_t rxdctl;
2224
2225                 rxq = dev->data->rx_queues[i];
2226
2227                 /* Allocate buffers for descriptor rings and set up queue */
2228                 ret = igb_alloc_rx_queue_mbufs(rxq);
2229                 if (ret)
2230                         return ret;
2231
2232                 bus_addr = rxq->rx_ring_phys_addr;
2233                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2234                                 rxq->nb_rx_desc *
2235                                 sizeof(union e1000_adv_rx_desc));
2236                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2237                                 (uint32_t)(bus_addr >> 32));
2238                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2239
2240                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2241
2242                 /*
2243                  * Configure RX buffer size.
2244                  */
2245                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2246                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2247                                        RTE_PKTMBUF_HEADROOM);
2248                 if (buf_size >= 1024) {
2249                         /*
2250                          * Configure the BSIZEPACKET field of the SRRCTL
2251                          * register of the queue.
2252                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2253                          * If this field is equal to 0b, then RCTL.BSIZE
2254                          * determines the RX packet buffer size.
2255                          */
2256                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2257                                    E1000_SRRCTL_BSIZEPKT_MASK);
2258                         buf_size = (uint16_t) ((srrctl &
2259                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2260                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2261
2262                         /* Add the dual VLAN header length to support dual VLAN */
2263                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2264                                                 2 * VLAN_TAG_SIZE) > buf_size){
2265                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2266                                 dev->data->scattered_rx = 1;
2267                         }
2268                 } else {
2269                         /*
2270                          * Use BSIZE field of the device RCTL register.
2271                          */
2272                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2273                                 rctl_bsize = buf_size;
2274                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2275                         dev->data->scattered_rx = 1;
2276                 }
2277
2278                 /* Set if packets are dropped when no descriptors available */
2279                 if (rxq->drop_en)
2280                         srrctl |= E1000_SRRCTL_DROP_EN;
2281
2282                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2283
2284                 /* Enable this RX queue. */
2285                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2286                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2287                 rxdctl &= 0xFFF00000;
2288                 rxdctl |= (rxq->pthresh & 0x1F);
2289                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2290                 if (hw->mac.type == e1000_vfadapt) {
2291                         /*
2292                          * Workaround for 82576 VF erratum:
2293                          * force WTHRESH to 1 to avoid descriptor
2294                          * write-back sometimes not being triggered.
2295                          */
2296                         rxdctl |= 0x10000;
2297                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
2298                 }
2299                 else
2300                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2301                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2302         }
2303
2304         if (dev->data->dev_conf.rxmode.enable_scatter) {
2305                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2306                 dev->data->scattered_rx = 1;
2307         }
2308
2309         /*
2310          * Setup the HW Rx Head and Tail Descriptor Pointers.
2311          * This needs to be done after enable.
2312          */
2313         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2314                 rxq = dev->data->rx_queues[i];
2315                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2316                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2317         }
2318
2319         return 0;
2320 }
2321
2322 /*********************************************************************
2323  *
2324  *  Enable VF transmit unit.
2325  *
2326  **********************************************************************/
2327 void
2328 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2329 {
2330         struct e1000_hw     *hw;
2331         struct igb_tx_queue *txq;
2332         uint32_t txdctl;
2333         uint16_t i;
2334
2335         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2336
2337         /* Setup the Base and Length of the Tx Descriptor Rings. */
2338         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2339                 uint64_t bus_addr;
2340
2341                 txq = dev->data->tx_queues[i];
2342                 bus_addr = txq->tx_ring_phys_addr;
2343                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2344                                 txq->nb_tx_desc *
2345                                 sizeof(union e1000_adv_tx_desc));
2346                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2347                                 (uint32_t)(bus_addr >> 32));
2348                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2349
2350                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2351                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2352                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2353
2354                 /* Setup Transmit threshold registers. */
2355                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2356                 txdctl |= txq->pthresh & 0x1F;
2357                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2358                 if (hw->mac.type == e1000_82576) {
2359                         /*
2360                          * Workaround for 82576 VF erratum:
2361                          * force WTHRESH to 1 to avoid descriptor
2362                          * write-back sometimes not being triggered.
2363                          */
2364                         txdctl |= 0x10000;
2365                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
2366                 }
2367                 else
2368                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2369                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2370                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2371         }
2372
2373 }
2374