add prefix to cache line macros
[dpdk.git] / lib / librte_pmd_e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 #define IGB_RSS_OFFLOAD_ALL ( \
77                 ETH_RSS_IPV4 | \
78                 ETH_RSS_IPV4_TCP | \
79                 ETH_RSS_IPV6 | \
80                 ETH_RSS_IPV6_EX | \
81                 ETH_RSS_IPV6_TCP | \
82                 ETH_RSS_IPV6_TCP_EX | \
83                 ETH_RSS_IPV4_UDP | \
84                 ETH_RSS_IPV6_UDP | \
85                 ETH_RSS_IPV6_UDP_EX)
86
87 /* Bit mask of the ol_flags bits required for building a TX context descriptor */
88 #define IGB_TX_OFFLOAD_MASK (                    \
89                 PKT_TX_VLAN_PKT |                \
90                 PKT_TX_IP_CKSUM |                \
91                 PKT_TX_L4_MASK)
92
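/*
 * Allocate a raw mbuf from the pool for RX ring replenishment. Unlike
 * rte_pktmbuf_alloc(), the packet fields are not reset here; the receive
 * functions initialize them when the mbuf is handed back to the application.
 */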
93 static inline struct rte_mbuf *
94 rte_rxmbuf_alloc(struct rte_mempool *mp)
95 {
96         struct rte_mbuf *m;
97
98         m = __rte_mbuf_raw_alloc(mp);
99         __rte_mbuf_sanity_check_raw(m, 0);
100         return (m);
101 }
102
103 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
104         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
105
106 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
107         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
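/*
 * RTE_MBUF_DATA_DMA_ADDR() uses the mbuf's current data offset and is used
 * on the TX path; RTE_MBUF_DATA_DMA_ADDR_DEFAULT() always points at the
 * default headroom and is used when replenishing RX descriptors.
 */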
108
109 /**
110  * Structure associated with each descriptor of the RX ring of a RX queue.
111  */
112 struct igb_rx_entry {
113         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
114 };
115
116 /**
117  * Structure associated with each descriptor of the TX ring of a TX queue.
118  */
119 struct igb_tx_entry {
120         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
121         uint16_t next_id; /**< Index of next descriptor in ring. */
122         uint16_t last_id; /**< Index of last scattered descriptor. */
123 };
124
125 /**
126  * Structure associated with each RX queue.
127  */
128 struct igb_rx_queue {
129         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
130         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
131         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
132         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
133         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
134         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
135         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
136         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
137         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
138         uint16_t            rx_tail;    /**< current value of RDT register. */
139         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
140         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
141         uint16_t            queue_id;   /**< RX queue index. */
142         uint16_t            reg_idx;    /**< RX queue register index. */
143         uint8_t             port_id;    /**< Device port identifier. */
144         uint8_t             pthresh;    /**< Prefetch threshold register. */
145         uint8_t             hthresh;    /**< Host threshold register. */
146         uint8_t             wthresh;    /**< Write-back threshold register. */
147         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
148         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
149 };
150
151 /**
152  * Hardware context number
153  */
154 enum igb_advctx_num {
155         IGB_CTX_0    = 0, /**< CTX0    */
156         IGB_CTX_1    = 1, /**< CTX1    */
157         IGB_CTX_NUM  = 2, /**< CTX_NUM */
158 };
159
160 /** Offload features */
161 union igb_vlan_macip {
162         uint32_t data;
163         struct {
164                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
165                 uint16_t vlan_tci;
166                 /**< VLAN Tag Control Identifier (CPU order). */
167         } f;
168 };
169
170 /*
171  * Compare masks for vlan_macip_lens.data;
172  * must be kept in sync with the igb_vlan_macip.f layout.
173  */
174 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
175 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
176 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
177 /** MAC+IP  length. */
178 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
179
180 /**
181  * Structure to check if a new context descriptor needs to be built.
182  */
183 struct igb_advctx_info {
184         uint64_t flags;           /**< ol_flags related to context build. */
185         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
186         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
187 };
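/*
 * The hardware exposes IGB_CTX_NUM (2) offload context slots per queue.
 * ctx_cache[] records the flags and masked VLAN/MAC/IP lengths last
 * programmed into each slot, so a new context descriptor is only written
 * when a packet's offload parameters actually change.
 */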
188
189 /**
190  * Structure associated with each TX queue.
191  */
192 struct igb_tx_queue {
193         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
194         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
195         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
196         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
197         uint32_t               txd_type;      /**< Device-specific TXD type */
198         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
199         uint16_t               tx_tail; /**< Current value of TDT register. */
200         uint16_t               tx_head;
201         /**< Index of first used TX descriptor. */
202         uint16_t               queue_id; /**< TX queue index. */
203         uint16_t               reg_idx;  /**< TX queue register index. */
204         uint8_t                port_id;  /**< Device port identifier. */
205         uint8_t                pthresh;  /**< Prefetch threshold register. */
206         uint8_t                hthresh;  /**< Host threshold register. */
207         uint8_t                wthresh;  /**< Write-back threshold register. */
208         uint32_t               ctx_curr;
209         /**< Index of the hardware context currently in use. */
210         uint32_t               ctx_start;
211         /**< Start context position for transmit queue. */
212         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
213         /**< Hardware context history.*/
214 };
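/*
 * Note: ctx_curr/ctx_start index into the adapter's context table; on 82575
 * hardware each TX queue uses its own pair of contexts, so ctx_start is set
 * to queue_id * IGB_CTX_NUM when the queue is reset (see igb_reset_tx_queue()).
 */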
215
216 #if 1
217 #define RTE_PMD_USE_PREFETCH
218 #endif
219
220 #ifdef RTE_PMD_USE_PREFETCH
221 #define rte_igb_prefetch(p)     rte_prefetch0(p)
222 #else
223 #define rte_igb_prefetch(p)     do {} while(0)
224 #endif
225
226 #ifdef RTE_PMD_PACKET_PREFETCH
227 #define rte_packet_prefetch(p) rte_prefetch1(p)
228 #else
229 #define rte_packet_prefetch(p)  do {} while(0)
230 #endif
231
232 /*
233  * Macro for VMDq feature for 1 GbE NIC.
234  */
235 #define E1000_VMOLR_SIZE                        (8)
236
237 /*********************************************************************
238  *
239  *  TX function
240  *
241  **********************************************************************/
242
243 /*
244  * Advanced context descriptors are almost the same between igb and ixgbe.
245  * This is kept as a separate function as an optimization opportunity;
246  * rework is required to move to pre-defined values.
247  */
248
249 static inline void
250 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
251                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
252                 uint64_t ol_flags, uint32_t vlan_macip_lens)
253 {
254         uint32_t type_tucmd_mlhl;
255         uint32_t mss_l4len_idx;
256         uint32_t ctx_idx, ctx_curr;
257         uint32_t cmp_mask;
258
259         ctx_curr = txq->ctx_curr;
260         ctx_idx = ctx_curr + txq->ctx_start;
261
262         cmp_mask = 0;
263         type_tucmd_mlhl = 0;
264
265         if (ol_flags & PKT_TX_VLAN_PKT) {
266                 cmp_mask |= TX_VLAN_CMP_MASK;
267         }
268
269         if (ol_flags & PKT_TX_IP_CKSUM) {
270                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
271                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
272         }
273
274         /* Specify which HW CTX to upload. */
275         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
276         switch (ol_flags & PKT_TX_L4_MASK) {
277         case PKT_TX_UDP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         case PKT_TX_TCP_CKSUM:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
288                 break;
289         case PKT_TX_SCTP_CKSUM:
290                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
293                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
294                 break;
295         default:
296                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
297                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298                 break;
299         }
300
301         txq->ctx_cache[ctx_curr].flags           = ol_flags;
302         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
303         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
304                 vlan_macip_lens & cmp_mask;
305
306         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
307         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
308         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
309         ctx_txd->seqnum_seed     = 0;
310 }
311
312 /*
313  * Check which hardware context can be used. Use the existing match
314  * or create a new context descriptor.
315  */
316 static inline uint32_t
317 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
318                 uint32_t vlan_macip_lens)
319 {
320         /* If match with the current context */
321         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
322                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
323                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
324                         return txq->ctx_curr;
325         }
326
327         /* If match with the second context */
328         txq->ctx_curr ^= 1;
329         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
330                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
331                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
332                         return txq->ctx_curr;
333         }
334
335         /* Mismatch: neither cached context matches, a new one must be built. */
336         return (IGB_CTX_NUM);
337 }
338
339 static inline uint32_t
340 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
341 {
342         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
343         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
344         uint32_t tmp;
345
346         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
347         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
348         return tmp;
349 }
350
351 static inline uint32_t
352 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
353 {
354         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
355         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
356 }
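/*
 * The two helpers above translate ol_flags into descriptor bits using
 * branch-free table lookups: TXSM/IXSM request L4/IP checksum insertion
 * via olinfo_status, and VLE requests VLAN tag insertion via cmd_type_len.
 */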
357
358 uint16_t
359 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
360                uint16_t nb_pkts)
361 {
362         struct igb_tx_queue *txq;
363         struct igb_tx_entry *sw_ring;
364         struct igb_tx_entry *txe, *txn;
365         volatile union e1000_adv_tx_desc *txr;
366         volatile union e1000_adv_tx_desc *txd;
367         struct rte_mbuf     *tx_pkt;
368         struct rte_mbuf     *m_seg;
369         union igb_vlan_macip vlan_macip_lens;
370         union {
371                 uint16_t u16;
372                 struct {
373                         uint16_t l3_len:9;
374                         uint16_t l2_len:7;
375                 };
376         } l2_l3_len;
377         uint64_t buf_dma_addr;
378         uint32_t olinfo_status;
379         uint32_t cmd_type_len;
380         uint32_t pkt_len;
381         uint16_t slen;
382         uint64_t ol_flags;
383         uint16_t tx_end;
384         uint16_t tx_id;
385         uint16_t tx_last;
386         uint16_t nb_tx;
387         uint64_t tx_ol_req;
388         uint32_t new_ctx = 0;
389         uint32_t ctx = 0;
390
391         txq = tx_queue;
392         sw_ring = txq->sw_ring;
393         txr     = txq->tx_ring;
394         tx_id   = txq->tx_tail;
395         txe = &sw_ring[tx_id];
396
397         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
398                 tx_pkt = *tx_pkts++;
399                 pkt_len = tx_pkt->pkt_len;
400
401                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
402
403                 /*
404                  * The number of descriptors that must be allocated for a
405                  * packet is the number of segments of that packet, plus 1
406                  * Context Descriptor if any hardware offload is requested.
407                  * Determine the last TX descriptor to allocate in the TX ring
408                  * for the packet, starting from the current position (tx_id)
409                  * in the ring.
410                  */
411                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
412
413                 ol_flags = tx_pkt->ol_flags;
414                 l2_l3_len.l2_len = tx_pkt->l2_len;
415                 l2_l3_len.l3_len = tx_pkt->l3_len;
416                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
417                 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
418                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
419
420                 /* Check whether a Context Descriptor needs to be built. */
421                 if (tx_ol_req) {
422                         ctx = what_advctx_update(txq, tx_ol_req,
423                                 vlan_macip_lens.data);
424                         /* Only allocate a context descriptor if required. */
425                         new_ctx = (ctx == IGB_CTX_NUM);
426                         ctx = txq->ctx_curr;
427                         tx_last = (uint16_t) (tx_last + new_ctx);
428                 }
429                 if (tx_last >= txq->nb_tx_desc)
430                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
431
432                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
433                            " tx_first=%u tx_last=%u",
434                            (unsigned) txq->port_id,
435                            (unsigned) txq->queue_id,
436                            (unsigned) pkt_len,
437                            (unsigned) tx_id,
438                            (unsigned) tx_last);
439
440                 /*
441                  * Check if there are enough free descriptors in the TX ring
442                  * to transmit the next packet.
443                  * This operation is based on the two following rules:
444                  *
445                  *   1- Only check that the last needed TX descriptor can be
446                  *      allocated (by construction, if that descriptor is free,
447                  *      all intermediate ones are also free).
448                  *
449                  *      For this purpose, the index of the last TX descriptor
450                  *      used for a packet (the "last descriptor" of a packet)
451                  *      is recorded in the TX entries (the last one included)
452                  *      that are associated with all TX descriptors allocated
453                  *      for that packet.
454                  *
455                  *   2- Avoid allocating the last free TX descriptor of the
456                  *      ring, so that the TDT register is never set to the
457                  *      same value stored in parallel by the NIC in the TDH
458                  *      register, which would make the TX engine of the NIC
459                  *      enter a deadlock situation.
460                  *
461                  *      By extension, avoid allocating a free descriptor that
462                  *      belongs to the last set of free descriptors allocated
463                  *      to the same previously transmitted packet.
464                  */
465
466                 /*
467                  * The "last descriptor" of the previously sent packet, if any,
468                  * which used the last descriptor to allocate.
469                  */
470                 tx_end = sw_ring[tx_last].last_id;
471
472                 /*
473                  * The next descriptor following that "last descriptor" in the
474                  * ring.
475                  */
476                 tx_end = sw_ring[tx_end].next_id;
477
478                 /*
479                  * The "last descriptor" associated with that next descriptor.
480                  */
481                 tx_end = sw_ring[tx_end].last_id;
482
483                 /*
484                  * Check that this descriptor is free.
485                  */
486                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
487                         if (nb_tx == 0)
488                                 return (0);
489                         goto end_of_tx;
490                 }
491
492                 /*
493                  * Set common flags of all TX Data Descriptors.
494                  *
495                  * The following bits must be set in all Data Descriptors:
496                  *   - E1000_ADVTXD_DTYP_DATA
497                  *   - E1000_ADVTXD_DCMD_DEXT
498                  *
499                  * The following bits must be set in the first Data Descriptor
500                  * and are ignored in the other ones:
501                  *   - E1000_ADVTXD_DCMD_IFCS
502                  *   - E1000_ADVTXD_MAC_1588
503                  *   - E1000_ADVTXD_DCMD_VLE
504                  *
505                  * The following bits must only be set in the last Data
506                  * Descriptor:
507                  *   - E1000_TXD_CMD_EOP
508                  *
509                  * The following bits can be set in any Data Descriptor, but
510                  * are only set in the last Data Descriptor:
511                  *   - E1000_TXD_CMD_RS
512                  */
513                 cmd_type_len = txq->txd_type |
514                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
515                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
516 #if defined(RTE_LIBRTE_IEEE1588)
517                 if (ol_flags & PKT_TX_IEEE1588_TMST)
518                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
519 #endif
520                 if (tx_ol_req) {
521                         /* Setup TX Advanced context descriptor if required */
522                         if (new_ctx) {
523                                 volatile struct e1000_adv_tx_context_desc *
524                                     ctx_txd;
525
526                                 ctx_txd = (volatile struct
527                                     e1000_adv_tx_context_desc *)
528                                     &txr[tx_id];
529
530                                 txn = &sw_ring[txe->next_id];
531                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
532
533                                 if (txe->mbuf != NULL) {
534                                         rte_pktmbuf_free_seg(txe->mbuf);
535                                         txe->mbuf = NULL;
536                                 }
537
538                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
539                                     vlan_macip_lens.data);
540
541                                 txe->last_id = tx_last;
542                                 tx_id = txe->next_id;
543                                 txe = txn;
544                         }
545
546                         /* Setup the TX Advanced Data Descriptor */
547                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
548                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
549                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
550                 }
551
552                 m_seg = tx_pkt;
553                 do {
554                         txn = &sw_ring[txe->next_id];
555                         txd = &txr[tx_id];
556
557                         if (txe->mbuf != NULL)
558                                 rte_pktmbuf_free_seg(txe->mbuf);
559                         txe->mbuf = m_seg;
560
561                         /*
562                          * Set up transmit descriptor.
563                          */
564                         slen = (uint16_t) m_seg->data_len;
565                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
566                         txd->read.buffer_addr =
567                                 rte_cpu_to_le_64(buf_dma_addr);
568                         txd->read.cmd_type_len =
569                                 rte_cpu_to_le_32(cmd_type_len | slen);
570                         txd->read.olinfo_status =
571                                 rte_cpu_to_le_32(olinfo_status);
572                         txe->last_id = tx_last;
573                         tx_id = txe->next_id;
574                         txe = txn;
575                         m_seg = m_seg->next;
576                 } while (m_seg != NULL);
577
578                 /*
579                  * The last packet data descriptor needs End Of Packet (EOP)
580                  * and Report Status (RS).
581                  */
582                 txd->read.cmd_type_len |=
583                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
584         }
585  end_of_tx:
586         rte_wmb();
587
588         /*
589          * Set the Transmit Descriptor Tail (TDT).
590          */
591         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
592         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
593                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
594                    (unsigned) tx_id, (unsigned) nb_tx);
595         txq->tx_tail = tx_id;
596
597         return (nb_tx);
598 }
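/*
 * Note that transmitted mbufs are recycled lazily: a segment is only freed
 * when its ring slot is about to be reused and the DD write-back from the
 * NIC has confirmed that the corresponding descriptors were processed.
 */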
599
600 /*********************************************************************
601  *
602  *  RX functions
603  *
604  **********************************************************************/
605 static inline uint64_t
606 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
607 {
608         uint64_t pkt_flags;
609
610         static uint64_t ip_pkt_types_map[16] = {
611                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
612                 PKT_RX_IPV6_HDR, 0, 0, 0,
613                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
614                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
615         };
616
617 #if defined(RTE_LIBRTE_IEEE1588)
618         static uint32_t ip_pkt_etqf_map[8] = {
619                 0, 0, 0, PKT_RX_IEEE1588_PTP,
620                 0, 0, 0, 0,
621         };
622
623         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
624                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
625                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
626 #else
627         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
628                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
629 #endif
630         return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH);
631 }
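/*
 * Bits 7:4 of hlen_type_rss hold the advanced descriptor packet type and
 * index the table above, while the low 4 bits hold the RSS type: a non-zero
 * RSS type means the descriptor carries a valid RSS hash.
 */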
632
633 static inline uint64_t
634 rx_desc_status_to_pkt_flags(uint32_t rx_status)
635 {
636         uint64_t pkt_flags;
637
638         /* Check if VLAN present */
639         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
640
641 #if defined(RTE_LIBRTE_IEEE1588)
642         if (rx_status & E1000_RXD_STAT_TMST)
643                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
644 #endif
645         return pkt_flags;
646 }
647
648 static inline uint64_t
649 rx_desc_error_to_pkt_flags(uint32_t rx_status)
650 {
651         /*
652          * Bit 30: IPE, IPv4 checksum error
653          * Bit 29: L4I, L4 integrity error
654          */
655
656         static uint64_t error_to_pkt_flags_map[4] = {
657                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
658                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
659         };
660         return error_to_pkt_flags_map[(rx_status >>
661                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
662 }
663
664 uint16_t
665 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
666                uint16_t nb_pkts)
667 {
668         struct igb_rx_queue *rxq;
669         volatile union e1000_adv_rx_desc *rx_ring;
670         volatile union e1000_adv_rx_desc *rxdp;
671         struct igb_rx_entry *sw_ring;
672         struct igb_rx_entry *rxe;
673         struct rte_mbuf *rxm;
674         struct rte_mbuf *nmb;
675         union e1000_adv_rx_desc rxd;
676         uint64_t dma_addr;
677         uint32_t staterr;
678         uint32_t hlen_type_rss;
679         uint16_t pkt_len;
680         uint16_t rx_id;
681         uint16_t nb_rx;
682         uint16_t nb_hold;
683         uint64_t pkt_flags;
684
685         nb_rx = 0;
686         nb_hold = 0;
687         rxq = rx_queue;
688         rx_id = rxq->rx_tail;
689         rx_ring = rxq->rx_ring;
690         sw_ring = rxq->sw_ring;
691         while (nb_rx < nb_pkts) {
692                 /*
693                  * The order of operations here is important as the DD status
694                  * bit must not be read after any other descriptor fields.
695                  * rx_ring and rxdp are pointing to volatile data so the order
696                  * of accesses cannot be reordered by the compiler. If they were
697                  * not volatile, they could be reordered which could lead to
698                  * using invalid descriptor fields when read from rxd.
699                  */
700                 rxdp = &rx_ring[rx_id];
701                 staterr = rxdp->wb.upper.status_error;
702                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
703                         break;
704                 rxd = *rxdp;
705
706                 /*
707                  * End of packet.
708                  *
709                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
710                  * likely to be invalid and to be dropped by the various
711                  * validation checks performed by the network stack.
712                  *
713                  * Allocate a new mbuf to replenish the RX ring descriptor.
714                  * If the allocation fails:
715                  *    - arrange for that RX descriptor to be the first one
716                  *      being parsed the next time the receive function is
717                  *      invoked [on the same queue].
718                  *
719                  *    - Stop parsing the RX ring and return immediately.
720                  *
721                  * This policy does not drop the packet received in the RX
722                  * descriptor for which the allocation of a new mbuf failed.
723                  * Thus, it allows that packet to be later retrieved if
724                  * mbufs have been freed in the meantime.
725                  * As a side effect, holding RX descriptors instead of
726                  * systematically giving them back to the NIC may lead to
727                  * RX ring exhaustion situations.
728                  * However, the NIC can gracefully prevent such situations
729                  * from happening by sending specific "back-pressure" flow control
730                  * frames to its peer(s).
731                  */
732                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
733                            "staterr=0x%x pkt_len=%u",
734                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
735                            (unsigned) rx_id, (unsigned) staterr,
736                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
737
738                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
739                 if (nmb == NULL) {
740                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
741                                    "queue_id=%u", (unsigned) rxq->port_id,
742                                    (unsigned) rxq->queue_id);
743                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
744                         break;
745                 }
746
747                 nb_hold++;
748                 rxe = &sw_ring[rx_id];
749                 rx_id++;
750                 if (rx_id == rxq->nb_rx_desc)
751                         rx_id = 0;
752
753                 /* Prefetch next mbuf while processing current one. */
754                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
755
756                 /*
757                  * When next RX descriptor is on a cache-line boundary,
758                  * prefetch the next 4 RX descriptors and the next 8 pointers
759                  * to mbufs.
760                  */
761                 if ((rx_id & 0x3) == 0) {
762                         rte_igb_prefetch(&rx_ring[rx_id]);
763                         rte_igb_prefetch(&sw_ring[rx_id]);
764                 }
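                /*
                 * Four 16-byte descriptors span one 64-byte cache line, and
                 * (on a 64-bit build) so do eight mbuf pointers of the
                 * sw_ring, hence the (rx_id & 0x3) == 0 trigger above.
                 */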
765
766                 rxm = rxe->mbuf;
767                 rxe->mbuf = nmb;
768                 dma_addr =
769                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
770                 rxdp->read.hdr_addr = dma_addr;
771                 rxdp->read.pkt_addr = dma_addr;
772
773                 /*
774                  * Initialize the returned mbuf.
775                  * 1) setup generic mbuf fields:
776                  *    - number of segments,
777                  *    - next segment,
778                  *    - packet length,
779                  *    - RX port identifier.
780                  * 2) integrate hardware offload data, if any:
781                  *    - RSS flag & hash,
782                  *    - IP checksum flag,
783                  *    - VLAN TCI, if any,
784                  *    - error flags.
785                  */
786                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
787                                       rxq->crc_len);
788                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
789                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
790                 rxm->nb_segs = 1;
791                 rxm->next = NULL;
792                 rxm->pkt_len = pkt_len;
793                 rxm->data_len = pkt_len;
794                 rxm->port = rxq->port_id;
795
796                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
797                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
798                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
799                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
800
801                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
802                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
803                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
804                 rxm->ol_flags = pkt_flags;
805
806                 /*
807                  * Store the mbuf address into the next entry of the array
808                  * of returned packets.
809                  */
810                 rx_pkts[nb_rx++] = rxm;
811         }
812         rxq->rx_tail = rx_id;
813
814         /*
815          * If the number of free RX descriptors is greater than the RX free
816          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
817          * register.
818          * Update the RDT with the value of the last processed RX descriptor
819          * minus 1, to guarantee that the RDT register is never equal to the
820          * RDH register, which creates a "full" ring situation from the
821          * hardware point of view...
822          */
823         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
824         if (nb_hold > rxq->rx_free_thresh) {
825                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
826                            "nb_hold=%u nb_rx=%u",
827                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
828                            (unsigned) rx_id, (unsigned) nb_hold,
829                            (unsigned) nb_rx);
830                 rx_id = (uint16_t) ((rx_id == 0) ?
831                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
832                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
833                 nb_hold = 0;
834         }
835         rxq->nb_rx_hold = nb_hold;
836         return (nb_rx);
837 }
838
839 uint16_t
840 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
841                          uint16_t nb_pkts)
842 {
843         struct igb_rx_queue *rxq;
844         volatile union e1000_adv_rx_desc *rx_ring;
845         volatile union e1000_adv_rx_desc *rxdp;
846         struct igb_rx_entry *sw_ring;
847         struct igb_rx_entry *rxe;
848         struct rte_mbuf *first_seg;
849         struct rte_mbuf *last_seg;
850         struct rte_mbuf *rxm;
851         struct rte_mbuf *nmb;
852         union e1000_adv_rx_desc rxd;
853         uint64_t dma; /* Physical address of mbuf data buffer */
854         uint32_t staterr;
855         uint32_t hlen_type_rss;
856         uint16_t rx_id;
857         uint16_t nb_rx;
858         uint16_t nb_hold;
859         uint16_t data_len;
860         uint64_t pkt_flags;
861
862         nb_rx = 0;
863         nb_hold = 0;
864         rxq = rx_queue;
865         rx_id = rxq->rx_tail;
866         rx_ring = rxq->rx_ring;
867         sw_ring = rxq->sw_ring;
868
869         /*
870          * Retrieve RX context of current packet, if any.
871          */
872         first_seg = rxq->pkt_first_seg;
873         last_seg = rxq->pkt_last_seg;
874
875         while (nb_rx < nb_pkts) {
876         next_desc:
877                 /*
878                  * The order of operations here is important as the DD status
879                  * bit must not be read after any other descriptor fields.
880                  * rx_ring and rxdp are pointing to volatile data so the order
881                  * of accesses cannot be reordered by the compiler. If they were
882                  * not volatile, they could be reordered which could lead to
883                  * using invalid descriptor fields when read from rxd.
884                  */
885                 rxdp = &rx_ring[rx_id];
886                 staterr = rxdp->wb.upper.status_error;
887                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
888                         break;
889                 rxd = *rxdp;
890
891                 /*
892                  * Descriptor done.
893                  *
894                  * Allocate a new mbuf to replenish the RX ring descriptor.
895                  * If the allocation fails:
896                  *    - arrange for that RX descriptor to be the first one
897                  *      being parsed the next time the receive function is
898                  *      invoked [on the same queue].
899                  *
900                  *    - Stop parsing the RX ring and return immediately.
901                  *
902                  * This policy does not drop the packet received in the RX
903                  * descriptor for which the allocation of a new mbuf failed.
904                  * Thus, it allows that packet to be later retrieved if
905                  * mbufs have been freed in the meantime.
906                  * As a side effect, holding RX descriptors instead of
907                  * systematically giving them back to the NIC may lead to
908                  * RX ring exhaustion situations.
909                  * However, the NIC can gracefully prevent such situations
910                  * from happening by sending specific "back-pressure" flow control
911                  * frames to its peer(s).
912                  */
913                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
914                            "staterr=0x%x data_len=%u",
915                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
916                            (unsigned) rx_id, (unsigned) staterr,
917                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
918
919                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
920                 if (nmb == NULL) {
921                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
922                                    "queue_id=%u", (unsigned) rxq->port_id,
923                                    (unsigned) rxq->queue_id);
924                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
925                         break;
926                 }
927
928                 nb_hold++;
929                 rxe = &sw_ring[rx_id];
930                 rx_id++;
931                 if (rx_id == rxq->nb_rx_desc)
932                         rx_id = 0;
933
934                 /* Prefetch next mbuf while processing current one. */
935                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
936
937                 /*
938                  * When next RX descriptor is on a cache-line boundary,
939                  * prefetch the next 4 RX descriptors and the next 8 pointers
940                  * to mbufs.
941                  */
942                 if ((rx_id & 0x3) == 0) {
943                         rte_igb_prefetch(&rx_ring[rx_id]);
944                         rte_igb_prefetch(&sw_ring[rx_id]);
945                 }
946
947                 /*
948                  * Update RX descriptor with the physical address of the new
949                  * data buffer of the new allocated mbuf.
950                  */
951                 rxm = rxe->mbuf;
952                 rxe->mbuf = nmb;
953                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
954                 rxdp->read.pkt_addr = dma;
955                 rxdp->read.hdr_addr = dma;
956
957                 /*
958                  * Set data length & data buffer address of mbuf.
959                  */
960                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
961                 rxm->data_len = data_len;
962                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
963
964                 /*
965                  * If this is the first buffer of the received packet,
966                  * set the pointer to the first mbuf of the packet and
967                  * initialize its context.
968                  * Otherwise, update the total length and the number of segments
969                  * of the current scattered packet, and update the pointer to
970                  * the last mbuf of the current packet.
971                  */
972                 if (first_seg == NULL) {
973                         first_seg = rxm;
974                         first_seg->pkt_len = data_len;
975                         first_seg->nb_segs = 1;
976                 } else {
977                         first_seg->pkt_len += data_len;
978                         first_seg->nb_segs++;
979                         last_seg->next = rxm;
980                 }
981
982                 /*
983                  * If this is not the last buffer of the received packet,
984                  * update the pointer to the last mbuf of the current scattered
985                  * packet and continue to parse the RX ring.
986                  */
987                 if (! (staterr & E1000_RXD_STAT_EOP)) {
988                         last_seg = rxm;
989                         goto next_desc;
990                 }
991
992                 /*
993                  * This is the last buffer of the received packet.
994                  * If the CRC is not stripped by the hardware:
995                  *   - Subtract the CRC length from the total packet length.
996                  *   - If the last buffer only contains the whole CRC or a part
997                  *     of it, free the mbuf associated with the last buffer.
998                  *     If part of the CRC is also contained in the previous
999                  *     mbuf, subtract the length of that CRC part from the
1000                  *     data length of the previous mbuf.
1001                  */
1002                 rxm->next = NULL;
1003                 if (unlikely(rxq->crc_len > 0)) {
1004                         first_seg->pkt_len -= ETHER_CRC_LEN;
1005                         if (data_len <= ETHER_CRC_LEN) {
1006                                 rte_pktmbuf_free_seg(rxm);
1007                                 first_seg->nb_segs--;
1008                                 last_seg->data_len = (uint16_t)
1009                                         (last_seg->data_len -
1010                                          (ETHER_CRC_LEN - data_len));
1011                                 last_seg->next = NULL;
1012                         } else
1013                                 rxm->data_len =
1014                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1015                 }
1016
1017                 /*
1018                  * Initialize the first mbuf of the returned packet:
1019                  *    - RX port identifier,
1020                  *    - hardware offload data, if any:
1021                  *      - RSS flag & hash,
1022                  *      - IP checksum flag,
1023                  *      - VLAN TCI, if any,
1024                  *      - error flags.
1025                  */
1026                 first_seg->port = rxq->port_id;
1027                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1028
1029                 /*
1030                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1031                  * set in the pkt_flags field.
1032                  */
1033                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1034                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1035                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1036                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1037                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1038                 first_seg->ol_flags = pkt_flags;
1039
1040                 /* Prefetch data of first segment, if configured to do so. */
1041                 rte_packet_prefetch((char *)first_seg->buf_addr +
1042                         first_seg->data_off);
1043
1044                 /*
1045                  * Store the mbuf address into the next entry of the array
1046                  * of returned packets.
1047                  */
1048                 rx_pkts[nb_rx++] = first_seg;
1049
1050                 /*
1051                  * Setup receipt context for a new packet.
1052                  */
1053                 first_seg = NULL;
1054         }
1055
1056         /*
1057          * Record index of the next RX descriptor to probe.
1058          */
1059         rxq->rx_tail = rx_id;
1060
1061         /*
1062          * Save receive context.
1063          */
1064         rxq->pkt_first_seg = first_seg;
1065         rxq->pkt_last_seg = last_seg;
1066
1067         /*
1068          * If the number of free RX descriptors is greater than the RX free
1069          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1070          * register.
1071          * Update the RDT with the value of the last processed RX descriptor
1072          * minus 1, to guarantee that the RDT register is never equal to the
1073          * RDH register, which creates a "full" ring situation from the
1074          * hardware point of view...
1075          */
1076         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1077         if (nb_hold > rxq->rx_free_thresh) {
1078                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1079                            "nb_hold=%u nb_rx=%u",
1080                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1081                            (unsigned) rx_id, (unsigned) nb_hold,
1082                            (unsigned) nb_rx);
1083                 rx_id = (uint16_t) ((rx_id == 0) ?
1084                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1085                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1086                 nb_hold = 0;
1087         }
1088         rxq->nb_rx_hold = nb_hold;
1089         return (nb_rx);
1090 }
1091
1092 /*
1093  * Rings setup and release.
1094  *
1095  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1096  * a multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte boundary.
1097  * This also optimizes for the cache line size;
1098  * the hardware supports cache line sizes up to 128 bytes.
1099  */
1100 #define IGB_ALIGN 128
1101
1102 /*
1103  * Maximum number of Ring Descriptors.
1104  *
1105  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1106  * descriptors must meet the following condition:
1107  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1108  */
1109 #define IGB_MIN_RING_DESC 32
1110 #define IGB_MAX_RING_DESC 4096
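/*
 * Both advanced descriptor formats are 16 bytes wide, so the 128-byte
 * RDLEN/TDLEN rule amounts to requiring a multiple of 8 descriptors
 * within the [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC] range.
 */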
1111
1112 static const struct rte_memzone *
1113 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1114                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1115 {
1116         char z_name[RTE_MEMZONE_NAMESIZE];
1117         const struct rte_memzone *mz;
1118
1119         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1120                         dev->driver->pci_drv.name, ring_name,
1121                                 dev->data->port_id, queue_id);
1122         mz = rte_memzone_lookup(z_name);
1123         if (mz)
1124                 return mz;
1125
1126 #ifdef RTE_LIBRTE_XEN_DOM0
1127         return rte_memzone_reserve_bounded(z_name, ring_size,
1128                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1129 #else
1130         return rte_memzone_reserve_aligned(z_name, ring_size,
1131                         socket_id, 0, IGB_ALIGN);
1132 #endif
1133 }
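/*
 * The memzone is looked up by name first so that repeated queue setups
 * (e.g. after a port reconfiguration) reuse the already reserved ring
 * memory instead of failing to reserve a duplicate zone.
 */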
1134
1135 static void
1136 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1137 {
1138         unsigned i;
1139
1140         if (txq->sw_ring != NULL) {
1141                 for (i = 0; i < txq->nb_tx_desc; i++) {
1142                         if (txq->sw_ring[i].mbuf != NULL) {
1143                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1144                                 txq->sw_ring[i].mbuf = NULL;
1145                         }
1146                 }
1147         }
1148 }
1149
1150 static void
1151 igb_tx_queue_release(struct igb_tx_queue *txq)
1152 {
1153         if (txq != NULL) {
1154                 igb_tx_queue_release_mbufs(txq);
1155                 rte_free(txq->sw_ring);
1156                 rte_free(txq);
1157         }
1158 }
1159
1160 void
1161 eth_igb_tx_queue_release(void *txq)
1162 {
1163         igb_tx_queue_release(txq);
1164 }
1165
1166 static void
1167 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1168 {
1169         txq->tx_head = 0;
1170         txq->tx_tail = 0;
1171         txq->ctx_curr = 0;
1172         memset((void*)&txq->ctx_cache, 0,
1173                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1174 }
1175
1176 static void
1177 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1178 {
1179         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1180                         .buffer_addr = 0}};
1181         struct igb_tx_entry *txe = txq->sw_ring;
1182         uint16_t i, prev;
1183         struct e1000_hw *hw;
1184
1185         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186         /* Zero out HW ring memory */
1187         for (i = 0; i < txq->nb_tx_desc; i++) {
1188                 txq->tx_ring[i] = zeroed_desc;
1189         }
1190
1191         /* Initialize ring entries */
1192         prev = (uint16_t)(txq->nb_tx_desc - 1);
1193         for (i = 0; i < txq->nb_tx_desc; i++) {
1194                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1195
1196                 txd->wb.status = E1000_TXD_STAT_DD;
1197                 txe[i].mbuf = NULL;
1198                 txe[i].last_id = i;
1199                 txe[prev].next_id = i;
1200                 prev = i;
1201         }
1202
1203         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1204         /* 82575 specific, each tx queue will use 2 hw contexts */
1205         if (hw->mac.type == e1000_82575)
1206                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1207
1208         igb_reset_tx_queue_stat(txq);
1209 }
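/*
 * Pre-setting E1000_TXD_STAT_DD in every descriptor and chaining sw_ring
 * entries circularly via next_id lets eth_igb_xmit_pkts() see the whole
 * ring as free on the first call after a reset.
 */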
1210
1211 int
1212 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1213                          uint16_t queue_idx,
1214                          uint16_t nb_desc,
1215                          unsigned int socket_id,
1216                          const struct rte_eth_txconf *tx_conf)
1217 {
1218         const struct rte_memzone *tz;
1219         struct igb_tx_queue *txq;
1220         struct e1000_hw     *hw;
1221         uint32_t size;
1222
1223         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1224
1225         /*
1226          * Validate number of transmit descriptors.
1227          * It must not exceed the hardware maximum, and the ring size in
1228          * bytes must be a multiple of IGB_ALIGN.
1229          */
1230         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1231             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1232                 return -EINVAL;
1233         }
1234
1235         /*
1236          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1237          * driver.
1238          */
1239         if (tx_conf->tx_free_thresh != 0)
1240                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1241                              "used for the 1G driver.");
1242         if (tx_conf->tx_rs_thresh != 0)
1243                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1244                              "used for the 1G driver.");
1245         if (tx_conf->tx_thresh.wthresh == 0)
1246                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1247                              "consider setting the TX WTHRESH value to 4, 8, "
1248                              "or 16.");
1249
1250         /* Free memory prior to re-allocation if needed */
1251         if (dev->data->tx_queues[queue_idx] != NULL) {
1252                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1253                 dev->data->tx_queues[queue_idx] = NULL;
1254         }
1255
1256         /* First allocate the tx queue data structure */
1257         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1258                                                         RTE_CACHE_LINE_SIZE);
1259         if (txq == NULL)
1260                 return (-ENOMEM);
1261
1262         /*
1263          * Allocate TX ring hardware descriptors. A memzone large enough to
1264          * handle the maximum ring size is allocated in order to allow for
1265          * resizing in later calls to the queue setup function.
1266          */
1267         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1268         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1269                                         size, socket_id);
1270         if (tz == NULL) {
1271                 igb_tx_queue_release(txq);
1272                 return (-ENOMEM);
1273         }
1274
1275         txq->nb_tx_desc = nb_desc;
1276         txq->pthresh = tx_conf->tx_thresh.pthresh;
1277         txq->hthresh = tx_conf->tx_thresh.hthresh;
1278         txq->wthresh = tx_conf->tx_thresh.wthresh;
1279         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1280                 txq->wthresh = 1;
1281         txq->queue_id = queue_idx;
1282         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1283                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1284         txq->port_id = dev->data->port_id;
1285
1286         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1287 #ifndef RTE_LIBRTE_XEN_DOM0
1288         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1289 #else
1290         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1291 #endif
1292         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1293         /* Allocate software ring */
1294         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1295                                    sizeof(struct igb_tx_entry) * nb_desc,
1296                                    RTE_CACHE_LINE_SIZE);
1297         if (txq->sw_ring == NULL) {
1298                 igb_tx_queue_release(txq);
1299                 return (-ENOMEM);
1300         }
1301         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1302                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1303
1304         igb_reset_tx_queue(txq, dev);
1305         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1306         dev->data->tx_queues[queue_idx] = txq;
1307
1308         return (0);
1309 }
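
/*
 * Illustrative usage sketch (not part of the driver): applications reach
 * eth_igb_tx_queue_setup() through the generic ethdev API. The descriptor
 * count and threshold values below are example assumptions, not requirements.
 *
 *     struct rte_eth_txconf tx_conf = {
 *             .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
 *     };
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &tx_conf) != 0)
 *             rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */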
1310
1311 static void
1312 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1313 {
1314         unsigned i;
1315
1316         if (rxq->sw_ring != NULL) {
1317                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1318                         if (rxq->sw_ring[i].mbuf != NULL) {
1319                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1320                                 rxq->sw_ring[i].mbuf = NULL;
1321                         }
1322                 }
1323         }
1324 }
1325
1326 static void
1327 igb_rx_queue_release(struct igb_rx_queue *rxq)
1328 {
1329         if (rxq != NULL) {
1330                 igb_rx_queue_release_mbufs(rxq);
1331                 rte_free(rxq->sw_ring);
1332                 rte_free(rxq);
1333         }
1334 }
1335
1336 void
1337 eth_igb_rx_queue_release(void *rxq)
1338 {
1339         igb_rx_queue_release(rxq);
1340 }
1341
1342 static void
1343 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1344 {
1345         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1346                         .pkt_addr = 0}};
1347         unsigned i;
1348
1349         /* Zero out HW ring memory */
1350         for (i = 0; i < rxq->nb_rx_desc; i++) {
1351                 rxq->rx_ring[i] = zeroed_desc;
1352         }
1353
1354         rxq->rx_tail = 0;
1355         rxq->pkt_first_seg = NULL;
1356         rxq->pkt_last_seg = NULL;
1357 }
1358
1359 int
1360 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1361                          uint16_t queue_idx,
1362                          uint16_t nb_desc,
1363                          unsigned int socket_id,
1364                          const struct rte_eth_rxconf *rx_conf,
1365                          struct rte_mempool *mp)
1366 {
1367         const struct rte_memzone *rz;
1368         struct igb_rx_queue *rxq;
1369         struct e1000_hw     *hw;
1370         unsigned int size;
1371
1372         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1373
1374         /*
1375          * Validate number of receive descriptors.
1376          * It must not exceed the hardware maximum, and the total ring
1377          * size in bytes must be a multiple of IGB_ALIGN.
1378          */
1379         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1380             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1381                 return (-EINVAL);
1382         }
1383
1384         /* Free memory prior to re-allocation if needed */
1385         if (dev->data->rx_queues[queue_idx] != NULL) {
1386                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1387                 dev->data->rx_queues[queue_idx] = NULL;
1388         }
1389
1390         /* First allocate the RX queue data structure. */
1391         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1392                           RTE_CACHE_LINE_SIZE);
1393         if (rxq == NULL)
1394                 return (-ENOMEM);
1395         rxq->mb_pool = mp;
1396         rxq->nb_rx_desc = nb_desc;
1397         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1398         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1399         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1400         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1401                 rxq->wthresh = 1;
1402         rxq->drop_en = rx_conf->rx_drop_en;
1403         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1404         rxq->queue_id = queue_idx;
1405         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1406                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1407         rxq->port_id = dev->data->port_id;
1408         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1409                                   ETHER_CRC_LEN);
1410
1411         /*
1412          *  Allocate RX ring hardware descriptors. A memzone large enough to
1413          *  handle the maximum ring size is allocated in order to allow for
1414          *  resizing in later calls to the queue setup function.
1415          */
1416         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1417         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1418         if (rz == NULL) {
1419                 igb_rx_queue_release(rxq);
1420                 return (-ENOMEM);
1421         }
1422         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1423         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1424 #ifndef RTE_LIBRTE_XEN_DOM0
1425         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1426 #else
1427         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1428 #endif
1429         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1430
1431         /* Allocate software ring. */
1432         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1433                                    sizeof(struct igb_rx_entry) * nb_desc,
1434                                    RTE_CACHE_LINE_SIZE);
1435         if (rxq->sw_ring == NULL) {
1436                 igb_rx_queue_release(rxq);
1437                 return (-ENOMEM);
1438         }
1439         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1440                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1441
1442         dev->data->rx_queues[queue_idx] = rxq;
1443         igb_reset_rx_queue(rxq);
1444
1445         return 0;
1446 }
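
/*
 * Illustrative usage sketch (not part of the driver): eth_igb_rx_queue_setup()
 * is normally reached through rte_eth_rx_queue_setup(). The mbuf pool and the
 * descriptor count below are example assumptions; the pool is created
 * elsewhere (e.g. with rte_mempool_create()).
 *
 *     struct rte_eth_rxconf rx_conf = { .rx_drop_en = 1 };
 *     if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &rx_conf, mb_pool) != 0)
 *             rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */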
1447
1448 uint32_t
1449 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1450 {
1451 #define IGB_RXQ_SCAN_INTERVAL 4
1452         volatile union e1000_adv_rx_desc *rxdp;
1453         struct igb_rx_queue *rxq;
1454         uint32_t desc = 0;
1455
1456         if (rx_queue_id >= dev->data->nb_rx_queues) {
1457                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1458                 return 0;
1459         }
1460
1461         rxq = dev->data->rx_queues[rx_queue_id];
1462         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1463
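        /*
         * Walk the ring in steps of IGB_RXQ_SCAN_INTERVAL descriptors,
         * counting entries whose DD (Descriptor Done) bit is set, and wrap
         * back to the start of the ring when the scan runs past its end.
         */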
1464         while ((desc < rxq->nb_rx_desc) &&
1465                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1466                 desc += IGB_RXQ_SCAN_INTERVAL;
1467                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1468                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1469                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1470                                 desc - rxq->nb_rx_desc]);
1471         }
1472
1473         return desc;
1474 }
1475
1476 int
1477 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1478 {
1479         volatile union e1000_adv_rx_desc *rxdp;
1480         struct igb_rx_queue *rxq = rx_queue;
1481         uint32_t desc;
1482
1483         if (unlikely(offset >= rxq->nb_rx_desc))
1484                 return 0;
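        /*
         * Translate the caller's offset into an absolute ring index,
         * wrapping around the end of the ring if necessary.
         */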
1485         desc = rxq->rx_tail + offset;
1486         if (desc >= rxq->nb_rx_desc)
1487                 desc -= rxq->nb_rx_desc;
1488
1489         rxdp = &rxq->rx_ring[desc];
1490         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1491 }
1492
1493 void
1494 igb_dev_clear_queues(struct rte_eth_dev *dev)
1495 {
1496         uint16_t i;
1497         struct igb_tx_queue *txq;
1498         struct igb_rx_queue *rxq;
1499
1500         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1501                 txq = dev->data->tx_queues[i];
1502                 if (txq != NULL) {
1503                         igb_tx_queue_release_mbufs(txq);
1504                         igb_reset_tx_queue(txq, dev);
1505                 }
1506         }
1507
1508         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1509                 rxq = dev->data->rx_queues[i];
1510                 if (rxq != NULL) {
1511                         igb_rx_queue_release_mbufs(rxq);
1512                         igb_reset_rx_queue(rxq);
1513                 }
1514         }
1515 }
1516
1517 /**
1518  * Receive Side Scaling (RSS).
1519  * See section 7.1.1.7 in the following document:
1520  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1521  *
1522  * Principles:
1523  * The source and destination IP addresses of the IP header and the source and
1524  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1525  * against a configurable random key to compute a 32-bit RSS hash result.
1526  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1527  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1528  * RSS output index, which is used as the index of the RX queue in which
1529  * to store the received packets.
1530  * The following output is supplied in the RX write-back descriptor:
1531  *     - 32-bit result of the Microsoft RSS hash function,
1532  *     - 4-bit RSS type field.
1533  */
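
/*
 * Illustrative sketch of the RETA lookup described above (software model,
 * not driver code): the 7 LSBs of the 32-bit hash select one of the 128
 * table entries, and that entry supplies the RX queue index.
 *
 *     uint8_t reta[128];                         // 3-bit queue indices
 *     uint16_t rx_queue = reta[rss_hash & 0x7F]; // 7 LSBs index the table
 */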
1534
1535 /*
1536  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1537  * Used as the default key.
1538  */
1539 static uint8_t rss_intel_key[40] = {
1540         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1541         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1542         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1543         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1544         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1545 };
1546
1547 static void
1548 igb_rss_disable(struct rte_eth_dev *dev)
1549 {
1550         struct e1000_hw *hw;
1551         uint32_t mrqc;
1552
1553         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1554         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1555         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1556         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1557 }
1558
1559 static void
1560 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1561 {
1562         uint8_t  *hash_key;
1563         uint32_t rss_key;
1564         uint32_t mrqc;
1565         uint64_t rss_hf;
1566         uint16_t i;
1567
1568         hash_key = rss_conf->rss_key;
1569         if (hash_key != NULL) {
1570                 /* Fill in RSS hash key */
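                /*
                 * Each 32-bit RSSRK register holds four key bytes, packed
                 * least-significant byte first: key byte 0 lands in bits 7:0
                 * of RSSRK(0), key byte 39 in bits 31:24 of RSSRK(9).
                 */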
1571                 for (i = 0; i < 10; i++) {
1572                         rss_key  = hash_key[(i * 4)];
1573                         rss_key |= hash_key[(i * 4) + 1] << 8;
1574                         rss_key |= hash_key[(i * 4) + 2] << 16;
1575                         rss_key |= hash_key[(i * 4) + 3] << 24;
1576                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1577                 }
1578         }
1579
1580         /* Set configured hashing protocols in MRQC register */
1581         rss_hf = rss_conf->rss_hf;
1582         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1583         if (rss_hf & ETH_RSS_IPV4)
1584                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1585         if (rss_hf & ETH_RSS_IPV4_TCP)
1586                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1587         if (rss_hf & ETH_RSS_IPV6)
1588                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1589         if (rss_hf & ETH_RSS_IPV6_EX)
1590                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1591         if (rss_hf & ETH_RSS_IPV6_TCP)
1592                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1593         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1594                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1595         if (rss_hf & ETH_RSS_IPV4_UDP)
1596                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1597         if (rss_hf & ETH_RSS_IPV6_UDP)
1598                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1599         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1600                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1601         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1602 }
1603
1604 int
1605 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1606                         struct rte_eth_rss_conf *rss_conf)
1607 {
1608         struct e1000_hw *hw;
1609         uint32_t mrqc;
1610         uint64_t rss_hf;
1611
1612         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1613
1614         /*
1615          * Before changing anything, check that the requested update does
1616          * not attempt to disable RSS when RSS was enabled at
1617          * initialization time, or to enable RSS when RSS was disabled at
1618          * initialization time.
1619          */
1620         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1621         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1622         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1623                 if (rss_hf != 0) /* Enable RSS */
1624                         return -(EINVAL);
1625                 return 0; /* Nothing to do */
1626         }
1627         /* RSS enabled */
1628         if (rss_hf == 0) /* Disable RSS */
1629                 return -(EINVAL);
1630         igb_hw_rss_hash_set(hw, rss_conf);
1631         return 0;
1632 }
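
/*
 * Illustrative usage sketch (not part of the driver): an application can
 * adjust the RSS hash functions at runtime through the ethdev API. Passing
 * rss_key = NULL keeps the key currently programmed in the RSSRK registers.
 *
 *     struct rte_eth_rss_conf rss_conf = {
 *             .rss_key = NULL,
 *             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
 *     };
 *     if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
 *             rte_exit(EXIT_FAILURE, "RSS hash update failed\n");
 */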
1633
1634 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1635                               struct rte_eth_rss_conf *rss_conf)
1636 {
1637         struct e1000_hw *hw;
1638         uint8_t *hash_key;
1639         uint32_t rss_key;
1640         uint32_t mrqc;
1641         uint64_t rss_hf;
1642         uint16_t i;
1643
1644         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1645         hash_key = rss_conf->rss_key;
1646         if (hash_key != NULL) {
1647                 /* Return RSS hash key */
1648                 for (i = 0; i < 10; i++) {
1649                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1650                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1651                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1652                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1653                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1654                 }
1655         }
1656
1657         /* Get RSS functions configured in MRQC register */
1658         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1659         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1660                 rss_conf->rss_hf = 0;
1661                 return 0;
1662         }
1663         rss_hf = 0;
1664         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1665                 rss_hf |= ETH_RSS_IPV4;
1666         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1667                 rss_hf |= ETH_RSS_IPV4_TCP;
1668         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1669                 rss_hf |= ETH_RSS_IPV6;
1670         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1671                 rss_hf |= ETH_RSS_IPV6_EX;
1672         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1673                 rss_hf |= ETH_RSS_IPV6_TCP;
1674         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1675                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1676         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1677                 rss_hf |= ETH_RSS_IPV4_UDP;
1678         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1679                 rss_hf |= ETH_RSS_IPV6_UDP;
1680         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1681                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1682         rss_conf->rss_hf = rss_hf;
1683         return 0;
1684 }
1685
1686 static void
1687 igb_rss_configure(struct rte_eth_dev *dev)
1688 {
1689         struct rte_eth_rss_conf rss_conf;
1690         struct e1000_hw *hw;
1691         uint32_t shift;
1692         uint16_t i;
1693
1694         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1695
1696         /* Fill in redirection table. */
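        /*
         * Each 32-bit RETA register holds four consecutive one-byte entries;
         * the union below accumulates them and the register is written once
         * every fourth iteration.
         */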
1697         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1698         for (i = 0; i < 128; i++) {
1699                 union e1000_reta {
1700                         uint32_t dword;
1701                         uint8_t  bytes[4];
1702                 } reta;
1703                 uint8_t q_idx;
1704
1705                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1706                                    i % dev->data->nb_rx_queues : 0);
1707                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1708                 if ((i & 3) == 3)
1709                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1710         }
1711
1712         /*
1713          * Configure the RSS key and the RSS protocols used to compute
1714          * the RSS hash of input packets.
1715          */
1716         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1717         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1718                 igb_rss_disable(dev);
1719                 return;
1720         }
1721         if (rss_conf.rss_key == NULL)
1722                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1723         igb_hw_rss_hash_set(hw, &rss_conf);
1724 }
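
/*
 * Illustrative usage sketch (not part of the driver): igb_rss_configure()
 * consumes the RSS settings supplied at rte_eth_dev_configure() time. A
 * minimal configuration, assuming two RX queues and one TX queue:
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *             .rx_adv_conf = {
 *                     .rss_conf = {
 *                             .rss_key = NULL, // fall back to rss_intel_key
 *                             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV6,
 *                     },
 *             },
 *     };
 *     if (rte_eth_dev_configure(port_id, 2, 1, &port_conf) != 0)
 *             rte_exit(EXIT_FAILURE, "Port configure failed\n");
 */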
1725
1726 /*
1727  * Check whether the MAC type supports VMDq.
1728  * Return 1 if it does, otherwise return 0.
1729  */
1730 static int
1731 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1732 {
1733         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1734
1735         switch (hw->mac.type) {
1736         case e1000_82576:
1737         case e1000_82580:
1738         case e1000_i350:
1739                 return 1;
1740         case e1000_82540:
1741         case e1000_82541:
1742         case e1000_82542:
1743         case e1000_82543:
1744         case e1000_82544:
1745         case e1000_82545:
1746         case e1000_82546:
1747         case e1000_82547:
1748         case e1000_82571:
1749         case e1000_82572:
1750         case e1000_82573:
1751         case e1000_82574:
1752         case e1000_82583:
1753         case e1000_i210:
1754         case e1000_i211:
1755         default:
1756                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1757                 return 0;
1758         }
1759 }
1760
1761 static int
1762 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1763 {
1764         struct rte_eth_vmdq_rx_conf *cfg;
1765         struct e1000_hw *hw;
1766         uint32_t mrqc, vt_ctl, vmolr, rctl;
1767         int i;
1768
1769         PMD_INIT_FUNC_TRACE();
1770
1771         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1773
1774         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1775         if (igb_is_vmdq_supported(dev) == 0)
1776                 return -1;
1777
1778         igb_rss_disable(dev);
1779
1780         /* RCTL: enable VLAN filter */
1781         rctl = E1000_READ_REG(hw, E1000_RCTL);
1782         rctl |= E1000_RCTL_VFE;
1783         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1784
1785         /* MRQC: enable vmdq */
1786         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1787         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1788         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1789
1790         /* VTCTL:  pool selection according to VLAN tag */
1791         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1792         if (cfg->enable_default_pool)
1793                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1794         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1795         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1796
1797         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1798                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1799                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1800                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1801                         E1000_VMOLR_MPME);
1802
1803                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1804                         vmolr |= E1000_VMOLR_AUPE;
1805                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1806                         vmolr |= E1000_VMOLR_ROMPE;
1807                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1808                         vmolr |= E1000_VMOLR_ROPE;
1809                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1810                         vmolr |= E1000_VMOLR_BAM;
1811                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1812                         vmolr |= E1000_VMOLR_MPME;
1813
1814                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1815         }
1816
1817         /*
1818          * VMOLR: set STRVLAN when IGMAC in VT_CTL is set.
1819          * Both 82576 and 82580 support it.
1820          */
1821         if (hw->mac.type != e1000_i350) {
1822                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1823                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1824                         vmolr |= E1000_VMOLR_STRVLAN;
1825                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1826                 }
1827         }
1828
1829         /* VFTA - enable all vlan filters */
1830         for (i = 0; i < IGB_VFTA_SIZE; i++)
1831                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1832
1833         /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1834         if (hw->mac.type != e1000_82580)
1835                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1836
1837         /*
1838          * RAH/RAL - allow pools to read specific mac addresses
1839          * In this case, all pools should be able to read from mac addr 0
1840          */
1841         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1842         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1843
1844         /* VLVF: set up filters for vlan tags as configured */
1845         for (i = 0; i < cfg->nb_pool_maps; i++) {
1846                 /* set vlan id in VF register and set the valid bit */
1847                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1848                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1849                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1850                         E1000_VLVF_POOLSEL_MASK)));
1851         }
1852
1853         E1000_WRITE_FLUSH(hw);
1854
1855         return 0;
1856 }
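
/*
 * Illustrative usage sketch (not part of the driver): the VMDq parameters
 * consumed above come from the port configuration passed to
 * rte_eth_dev_configure(). The VLAN ID and pool bitmap are example
 * assumptions.
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
 *             .rx_adv_conf = {
 *                     .vmdq_rx_conf = {
 *                             .nb_queue_pools = ETH_8_POOLS,
 *                             .rx_mode = ETH_VMDQ_ACCEPT_UNTAG |
 *                                        ETH_VMDQ_ACCEPT_BROADCAST,
 *                             .nb_pool_maps = 1,
 *                             .pool_map = { { .vlan_id = 100,
 *                                             .pools = 1ULL << 0 } },
 *                     },
 *             },
 *     };
 */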
1857
1858
1859 /*********************************************************************
1860  *
1861  *  Enable receive unit.
1862  *
1863  **********************************************************************/
1864
1865 static int
1866 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1867 {
1868         struct igb_rx_entry *rxe = rxq->sw_ring;
1869         uint64_t dma_addr;
1870         unsigned i;
1871
1872         /* Initialize software ring entries. */
1873         for (i = 0; i < rxq->nb_rx_desc; i++) {
1874                 volatile union e1000_adv_rx_desc *rxd;
1875                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1876
1877                 if (mbuf == NULL) {
1878                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1879                                      "queue_id=%hu", rxq->queue_id);
1880                         return (-ENOMEM);
1881                 }
1882                 dma_addr =
1883                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1884                 rxd = &rxq->rx_ring[i];
1885                 rxd->read.hdr_addr = dma_addr;
1886                 rxd->read.pkt_addr = dma_addr;
1887                 rxe[i].mbuf = mbuf;
1888         }
1889
1890         return 0;
1891 }
1892
1893 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1894 static int
1895 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1896 {
1897         struct e1000_hw *hw =
1898                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899         uint32_t mrqc;
1900
1901         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1902                 /*
1903                  * SRIOV active scheme
1904                  * FIXME: RSS together with VMDq & SR-IOV is not yet supported
1905                  */
1906                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1907                 /* Def_Q = 011b: ignored, the default queue comes from VT_CTL.DEF_PL */
1908                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1909                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1910         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1911                 /*
1912                  * SRIOV inactive scheme
1913                  */
1914                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1915                         case ETH_MQ_RX_RSS:
1916                                 igb_rss_configure(dev);
1917                                 break;
1918                         case ETH_MQ_RX_VMDQ_ONLY:
1919                                 /*Configure general VMDQ only RX parameters*/
1920                                 igb_vmdq_rx_hw_configure(dev);
1921                                 break;
1922                         case ETH_MQ_RX_NONE:
1923                                 /* if mq_mode is none, disable RSS mode. */
1924                         default:
1925                                 igb_rss_disable(dev);
1926                                 break;
1927                 }
1928         }
1929
1930         return 0;
1931 }
1932
1933 int
1934 eth_igb_rx_init(struct rte_eth_dev *dev)
1935 {
1936         struct e1000_hw     *hw;
1937         struct igb_rx_queue *rxq;
1938         struct rte_pktmbuf_pool_private *mbp_priv;
1939         uint32_t rctl;
1940         uint32_t rxcsum;
1941         uint32_t srrctl;
1942         uint16_t buf_size;
1943         uint16_t rctl_bsize;
1944         uint16_t i;
1945         int ret;
1946
1947         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1948         srrctl = 0;
1949
1950         /*
1951          * Make sure receives are disabled while setting
1952          * up the descriptor ring.
1953          */
1954         rctl = E1000_READ_REG(hw, E1000_RCTL);
1955         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1956
1957         /*
1958          * Configure support of jumbo frames, if any.
1959          */
1960         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1961                 rctl |= E1000_RCTL_LPE;
1962
1963                 /*
1964                  * Set the maximum packet length by default; it may be
1965                  * updated later when dual VLAN is enabled or disabled.
1966                  */
1967                 E1000_WRITE_REG(hw, E1000_RLPML,
1968                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1969                                                 VLAN_TAG_SIZE);
1970         } else
1971                 rctl &= ~E1000_RCTL_LPE;
1972
1973         /* Configure and enable each RX queue. */
1974         rctl_bsize = 0;
1975         dev->rx_pkt_burst = eth_igb_recv_pkts;
1976         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1977                 uint64_t bus_addr;
1978                 uint32_t rxdctl;
1979
1980                 rxq = dev->data->rx_queues[i];
1981
1982                 /* Allocate buffers for descriptor rings and set up queue */
1983                 ret = igb_alloc_rx_queue_mbufs(rxq);
1984                 if (ret)
1985                         return ret;
1986
1987                 /*
1988                  * Reset crc_len in case it was changed after queue setup by a
1989                  *  call to configure
1990                  */
1991                 rxq->crc_len =
1992                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1993                                                         0 : ETHER_CRC_LEN);
1994
1995                 bus_addr = rxq->rx_ring_phys_addr;
1996                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1997                                 rxq->nb_rx_desc *
1998                                 sizeof(union e1000_adv_rx_desc));
1999                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2000                                 (uint32_t)(bus_addr >> 32));
2001                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2002
2003                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2004
2005                 /*
2006                  * Configure RX buffer size.
2007                  */
2008                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2009                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2010                                        RTE_PKTMBUF_HEADROOM);
2011                 if (buf_size >= 1024) {
2012                         /*
2013                          * Configure the BSIZEPACKET field of the SRRCTL
2014                          * register of the queue.
2015                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2016                          * If this field is equal to 0b, then RCTL.BSIZE
2017                          * determines the RX packet buffer size.
2018                          */
2019                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2020                                    E1000_SRRCTL_BSIZEPKT_MASK);
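                        /*
                         * Read back the 1 KB-granular value actually
                         * programmed so the scatter check below uses the
                         * effective buffer size.
                         */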
2021                         buf_size = (uint16_t) ((srrctl &
2022                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2023                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2024
2025                         /* Account for two VLAN tag lengths to support dual VLAN frames */
2026                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2027                                                 2 * VLAN_TAG_SIZE) > buf_size){
2028                                 if (!dev->data->scattered_rx)
2029                                         PMD_INIT_LOG(DEBUG,
2030                                                      "forcing scatter mode");
2031                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2032                                 dev->data->scattered_rx = 1;
2033                         }
2034                 } else {
2035                         /*
2036                          * Use BSIZE field of the device RCTL register.
2037                          */
2038                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2039                                 rctl_bsize = buf_size;
2040                         if (!dev->data->scattered_rx)
2041                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2042                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2043                         dev->data->scattered_rx = 1;
2044                 }
2045
2046                 /* Set if packets are dropped when no descriptors available */
2047                 if (rxq->drop_en)
2048                         srrctl |= E1000_SRRCTL_DROP_EN;
2049
2050                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2051
2052                 /* Enable this RX queue. */
2053                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2054                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2055                 rxdctl &= 0xFFF00000;
2056                 rxdctl |= (rxq->pthresh & 0x1F);
2057                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2058                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2059                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2060         }
2061
2062         if (dev->data->dev_conf.rxmode.enable_scatter) {
2063                 if (!dev->data->scattered_rx)
2064                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2065                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2066                 dev->data->scattered_rx = 1;
2067         }
2068
2069         /*
2070          * Setup BSIZE field of RCTL register, if needed.
2071          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2072          * register, since the code above configures the SRRCTL register of
2073          * the RX queue in such a case.
2074          * All configurable sizes are:
2075          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2076          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2077          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2078          *  2048: rctl |= E1000_RCTL_SZ_2048;
2079          *  1024: rctl |= E1000_RCTL_SZ_1024;
2080          *   512: rctl |= E1000_RCTL_SZ_512;
2081          *   256: rctl |= E1000_RCTL_SZ_256;
2082          */
2083         if (rctl_bsize > 0) {
2084                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2085                         rctl |= E1000_RCTL_SZ_512;
2086                 else /* 256 <= buf_size < 512 - use 256 */
2087                         rctl |= E1000_RCTL_SZ_256;
2088         }
2089
2090         /*
2091          * Configure RSS if device configured with multiple RX queues.
2092          */
2093         igb_dev_mq_rx_configure(dev);
2094
2095         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2096         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2097
2098         /*
2099          * Setup the Checksum Register.
2100          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2101          */
2102         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2103         rxcsum |= E1000_RXCSUM_PCSD;
2104
2105         /* Enable both L3/L4 rx checksum offload */
2106         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2107                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2108         else
2109                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2110         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2111
2112         /* Setup the Receive Control Register. */
2113         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2114                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2115
2116                 /* set STRCRC bit in all queues */
2117                 if (hw->mac.type == e1000_i350 ||
2118                     hw->mac.type == e1000_i210 ||
2119                     hw->mac.type == e1000_i211 ||
2120                     hw->mac.type == e1000_i354) {
2121                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2122                                 rxq = dev->data->rx_queues[i];
2123                                 uint32_t dvmolr = E1000_READ_REG(hw,
2124                                         E1000_DVMOLR(rxq->reg_idx));
2125                                 dvmolr |= E1000_DVMOLR_STRCRC;
2126                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2127                         }
2128                 }
2129         } else {
2130                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2131
2132                 /* clear STRCRC bit in all queues */
2133                 if (hw->mac.type == e1000_i350 ||
2134                     hw->mac.type == e1000_i210 ||
2135                     hw->mac.type == e1000_i211 ||
2136                     hw->mac.type == e1000_i354) {
2137                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2138                                 rxq = dev->data->rx_queues[i];
2139                                 uint32_t dvmolr = E1000_READ_REG(hw,
2140                                         E1000_DVMOLR(rxq->reg_idx));
2141                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2142                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2143                         }
2144                 }
2145         }
2146
2147         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2148         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2149                 E1000_RCTL_RDMTS_HALF |
2150                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2151
2152         /* Make sure VLAN Filters are off. */
2153         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2154                 rctl &= ~E1000_RCTL_VFE;
2155         /* Don't store bad packets. */
2156         rctl &= ~E1000_RCTL_SBP;
2157
2158         /* Enable Receives. */
2159         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2160
2161         /*
2162          * Setup the HW Rx Head and Tail Descriptor Pointers.
2163          * This needs to be done after enable.
2164          */
2165         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2166                 rxq = dev->data->rx_queues[i];
2167                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2168                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2169         }
2170
2171         return 0;
2172 }
2173
2174 /*********************************************************************
2175  *
2176  *  Enable transmit unit.
2177  *
2178  **********************************************************************/
2179 void
2180 eth_igb_tx_init(struct rte_eth_dev *dev)
2181 {
2182         struct e1000_hw     *hw;
2183         struct igb_tx_queue *txq;
2184         uint32_t tctl;
2185         uint32_t txdctl;
2186         uint16_t i;
2187
2188         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2189
2190         /* Setup the Base and Length of the Tx Descriptor Rings. */
2191         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2192                 uint64_t bus_addr;
2193                 txq = dev->data->tx_queues[i];
2194                 bus_addr = txq->tx_ring_phys_addr;
2195
2196                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2197                                 txq->nb_tx_desc *
2198                                 sizeof(union e1000_adv_tx_desc));
2199                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2200                                 (uint32_t)(bus_addr >> 32));
2201                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2202
2203                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2204                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2205                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2206
2207                 /* Setup Transmit threshold registers. */
2208                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2209                 txdctl |= txq->pthresh & 0x1F;
2210                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2211                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2212                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2213                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2214         }
2215
2216         /* Program the Transmit Control Register. */
2217         tctl = E1000_READ_REG(hw, E1000_TCTL);
2218         tctl &= ~E1000_TCTL_CT;
2219         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2220                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2221
2222         e1000_config_collision_dist(hw);
2223
2224         /* This write will effectively turn on the transmit unit. */
2225         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2226 }
2227
2228 /*********************************************************************
2229  *
2230  *  Enable VF receive unit.
2231  *
2232  **********************************************************************/
2233 int
2234 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2235 {
2236         struct e1000_hw     *hw;
2237         struct igb_rx_queue *rxq;
2238         struct rte_pktmbuf_pool_private *mbp_priv;
2239         uint32_t srrctl;
2240         uint16_t buf_size;
2241         uint16_t rctl_bsize;
2242         uint16_t i;
2243         int ret;
2244
2245         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2246
2247         /* setup MTU */
2248         e1000_rlpml_set_vf(hw,
2249                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2250                 VLAN_TAG_SIZE));
2251
2252         /* Configure and enable each RX queue. */
2253         rctl_bsize = 0;
2254         dev->rx_pkt_burst = eth_igb_recv_pkts;
2255         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2256                 uint64_t bus_addr;
2257                 uint32_t rxdctl;
2258
2259                 rxq = dev->data->rx_queues[i];
2260
2261                 /* Allocate buffers for descriptor rings and set up queue */
2262                 ret = igb_alloc_rx_queue_mbufs(rxq);
2263                 if (ret)
2264                         return ret;
2265
2266                 bus_addr = rxq->rx_ring_phys_addr;
2267                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2268                                 rxq->nb_rx_desc *
2269                                 sizeof(union e1000_adv_rx_desc));
2270                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2271                                 (uint32_t)(bus_addr >> 32));
2272                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2273
2274                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2275
2276                 /*
2277                  * Configure RX buffer size.
2278                  */
2279                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2280                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2281                                        RTE_PKTMBUF_HEADROOM);
2282                 if (buf_size >= 1024) {
2283                         /*
2284                          * Configure the BSIZEPACKET field of the SRRCTL
2285                          * register of the queue.
2286                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2287                          * If this field is equal to 0b, then RCTL.BSIZE
2288                          * determines the RX packet buffer size.
2289                          */
2290                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2291                                    E1000_SRRCTL_BSIZEPKT_MASK);
2292                         buf_size = (uint16_t) ((srrctl &
2293                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2294                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2295
2296                         /* Account for two VLAN tag lengths to support dual VLAN frames */
2297                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2298                                                 2 * VLAN_TAG_SIZE) > buf_size){
2299                                 if (!dev->data->scattered_rx)
2300                                         PMD_INIT_LOG(DEBUG,
2301                                                      "forcing scatter mode");
2302                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2303                                 dev->data->scattered_rx = 1;
2304                         }
2305                 } else {
2306                         /*
2307                          * Use BSIZE field of the device RCTL register.
2308                          */
2309                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2310                                 rctl_bsize = buf_size;
2311                         if (!dev->data->scattered_rx)
2312                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2313                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2314                         dev->data->scattered_rx = 1;
2315                 }
2316
2317                 /* Set if packets are dropped when no descriptors available */
2318                 if (rxq->drop_en)
2319                         srrctl |= E1000_SRRCTL_DROP_EN;
2320
2321                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2322
2323                 /* Enable this RX queue. */
2324                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2325                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2326                 rxdctl &= 0xFFF00000;
2327                 rxdctl |= (rxq->pthresh & 0x1F);
2328                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2329                 if (hw->mac.type == e1000_vfadapt) {
2330                         /*
2331                          * Workaround for an 82576 VF erratum:
2332                          * force WTHRESH to 1 to avoid descriptor
2333                          * write-back occasionally not being triggered.
2334                          */
2335                         rxdctl |= 0x10000;
2336                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2337                 }
2338                 else
2339                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2340                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2341         }
2342
2343         if (dev->data->dev_conf.rxmode.enable_scatter) {
2344                 if (!dev->data->scattered_rx)
2345                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2346                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2347                 dev->data->scattered_rx = 1;
2348         }
2349
2350         /*
2351          * Setup the HW Rx Head and Tail Descriptor Pointers.
2352          * This needs to be done after enable.
2353          */
2354         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2355                 rxq = dev->data->rx_queues[i];
2356                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2357                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2358         }
2359
2360         return 0;
2361 }
2362
2363 /*********************************************************************
2364  *
2365  *  Enable VF transmit unit.
2366  *
2367  **********************************************************************/
2368 void
2369 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2370 {
2371         struct e1000_hw     *hw;
2372         struct igb_tx_queue *txq;
2373         uint32_t txdctl;
2374         uint16_t i;
2375
2376         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2377
2378         /* Setup the Base and Length of the Tx Descriptor Rings. */
2379         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2380                 uint64_t bus_addr;
2381
2382                 txq = dev->data->tx_queues[i];
2383                 bus_addr = txq->tx_ring_phys_addr;
2384                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2385                                 txq->nb_tx_desc *
2386                                 sizeof(union e1000_adv_tx_desc));
2387                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2388                                 (uint32_t)(bus_addr >> 32));
2389                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2390
2391                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2392                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2393                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2394
2395                 /* Setup Transmit threshold registers. */
2396                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2397                 txdctl |= txq->pthresh & 0x1F;
2398                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2399                 if (hw->mac.type == e1000_82576) {
2400                         /*
2401                          * Workaround for an 82576 VF erratum:
2402                          * force WTHRESH to 1 to avoid descriptor
2403                          * write-back occasionally not being triggered.
2404                          */
2405                         txdctl |= 0x10000;
2406                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2407                 }
2408                 else
2409                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2410                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2411                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2412         }
2413
2414 }
2415