mbuf: replace data pointer by an offset
[dpdk.git] lib/librte_pmd_e1000/igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 #define IGB_RSS_OFFLOAD_ALL ( \
77                 ETH_RSS_IPV4 | \
78                 ETH_RSS_IPV4_TCP | \
79                 ETH_RSS_IPV6 | \
80                 ETH_RSS_IPV6_EX | \
81                 ETH_RSS_IPV6_TCP | \
82                 ETH_RSS_IPV6_TCP_EX | \
83                 ETH_RSS_IPV4_UDP | \
84                 ETH_RSS_IPV6_UDP | \
85                 ETH_RSS_IPV6_UDP_EX)
86
87 static inline struct rte_mbuf *
88 rte_rxmbuf_alloc(struct rte_mempool *mp)
89 {
90         struct rte_mbuf *m;
91
92         m = __rte_mbuf_raw_alloc(mp);
93         __rte_mbuf_sanity_check_raw(m, 0);
94         return (m);
95 }
96
97 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
98         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
99
100 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
101         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
102
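/*
 * Illustrative sketch, not part of the driver: with the mbuf data pointer
 * replaced by an offset, the virtual data address is derived the same way as
 * the DMA addresses above, i.e. from the buffer base plus data_off.
 * rte_pktmbuf_mtod() already provides this; the hypothetical helper below
 * only restates the relationship for clarity.
 */
static inline void *
igb_example_mbuf_data_vaddr(const struct rte_mbuf *m)
{
	/* Virtual counterpart of RTE_MBUF_DATA_DMA_ADDR() above. */
	return (char *)m->buf_addr + m->data_off;
}
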
103 /**
104  * Structure associated with each descriptor of the RX ring of a RX queue.
105  */
106 struct igb_rx_entry {
107         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
108 };
109
110 /**
111  * Structure associated with each descriptor of the TX ring of a TX queue.
112  */
113 struct igb_tx_entry {
114         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
115         uint16_t next_id; /**< Index of next descriptor in ring. */
116         uint16_t last_id; /**< Index of last scattered descriptor. */
117 };
118
119 /**
120  * Structure associated with each RX queue.
121  */
122 struct igb_rx_queue {
123         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
124         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
125         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
126         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
127         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
128         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
129         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
130         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
131         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
132         uint16_t            rx_tail;    /**< current value of RDT register. */
133         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
134         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
135         uint16_t            queue_id;   /**< RX queue index. */
136         uint16_t            reg_idx;    /**< RX queue register index. */
137         uint8_t             port_id;    /**< Device port identifier. */
138         uint8_t             pthresh;    /**< Prefetch threshold register. */
139         uint8_t             hthresh;    /**< Host threshold register. */
140         uint8_t             wthresh;    /**< Write-back threshold register. */
141         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
142         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
143 };
144
145 /**
146  * Hardware context number
147  */
148 enum igb_advctx_num {
149         IGB_CTX_0    = 0, /**< CTX0    */
150         IGB_CTX_1    = 1, /**< CTX1    */
151         IGB_CTX_NUM  = 2, /**< CTX_NUM */
152 };
153
154 /** Offload features */
155 union igb_vlan_macip {
156         uint32_t data;
157         struct {
158                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
159                 uint16_t vlan_tci;
160                 /**< VLAN Tag Control Identifier (CPU order). */
161         } f;
162 };
163
164 /*
165  * Compare masks for vlan_macip_lens.data;
166  * they must be kept in sync with the igb_vlan_macip.f layout.
167  */
168 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
169 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
170 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
171 /** MAC+IP  length. */
172 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
173
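/*
 * Illustrative sketch, not part of the driver: the compare masks above assume
 * that l2_l3_len carries the 9-bit L3 (IP) header length in its low bits and
 * the 7-bit L2 (MAC) header length above it. The hypothetical helper below
 * only shows how such a value would be packed.
 */
static inline uint16_t
igb_example_pack_l2_l3_len(uint16_t l2_len, uint16_t l3_len)
{
	/* 7-bit L2 length shifted above the 9-bit L3 length. */
	return (uint16_t)(((l2_len & 0x7F) << 9) | (l3_len & 0x1FF));
}
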
174 /**
175  * Structure used to check whether a new context descriptor needs to be built.
176  */
177 struct igb_advctx_info {
178         uint16_t flags;           /**< ol_flags related to context build. */
179         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
180         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
181 };
182
183 /**
184  * Structure associated with each TX queue.
185  */
186 struct igb_tx_queue {
187         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
188         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
189         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
190         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
191         uint32_t               txd_type;      /**< Device-specific TXD type */
192         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
193         uint16_t               tx_tail; /**< Current value of TDT register. */
194         uint16_t               tx_head;
195         /**< Index of first used TX descriptor. */
196         uint16_t               queue_id; /**< TX queue index. */
197         uint16_t               reg_idx;  /**< TX queue register index. */
198         uint8_t                port_id;  /**< Device port identifier. */
199         uint8_t                pthresh;  /**< Prefetch threshold register. */
200         uint8_t                hthresh;  /**< Host threshold register. */
201         uint8_t                wthresh;  /**< Write-back threshold register. */
202         uint32_t               ctx_curr;
203         /**< Index of the currently used hardware context. */
204         uint32_t               ctx_start;
205         /**< Start context position for transmit queue. */
206         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
207         /**< Hardware context history.*/
208 };
209
210 #if 1
211 #define RTE_PMD_USE_PREFETCH
212 #endif
213
214 #ifdef RTE_PMD_USE_PREFETCH
215 #define rte_igb_prefetch(p)     rte_prefetch0(p)
216 #else
217 #define rte_igb_prefetch(p)     do {} while(0)
218 #endif
219
220 #ifdef RTE_PMD_PACKET_PREFETCH
221 #define rte_packet_prefetch(p) rte_prefetch1(p)
222 #else
223 #define rte_packet_prefetch(p)  do {} while(0)
224 #endif
225
226 /*
227  * Macro for VMDq feature for 1 GbE NIC.
228  */
229 #define E1000_VMOLR_SIZE                        (8)
230
231 /*********************************************************************
232  *
233  *  TX function
234  *
235  **********************************************************************/
236
237 /*
238  * Advanced context descriptors are almost the same between igb and ixgbe.
239  * This is kept as a separate function to leave room for optimization here;
240  * rework is required to use the pre-defined values.
241  */
242
243 static inline void
244 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
245                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
246                 uint16_t ol_flags, uint32_t vlan_macip_lens)
247 {
248         uint32_t type_tucmd_mlhl;
249         uint32_t mss_l4len_idx;
250         uint32_t ctx_idx, ctx_curr;
251         uint32_t cmp_mask;
252
253         ctx_curr = txq->ctx_curr;
254         ctx_idx = ctx_curr + txq->ctx_start;
255
256         cmp_mask = 0;
257         type_tucmd_mlhl = 0;
258
259         if (ol_flags & PKT_TX_VLAN_PKT) {
260                 cmp_mask |= TX_VLAN_CMP_MASK;
261         }
262
263         if (ol_flags & PKT_TX_IP_CKSUM) {
264                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
265                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
266         }
267
268         /* Specify which HW CTX to upload. */
269         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
270         switch (ol_flags & PKT_TX_L4_MASK) {
271         case PKT_TX_UDP_CKSUM:
272                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
273                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
276                 break;
277         case PKT_TX_TCP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         case PKT_TX_SCTP_CKSUM:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
288                 break;
289         default:
290                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292                 break;
293         }
294
295         txq->ctx_cache[ctx_curr].flags           = ol_flags;
296         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
297         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
298                 vlan_macip_lens & cmp_mask;
299
300         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
301         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
302         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
303         ctx_txd->seqnum_seed     = 0;
304 }
305
306 /*
307  * Check which hardware context can be used. Use the existing match
308  * or create a new context descriptor.
309  */
310 static inline uint32_t
311 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
312                 uint32_t vlan_macip_lens)
313 {
314         /* If it matches the currently used context */
315         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
316                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
317                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
318                         return txq->ctx_curr;
319         }
320
321         /* If it matches the other cached context */
322         txq->ctx_curr ^= 1;
323         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
324                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
325                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
326                         return txq->ctx_curr;
327         }
328
329         /* Mismatch: a new context descriptor must be built */
330         return (IGB_CTX_NUM);
331 }
332
333 static inline uint32_t
334 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
335 {
336         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
337         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
338         uint32_t tmp;
339
340         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
341         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
342         return tmp;
343 }
344
345 static inline uint32_t
346 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
347 {
348         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
349         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
350 }
351
352 uint16_t
353 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
354                uint16_t nb_pkts)
355 {
356         struct igb_tx_queue *txq;
357         struct igb_tx_entry *sw_ring;
358         struct igb_tx_entry *txe, *txn;
359         volatile union e1000_adv_tx_desc *txr;
360         volatile union e1000_adv_tx_desc *txd;
361         struct rte_mbuf     *tx_pkt;
362         struct rte_mbuf     *m_seg;
363         union igb_vlan_macip vlan_macip_lens;
364         uint64_t buf_dma_addr;
365         uint32_t olinfo_status;
366         uint32_t cmd_type_len;
367         uint32_t pkt_len;
368         uint16_t slen;
369         uint16_t ol_flags;
370         uint16_t tx_end;
371         uint16_t tx_id;
372         uint16_t tx_last;
373         uint16_t nb_tx;
374         uint16_t tx_ol_req;
375         uint32_t new_ctx = 0;
376         uint32_t ctx = 0;
377
378         txq = tx_queue;
379         sw_ring = txq->sw_ring;
380         txr     = txq->tx_ring;
381         tx_id   = txq->tx_tail;
382         txe = &sw_ring[tx_id];
383
384         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
385                 tx_pkt = *tx_pkts++;
386                 pkt_len = tx_pkt->pkt_len;
387
388                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
389
390                 /*
391                  * The number of descriptors that must be allocated for a
392                  * packet is the number of segments of that packet, plus 1
393                  * Context Descriptor for the VLAN Tag Identifier, if any.
394                  * Determine the last TX descriptor to allocate in the TX ring
395                  * for the packet, starting from the current position (tx_id)
396                  * in the ring.
397                  */
398                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
399
400                 ol_flags = tx_pkt->ol_flags;
401                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
402                 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
403                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
404
405                 /* If a Context Descriptor needs to be built. */
406                 if (tx_ol_req) {
407                         ctx = what_advctx_update(txq, tx_ol_req,
408                                 vlan_macip_lens.data);
409                         /* Only allocate a context descriptor if required */
410                         new_ctx = (ctx == IGB_CTX_NUM);
411                         ctx = txq->ctx_curr;
412                         tx_last = (uint16_t) (tx_last + new_ctx);
413                 }
414                 if (tx_last >= txq->nb_tx_desc)
415                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
416
417                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
418                            " tx_first=%u tx_last=%u\n",
419                            (unsigned) txq->port_id,
420                            (unsigned) txq->queue_id,
421                            (unsigned) pkt_len,
422                            (unsigned) tx_id,
423                            (unsigned) tx_last);
424
425                 /*
426                  * Check if there are enough free descriptors in the TX ring
427                  * to transmit the next packet.
428                  * This operation is based on the two following rules:
429                  *
430                  *   1- Only check that the last needed TX descriptor can be
431                  *      allocated (by construction, if that descriptor is free,
432                  *      all intermediate ones are also free).
433                  *
434                  *      For this purpose, the index of the last TX descriptor
435                  *      used for a packet (the "last descriptor" of a packet)
436                  *      is recorded in the TX entries (the last one included)
437                  *      that are associated with all TX descriptors allocated
438                  *      for that packet.
439                  *
440                  *   2- Avoid allocating the last free TX descriptor of the
441                  *      ring, so that the TDT register is never set to the
442                  *      same value stored in parallel by the NIC in the TDH
443                  *      register, which would make the TX engine of the NIC
444                  *      enter a deadlock situation.
445                  *
446                  *      By extension, avoid allocating a free descriptor that
447                  *      belongs to the last set of free descriptors allocated
448                  *      to the same previously transmitted packet.
449                  */
450
451                 /*
452                  * Find the "last descriptor" of the packet, if any, that
453                  * previously used the last descriptor we need to allocate.
454                  */
455                 tx_end = sw_ring[tx_last].last_id;
456
457                 /*
458                  * The next descriptor following that "last descriptor" in the
459                  * ring.
460                  */
461                 tx_end = sw_ring[tx_end].next_id;
462
463                 /*
464                  * The "last descriptor" associated with that next descriptor.
465                  */
466                 tx_end = sw_ring[tx_end].last_id;
467
468                 /*
469                  * Check that this descriptor is free.
470                  */
471                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
472                         if (nb_tx == 0)
473                                 return (0);
474                         goto end_of_tx;
475                 }
476
477                 /*
478                  * Set common flags of all TX Data Descriptors.
479                  *
480                  * The following bits must be set in all Data Descriptors:
481                  *   - E1000_ADVTXD_DTYP_DATA
482                  *   - E1000_ADVTXD_DCMD_DEXT
483                  *
484                  * The following bits must be set in the first Data Descriptor
485                  * and are ignored in the other ones:
486                  *   - E1000_ADVTXD_DCMD_IFCS
487                  *   - E1000_ADVTXD_MAC_1588
488                  *   - E1000_ADVTXD_DCMD_VLE
489                  *
490                  * The following bits must only be set in the last Data
491                  * Descriptor:
492                  *   - E1000_TXD_CMD_EOP
493                  *
494                  * The following bits can be set in any Data Descriptor, but
495                  * are only set in the last Data Descriptor:
496                  *   - E1000_TXD_CMD_RS
497                  */
498                 cmd_type_len = txq->txd_type |
499                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
500                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
501 #if defined(RTE_LIBRTE_IEEE1588)
502                 if (ol_flags & PKT_TX_IEEE1588_TMST)
503                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
504 #endif
505                 if (tx_ol_req) {
506                         /* Setup TX Advanced context descriptor if required */
507                         if (new_ctx) {
508                                 volatile struct e1000_adv_tx_context_desc *
509                                     ctx_txd;
510
511                                 ctx_txd = (volatile struct
512                                     e1000_adv_tx_context_desc *)
513                                     &txr[tx_id];
514
515                                 txn = &sw_ring[txe->next_id];
516                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
517
518                                 if (txe->mbuf != NULL) {
519                                         rte_pktmbuf_free_seg(txe->mbuf);
520                                         txe->mbuf = NULL;
521                                 }
522
523                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
524                                     vlan_macip_lens.data);
525
526                                 txe->last_id = tx_last;
527                                 tx_id = txe->next_id;
528                                 txe = txn;
529                         }
530
531                         /* Setup the TX Advanced Data Descriptor */
532                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
533                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
534                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
535                 }
536
537                 m_seg = tx_pkt;
538                 do {
539                         txn = &sw_ring[txe->next_id];
540                         txd = &txr[tx_id];
541
542                         if (txe->mbuf != NULL)
543                                 rte_pktmbuf_free_seg(txe->mbuf);
544                         txe->mbuf = m_seg;
545
546                         /*
547                          * Set up transmit descriptor.
548                          */
549                         slen = (uint16_t) m_seg->data_len;
550                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
551                         txd->read.buffer_addr =
552                                 rte_cpu_to_le_64(buf_dma_addr);
553                         txd->read.cmd_type_len =
554                                 rte_cpu_to_le_32(cmd_type_len | slen);
555                         txd->read.olinfo_status =
556                                 rte_cpu_to_le_32(olinfo_status);
557                         txe->last_id = tx_last;
558                         tx_id = txe->next_id;
559                         txe = txn;
560                         m_seg = m_seg->next;
561                 } while (m_seg != NULL);
562
563                 /*
564                  * The last packet data descriptor needs End Of Packet (EOP)
565                  * and Report Status (RS).
566                  */
567                 txd->read.cmd_type_len |=
568                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
569         }
570  end_of_tx:
571         rte_wmb();
572
573         /*
574          * Set the Transmit Descriptor Tail (TDT).
575          */
576         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
577         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
578                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
579                    (unsigned) tx_id, (unsigned) nb_tx);
580         txq->tx_tail = tx_id;
581
582         return (nb_tx);
583 }
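
/*
 * Illustrative usage sketch, not part of the driver: applications do not call
 * eth_igb_xmit_pkts() directly; it is installed as dev->tx_pkt_burst and is
 * reached through rte_eth_tx_burst(). Port and queue numbers below are
 * arbitrary example values.
 */
static inline void
igb_example_tx_burst(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	/* Free any packets the TX ring could not accept. */
	while (nb_sent < nb_pkts)
		rte_pktmbuf_free(pkts[nb_sent++]);
}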
584
585 /*********************************************************************
586  *
587  *  RX functions
588  *
589  **********************************************************************/
590 static inline uint16_t
591 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
592 {
593         uint16_t pkt_flags;
594
595         static uint16_t ip_pkt_types_map[16] = {
596                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
597                 PKT_RX_IPV6_HDR, 0, 0, 0,
598                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
599                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
600         };
601
602 #if defined(RTE_LIBRTE_IEEE1588)
603         static uint32_t ip_pkt_etqf_map[8] = {
604                 0, 0, 0, PKT_RX_IEEE1588_PTP,
605                 0, 0, 0, 0,
606         };
607
608         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
609                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
610                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
611 #else
612         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
613                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
614 #endif
615         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
616                                                 0 : PKT_RX_RSS_HASH));
617 }
618
619 static inline uint16_t
620 rx_desc_status_to_pkt_flags(uint32_t rx_status)
621 {
622         uint16_t pkt_flags;
623
624         /* Check if VLAN present */
625         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
626                                                 PKT_RX_VLAN_PKT : 0);
627
628 #if defined(RTE_LIBRTE_IEEE1588)
629         if (rx_status & E1000_RXD_STAT_TMST)
630                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
631 #endif
632         return pkt_flags;
633 }
634
635 static inline uint16_t
636 rx_desc_error_to_pkt_flags(uint32_t rx_status)
637 {
638         /*
639          * Bit 30: IPE, IPv4 checksum error
640          * Bit 29: L4I, L4I integrity error
641          */
642
643         static uint16_t error_to_pkt_flags_map[4] = {
644                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
645                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
646         };
647         return error_to_pkt_flags_map[(rx_status >>
648                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
649 }
650
651 uint16_t
652 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
653                uint16_t nb_pkts)
654 {
655         struct igb_rx_queue *rxq;
656         volatile union e1000_adv_rx_desc *rx_ring;
657         volatile union e1000_adv_rx_desc *rxdp;
658         struct igb_rx_entry *sw_ring;
659         struct igb_rx_entry *rxe;
660         struct rte_mbuf *rxm;
661         struct rte_mbuf *nmb;
662         union e1000_adv_rx_desc rxd;
663         uint64_t dma_addr;
664         uint32_t staterr;
665         uint32_t hlen_type_rss;
666         uint16_t pkt_len;
667         uint16_t rx_id;
668         uint16_t nb_rx;
669         uint16_t nb_hold;
670         uint16_t pkt_flags;
671
672         nb_rx = 0;
673         nb_hold = 0;
674         rxq = rx_queue;
675         rx_id = rxq->rx_tail;
676         rx_ring = rxq->rx_ring;
677         sw_ring = rxq->sw_ring;
678         while (nb_rx < nb_pkts) {
679                 /*
680                  * The order of operations here is important as the DD status
681                  * bit must not be read after any other descriptor fields.
682                  * rx_ring and rxdp are pointing to volatile data so the order
683                  * of accesses cannot be reordered by the compiler. If they were
684                  * not volatile, they could be reordered which could lead to
685                  * using invalid descriptor fields when read from rxd.
686                  */
687                 rxdp = &rx_ring[rx_id];
688                 staterr = rxdp->wb.upper.status_error;
689                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
690                         break;
691                 rxd = *rxdp;
692
693                 /*
694                  * End of packet.
695                  *
696                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
697                  * likely to be invalid and to be dropped by the various
698                  * validation checks performed by the network stack.
699                  *
700                  * Allocate a new mbuf to replenish the RX ring descriptor.
701                  * If the allocation fails:
702                  *    - arrange for that RX descriptor to be the first one
703                  *      being parsed the next time the receive function is
704                  *      invoked [on the same queue].
705                  *
706                  *    - Stop parsing the RX ring and return immediately.
707                  *
708                  * This policy does not drop the packet received in the RX
709                  * descriptor for which the allocation of a new mbuf failed.
710                  * Thus, it allows that packet to be later retrieved if
711                  * mbufs have been freed in the meantime.
712                  * As a side effect, holding RX descriptors instead of
713                  * systematically giving them back to the NIC may lead to
714                  * RX ring exhaustion situations.
715                  * However, the NIC can gracefully prevent such situations
716                  * from happening by sending specific "back-pressure" flow control
717                  * frames to its peer(s).
718                  */
719                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
720                            "staterr=0x%x pkt_len=%u\n",
721                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
722                            (unsigned) rx_id, (unsigned) staterr,
723                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
724
725                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
726                 if (nmb == NULL) {
727                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
728                                    "queue_id=%u\n", (unsigned) rxq->port_id,
729                                    (unsigned) rxq->queue_id);
730                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
731                         break;
732                 }
733
734                 nb_hold++;
735                 rxe = &sw_ring[rx_id];
736                 rx_id++;
737                 if (rx_id == rxq->nb_rx_desc)
738                         rx_id = 0;
739
740                 /* Prefetch next mbuf while processing current one. */
741                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
742
743                 /*
744                  * When next RX descriptor is on a cache-line boundary,
745                  * prefetch the next 4 RX descriptors and the next 8 pointers
746                  * to mbufs.
747                  */
748                 if ((rx_id & 0x3) == 0) {
749                         rte_igb_prefetch(&rx_ring[rx_id]);
750                         rte_igb_prefetch(&sw_ring[rx_id]);
751                 }
752
753                 rxm = rxe->mbuf;
754                 rxe->mbuf = nmb;
755                 dma_addr =
756                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
757                 rxdp->read.hdr_addr = dma_addr;
758                 rxdp->read.pkt_addr = dma_addr;
759
760                 /*
761                  * Initialize the returned mbuf.
762                  * 1) setup generic mbuf fields:
763                  *    - number of segments,
764                  *    - next segment,
765                  *    - packet length,
766                  *    - RX port identifier.
767                  * 2) integrate hardware offload data, if any:
768                  *    - RSS flag & hash,
769                  *    - IP checksum flag,
770                  *    - VLAN TCI, if any,
771                  *    - error flags.
772                  */
773                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
774                                       rxq->crc_len);
775                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
776                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
777                 rxm->nb_segs = 1;
778                 rxm->next = NULL;
779                 rxm->pkt_len = pkt_len;
780                 rxm->data_len = pkt_len;
781                 rxm->port = rxq->port_id;
782
783                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
784                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
785                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
786                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
787
788                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
789                 pkt_flags = (uint16_t)(pkt_flags |
790                                 rx_desc_status_to_pkt_flags(staterr));
791                 pkt_flags = (uint16_t)(pkt_flags |
792                                 rx_desc_error_to_pkt_flags(staterr));
793                 rxm->ol_flags = pkt_flags;
794
795                 /*
796                  * Store the mbuf address into the next entry of the array
797                  * of returned packets.
798                  */
799                 rx_pkts[nb_rx++] = rxm;
800         }
801         rxq->rx_tail = rx_id;
802
803         /*
804          * If the number of free RX descriptors is greater than the RX free
805          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
806          * register.
807          * Update the RDT with the value of the last processed RX descriptor
808          * minus 1, to guarantee that the RDT register is never equal to the
809          * RDH register, which creates a "full" ring situation from the
810          * hardware point of view...
811          */
812         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
813         if (nb_hold > rxq->rx_free_thresh) {
814                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
815                            "nb_hold=%u nb_rx=%u\n",
816                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
817                            (unsigned) rx_id, (unsigned) nb_hold,
818                            (unsigned) nb_rx);
819                 rx_id = (uint16_t) ((rx_id == 0) ?
820                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
821                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
822                 nb_hold = 0;
823         }
824         rxq->nb_rx_hold = nb_hold;
825         return (nb_rx);
826 }
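
/*
 * Illustrative usage sketch, not part of the driver: eth_igb_recv_pkts() is
 * installed as dev->rx_pkt_burst and reached through rte_eth_rx_burst().
 * Packet data is then accessed through the mbuf offset, e.g. with
 * rte_pktmbuf_mtod(), which resolves to buf_addr + data_off.
 * Port and queue numbers are arbitrary example values.
 */
static inline void
igb_example_rx_burst(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i;
	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	for (i = 0; i < nb; i++) {
		/* Packet data now lives at buf_addr + data_off. */
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i], char *));
		rte_pktmbuf_free(pkts[i]);
	}
}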
827
828 uint16_t
829 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
830                          uint16_t nb_pkts)
831 {
832         struct igb_rx_queue *rxq;
833         volatile union e1000_adv_rx_desc *rx_ring;
834         volatile union e1000_adv_rx_desc *rxdp;
835         struct igb_rx_entry *sw_ring;
836         struct igb_rx_entry *rxe;
837         struct rte_mbuf *first_seg;
838         struct rte_mbuf *last_seg;
839         struct rte_mbuf *rxm;
840         struct rte_mbuf *nmb;
841         union e1000_adv_rx_desc rxd;
842         uint64_t dma; /* Physical address of mbuf data buffer */
843         uint32_t staterr;
844         uint32_t hlen_type_rss;
845         uint16_t rx_id;
846         uint16_t nb_rx;
847         uint16_t nb_hold;
848         uint16_t data_len;
849         uint16_t pkt_flags;
850
851         nb_rx = 0;
852         nb_hold = 0;
853         rxq = rx_queue;
854         rx_id = rxq->rx_tail;
855         rx_ring = rxq->rx_ring;
856         sw_ring = rxq->sw_ring;
857
858         /*
859          * Retrieve RX context of current packet, if any.
860          */
861         first_seg = rxq->pkt_first_seg;
862         last_seg = rxq->pkt_last_seg;
863
864         while (nb_rx < nb_pkts) {
865         next_desc:
866                 /*
867                  * The order of operations here is important as the DD status
868                  * bit must not be read after any other descriptor fields.
869                  * rx_ring and rxdp are pointing to volatile data so the order
870                  * of accesses cannot be reordered by the compiler. If they were
871                  * not volatile, they could be reordered which could lead to
872                  * using invalid descriptor fields when read from rxd.
873                  */
874                 rxdp = &rx_ring[rx_id];
875                 staterr = rxdp->wb.upper.status_error;
876                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
877                         break;
878                 rxd = *rxdp;
879
880                 /*
881                  * Descriptor done.
882                  *
883                  * Allocate a new mbuf to replenish the RX ring descriptor.
884                  * If the allocation fails:
885                  *    - arrange for that RX descriptor to be the first one
886                  *      being parsed the next time the receive function is
887                  *      invoked [on the same queue].
888                  *
889                  *    - Stop parsing the RX ring and return immediately.
890                  *
891                  * This policy does not drop the packet received in the RX
892                  * descriptor for which the allocation of a new mbuf failed.
893                  * Thus, it allows that packet to be later retrieved if
894                  * mbuf have been freed in the mean time.
895                  * As a side effect, holding RX descriptors instead of
896                  * systematically giving them back to the NIC may lead to
897                  * RX ring exhaustion situations.
898                  * However, the NIC can gracefully prevent such situations
899                  * to happen by sending specific "back-pressure" flow control
900                  * frames to its peer(s).
901                  */
902                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
903                            "staterr=0x%x data_len=%u\n",
904                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
905                            (unsigned) rx_id, (unsigned) staterr,
906                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
907
908                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
909                 if (nmb == NULL) {
910                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
911                                    "queue_id=%u\n", (unsigned) rxq->port_id,
912                                    (unsigned) rxq->queue_id);
913                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
914                         break;
915                 }
916
917                 nb_hold++;
918                 rxe = &sw_ring[rx_id];
919                 rx_id++;
920                 if (rx_id == rxq->nb_rx_desc)
921                         rx_id = 0;
922
923                 /* Prefetch next mbuf while processing current one. */
924                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
925
926                 /*
927                  * When next RX descriptor is on a cache-line boundary,
928                  * prefetch the next 4 RX descriptors and the next 8 pointers
929                  * to mbufs.
930                  */
931                 if ((rx_id & 0x3) == 0) {
932                         rte_igb_prefetch(&rx_ring[rx_id]);
933                         rte_igb_prefetch(&sw_ring[rx_id]);
934                 }
935
936                 /*
937                  * Update RX descriptor with the physical address of the new
938                  * data buffer of the newly allocated mbuf.
939                  */
940                 rxm = rxe->mbuf;
941                 rxe->mbuf = nmb;
942                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
943                 rxdp->read.pkt_addr = dma;
944                 rxdp->read.hdr_addr = dma;
945
946                 /*
947                  * Set data length & data buffer address of mbuf.
948                  */
949                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
950                 rxm->data_len = data_len;
951                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
952
953                 /*
954                  * If this is the first buffer of the received packet,
955                  * set the pointer to the first mbuf of the packet and
956                  * initialize its context.
957                  * Otherwise, update the total length and the number of segments
958                  * of the current scattered packet, and update the pointer to
959                  * the last mbuf of the current packet.
960                  */
961                 if (first_seg == NULL) {
962                         first_seg = rxm;
963                         first_seg->pkt_len = data_len;
964                         first_seg->nb_segs = 1;
965                 } else {
966                         first_seg->pkt_len += data_len;
967                         first_seg->nb_segs++;
968                         last_seg->next = rxm;
969                 }
970
971                 /*
972                  * If this is not the last buffer of the received packet,
973                  * update the pointer to the last mbuf of the current scattered
974                  * packet and continue to parse the RX ring.
975                  */
976                 if (! (staterr & E1000_RXD_STAT_EOP)) {
977                         last_seg = rxm;
978                         goto next_desc;
979                 }
980
981                 /*
982                  * This is the last buffer of the received packet.
983                  * If the CRC is not stripped by the hardware:
984                  *   - Subtract the CRC length from the total packet length.
985                  *   - If the last buffer only contains the whole CRC or a part
986                  *     of it, free the mbuf associated with the last buffer.
987                  *     If part of the CRC is also contained in the previous
988                  *     mbuf, subtract the length of that CRC part from the
989                  *     data length of the previous mbuf.
990                  */
991                 rxm->next = NULL;
992                 if (unlikely(rxq->crc_len > 0)) {
993                         first_seg->pkt_len -= ETHER_CRC_LEN;
994                         if (data_len <= ETHER_CRC_LEN) {
995                                 rte_pktmbuf_free_seg(rxm);
996                                 first_seg->nb_segs--;
997                                 last_seg->data_len = (uint16_t)
998                                         (last_seg->data_len -
999                                          (ETHER_CRC_LEN - data_len));
1000                                 last_seg->next = NULL;
1001                         } else
1002                                 rxm->data_len =
1003                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1004                 }
1005
1006                 /*
1007                  * Initialize the first mbuf of the returned packet:
1008                  *    - RX port identifier,
1009                  *    - hardware offload data, if any:
1010                  *      - RSS flag & hash,
1011                  *      - IP checksum flag,
1012                  *      - VLAN TCI, if any,
1013                  *      - error flags.
1014                  */
1015                 first_seg->port = rxq->port_id;
1016                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1017
1018                 /*
1019                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1020                  * set in the pkt_flags field.
1021                  */
1022                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1023                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1024                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1025                 pkt_flags = (uint16_t)(pkt_flags |
1026                                 rx_desc_status_to_pkt_flags(staterr));
1027                 pkt_flags = (uint16_t)(pkt_flags |
1028                                 rx_desc_error_to_pkt_flags(staterr));
1029                 first_seg->ol_flags = pkt_flags;
1030
1031                 /* Prefetch data of first segment, if configured to do so. */
1032                 rte_packet_prefetch((char *)first_seg->buf_addr +
1033                         first_seg->data_off);
1034
1035                 /*
1036                  * Store the mbuf address into the next entry of the array
1037                  * of returned packets.
1038                  */
1039                 rx_pkts[nb_rx++] = first_seg;
1040
1041                 /*
1042                  * Set up the receive context for a new packet.
1043                  */
1044                 first_seg = NULL;
1045         }
1046
1047         /*
1048          * Record index of the next RX descriptor to probe.
1049          */
1050         rxq->rx_tail = rx_id;
1051
1052         /*
1053          * Save receive context.
1054          */
1055         rxq->pkt_first_seg = first_seg;
1056         rxq->pkt_last_seg = last_seg;
1057
1058         /*
1059          * If the number of free RX descriptors is greater than the RX free
1060          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1061          * register.
1062          * Update the RDT with the value of the last processed RX descriptor
1063          * minus 1, to guarantee that the RDT register is never equal to the
1064          * RDH register, which creates a "full" ring situation from the
1065          * hardware point of view...
1066          */
1067         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1068         if (nb_hold > rxq->rx_free_thresh) {
1069                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1070                            "nb_hold=%u nb_rx=%u\n",
1071                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1072                            (unsigned) rx_id, (unsigned) nb_hold,
1073                            (unsigned) nb_rx);
1074                 rx_id = (uint16_t) ((rx_id == 0) ?
1075                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1076                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1077                 nb_hold = 0;
1078         }
1079         rxq->nb_rx_hold = nb_hold;
1080         return (nb_rx);
1081 }
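
/*
 * Illustrative sketch, not part of the driver: a multi-segment packet built by
 * the scattered RX path above is walked through the mbuf "next" chain, and
 * pkt_len on the first segment equals the sum of the per-segment data_len
 * values. The hypothetical helper below only restates that invariant.
 */
static inline uint32_t
igb_example_count_seg_bytes(const struct rte_mbuf *first_seg)
{
	const struct rte_mbuf *seg;
	uint32_t bytes = 0;

	for (seg = first_seg; seg != NULL; seg = seg->next)
		bytes += seg->data_len; /* sums to first_seg->pkt_len */
	return bytes;
}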
1082
1083 /*
1084  * Rings setup and release.
1085  *
1086  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1087  * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
1088  * This also optimizes the cache line size effect.
1089  * The hardware supports cache line sizes of up to 128 bytes.
1090  */
1091 #define IGB_ALIGN 128
1092
1093 /*
1094  * Maximum number of Ring Descriptors.
1095  *
1096  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1097  * descriptors must satisfy the following condition:
1098  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1099  */
1100 #define IGB_MIN_RING_DESC 32
1101 #define IGB_MAX_RING_DESC 4096
1102
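/*
 * Illustrative sketch, not part of the driver: a ring size is acceptable when
 * it stays within the bounds above and keeps RDLEN/TDLEN a multiple of
 * IGB_ALIGN (128) bytes, which is what the queue setup functions below check.
 */
static inline int
igb_example_ring_size_ok(uint16_t nb_desc, size_t desc_size)
{
	if (nb_desc < IGB_MIN_RING_DESC || nb_desc > IGB_MAX_RING_DESC)
		return 0;
	return ((nb_desc * desc_size) % IGB_ALIGN) == 0;
}
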
1103 static const struct rte_memzone *
1104 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1105                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1106 {
1107         char z_name[RTE_MEMZONE_NAMESIZE];
1108         const struct rte_memzone *mz;
1109
1110         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1111                         dev->driver->pci_drv.name, ring_name,
1112                                 dev->data->port_id, queue_id);
1113         mz = rte_memzone_lookup(z_name);
1114         if (mz)
1115                 return mz;
1116
1117 #ifdef RTE_LIBRTE_XEN_DOM0
1118         return rte_memzone_reserve_bounded(z_name, ring_size,
1119                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1120 #else
1121         return rte_memzone_reserve_aligned(z_name, ring_size,
1122                         socket_id, 0, IGB_ALIGN);
1123 #endif
1124 }
1125
1126 static void
1127 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1128 {
1129         unsigned i;
1130
1131         if (txq->sw_ring != NULL) {
1132                 for (i = 0; i < txq->nb_tx_desc; i++) {
1133                         if (txq->sw_ring[i].mbuf != NULL) {
1134                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1135                                 txq->sw_ring[i].mbuf = NULL;
1136                         }
1137                 }
1138         }
1139 }
1140
1141 static void
1142 igb_tx_queue_release(struct igb_tx_queue *txq)
1143 {
1144         if (txq != NULL) {
1145                 igb_tx_queue_release_mbufs(txq);
1146                 rte_free(txq->sw_ring);
1147                 rte_free(txq);
1148         }
1149 }
1150
1151 void
1152 eth_igb_tx_queue_release(void *txq)
1153 {
1154         igb_tx_queue_release(txq);
1155 }
1156
1157 static void
1158 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1159 {
1160         txq->tx_head = 0;
1161         txq->tx_tail = 0;
1162         txq->ctx_curr = 0;
1163         memset((void*)&txq->ctx_cache, 0,
1164                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1165 }
1166
1167 static void
1168 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1169 {
1170         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1171                         .buffer_addr = 0}};
1172         struct igb_tx_entry *txe = txq->sw_ring;
1173         uint16_t i, prev;
1174         struct e1000_hw *hw;
1175
1176         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1177         /* Zero out HW ring memory */
1178         for (i = 0; i < txq->nb_tx_desc; i++) {
1179                 txq->tx_ring[i] = zeroed_desc;
1180         }
1181
1182         /* Initialize ring entries */
1183         prev = (uint16_t)(txq->nb_tx_desc - 1);
1184         for (i = 0; i < txq->nb_tx_desc; i++) {
1185                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1186
1187                 txd->wb.status = E1000_TXD_STAT_DD;
1188                 txe[i].mbuf = NULL;
1189                 txe[i].last_id = i;
1190                 txe[prev].next_id = i;
1191                 prev = i;
1192         }
1193
1194         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1195         /* 82575 specific, each tx queue will use 2 hw contexts */
1196         if (hw->mac.type == e1000_82575)
1197                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1198
1199         igb_reset_tx_queue_stat(txq);
1200 }
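
/*
 * Illustrative sketch, not part of the driver: after the reset above, the
 * software ring entries form a circular list through next_id (and last_id
 * initially points at each entry itself). Walking next_id from any index
 * visits every descriptor exactly once, as the hypothetical helper below
 * illustrates.
 */
static inline uint16_t
igb_example_ring_walk_len(const struct igb_tx_entry *sw_ring,
		uint16_t start, uint16_t nb_desc)
{
	uint16_t id = start;
	uint16_t steps = 0;

	do {
		id = sw_ring[id].next_id;
		steps++;
	} while (id != start && steps <= nb_desc);

	return steps; /* equals nb_desc for a well-formed ring */
}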
1201
1202 int
1203 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1204                          uint16_t queue_idx,
1205                          uint16_t nb_desc,
1206                          unsigned int socket_id,
1207                          const struct rte_eth_txconf *tx_conf)
1208 {
1209         const struct rte_memzone *tz;
1210         struct igb_tx_queue *txq;
1211         struct e1000_hw     *hw;
1212         uint32_t size;
1213
1214         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215
1216         /*
1217          * Validate number of transmit descriptors.
1218          * It must not exceed the hardware maximum, and must be a multiple
1219          * of IGB_ALIGN.
1220          */
1221         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1222             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1223                 return -EINVAL;
1224         }
1225
1226         /*
1227          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1228          * driver.
1229          */
1230         if (tx_conf->tx_free_thresh != 0)
1231                 RTE_LOG(WARNING, PMD,
1232                         "The tx_free_thresh parameter is not "
1233                         "used for the 1G driver.\n");
1234         if (tx_conf->tx_rs_thresh != 0)
1235                 RTE_LOG(WARNING, PMD,
1236                         "The tx_rs_thresh parameter is not "
1237                         "used for the 1G driver.\n");
1238         if (tx_conf->tx_thresh.wthresh == 0)
1239                 RTE_LOG(WARNING, PMD,
1240                         "To improve 1G driver performance, consider setting "
1241                         "the TX WTHRESH value to 4, 8, or 16.\n");
1242
1243         /* Free memory prior to re-allocation if needed */
1244         if (dev->data->tx_queues[queue_idx] != NULL) {
1245                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1246                 dev->data->tx_queues[queue_idx] = NULL;
1247         }
1248
1249         /* First allocate the tx queue data structure */
1250         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1251                                                         CACHE_LINE_SIZE);
1252         if (txq == NULL)
1253                 return (-ENOMEM);
1254
1255         /*
1256          * Allocate TX ring hardware descriptors. A memzone large enough to
1257          * handle the maximum ring size is allocated in order to allow for
1258          * resizing in later calls to the queue setup function.
1259          */
1260         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1261         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1262                                         size, socket_id);
1263         if (tz == NULL) {
1264                 igb_tx_queue_release(txq);
1265                 return (-ENOMEM);
1266         }
1267
1268         txq->nb_tx_desc = nb_desc;
1269         txq->pthresh = tx_conf->tx_thresh.pthresh;
1270         txq->hthresh = tx_conf->tx_thresh.hthresh;
1271         txq->wthresh = tx_conf->tx_thresh.wthresh;
1272         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1273                 txq->wthresh = 1;
1274         txq->queue_id = queue_idx;
1275         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1276                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1277         txq->port_id = dev->data->port_id;
1278
1279         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1280 #ifndef RTE_LIBRTE_XEN_DOM0
1281         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1282 #else
1283         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1284 #endif
1285          txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1286         /* Allocate software ring */
1287         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1288                                    sizeof(struct igb_tx_entry) * nb_desc,
1289                                    CACHE_LINE_SIZE);
1290         if (txq->sw_ring == NULL) {
1291                 igb_tx_queue_release(txq);
1292                 return (-ENOMEM);
1293         }
1294         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1295                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1296
1297         igb_reset_tx_queue(txq, dev);
1298         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1299         dev->data->tx_queues[queue_idx] = txq;
1300
1301         return (0);
1302 }
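
/*
 * Illustrative usage sketch, not part of the driver: applications reach the
 * setup function above through rte_eth_tx_queue_setup(). The descriptor
 * count, socket and threshold values below are arbitrary examples (512
 * descriptors keep TDLEN a multiple of IGB_ALIGN).
 */
static inline int
igb_example_setup_tx_queue(uint8_t port_id)
{
	static const struct rte_eth_txconf txconf = {
		.tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
	};

	/* Queue 0, 512 descriptors, NUMA socket 0. */
	return rte_eth_tx_queue_setup(port_id, 0, 512, 0, &txconf);
}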
1303
1304 static void
1305 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1306 {
1307         unsigned i;
1308
1309         if (rxq->sw_ring != NULL) {
1310                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1311                         if (rxq->sw_ring[i].mbuf != NULL) {
1312                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1313                                 rxq->sw_ring[i].mbuf = NULL;
1314                         }
1315                 }
1316         }
1317 }
1318
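/*
 * Release a RX queue: free any mbufs still held in the software ring, then
 * release the software ring and the queue structure themselves.
 */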
1319 static void
1320 igb_rx_queue_release(struct igb_rx_queue *rxq)
1321 {
1322         if (rxq != NULL) {
1323                 igb_rx_queue_release_mbufs(rxq);
1324                 rte_free(rxq->sw_ring);
1325                 rte_free(rxq);
1326         }
1327 }
1328
1329 void
1330 eth_igb_rx_queue_release(void *rxq)
1331 {
1332         igb_rx_queue_release(rxq);
1333 }
1334
1335 static void
1336 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1337 {
1338         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1339                         .pkt_addr = 0}};
1340         unsigned i;
1341
1342         /* Zero out HW ring memory */
1343         for (i = 0; i < rxq->nb_rx_desc; i++) {
1344                 rxq->rx_ring[i] = zeroed_desc;
1345         }
1346
1347         rxq->rx_tail = 0;
1348         rxq->pkt_first_seg = NULL;
1349         rxq->pkt_last_seg = NULL;
1350 }
1351
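/*
 * Illustrative usage: this function is the PMD back-end of the generic
 * rte_eth_rx_queue_setup() API.  A minimal sketch, assuming a configured
 * port "port_id", an RX configuration "rx_conf" and a pre-created packet
 * mbuf pool "mb_pool" (placeholder names):
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
 *                                  &rx_conf, mb_pool);
 *
 * nb_desc (256 here) must make the descriptor ring size a multiple of
 * IGB_ALIGN and must lie between IGB_MIN_RING_DESC and IGB_MAX_RING_DESC,
 * as checked below.
 */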
1352 int
1353 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1354                          uint16_t queue_idx,
1355                          uint16_t nb_desc,
1356                          unsigned int socket_id,
1357                          const struct rte_eth_rxconf *rx_conf,
1358                          struct rte_mempool *mp)
1359 {
1360         const struct rte_memzone *rz;
1361         struct igb_rx_queue *rxq;
1362         struct e1000_hw     *hw;
1363         unsigned int size;
1364
1365         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366
1367         /*
1368          * Validate number of receive descriptors.
1369          * It must not exceed hardware maximum, and must be multiple
1370          * of IGB_ALIGN.
1371          */
1372         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1373             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1374                 return (-EINVAL);
1375         }
1376
1377         /* Free memory prior to re-allocation if needed */
1378         if (dev->data->rx_queues[queue_idx] != NULL) {
1379                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1380                 dev->data->rx_queues[queue_idx] = NULL;
1381         }
1382
1383         /* First allocate the RX queue data structure. */
1384         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1385                           CACHE_LINE_SIZE);
1386         if (rxq == NULL)
1387                 return (-ENOMEM);
1388         rxq->mb_pool = mp;
1389         rxq->nb_rx_desc = nb_desc;
1390         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1391         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1392         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1393         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1394                 rxq->wthresh = 1;
1395         rxq->drop_en = rx_conf->rx_drop_en;
1396         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1397         rxq->queue_id = queue_idx;
1398         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1399                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1400         rxq->port_id = dev->data->port_id;
1401         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1402                                   ETHER_CRC_LEN);
1403
1404         /*
1405          *  Allocate RX ring hardware descriptors. A memzone large enough to
1406          *  handle the maximum ring size is allocated in order to allow for
1407          *  resizing in later calls to the queue setup function.
1408          */
1409         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1410         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1411         if (rz == NULL) {
1412                 igb_rx_queue_release(rxq);
1413                 return (-ENOMEM);
1414         }
1415         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1416         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1417 #ifndef RTE_LIBRTE_XEN_DOM0
1418         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1419 #else
1420         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1421 #endif
1422         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1423
1424         /* Allocate software ring. */
1425         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1426                                    sizeof(struct igb_rx_entry) * nb_desc,
1427                                    CACHE_LINE_SIZE);
1428         if (rxq->sw_ring == NULL) {
1429                 igb_rx_queue_release(rxq);
1430                 return (-ENOMEM);
1431         }
1432         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1433                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1434
1435         dev->data->rx_queues[queue_idx] = rxq;
1436         igb_reset_rx_queue(rxq);
1437
1438         return 0;
1439 }
1440
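/*
 * Return an estimate of the number of RX descriptors that the hardware has
 * marked as done (DD bit set), starting from the current software tail.
 * The ring is scanned in steps of IGB_RXQ_SCAN_INTERVAL descriptors, so the
 * result has that granularity.  Exposed to applications through
 * rte_eth_rx_queue_count().
 */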
1441 uint32_t
1442 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1443 {
1444 #define IGB_RXQ_SCAN_INTERVAL 4
1445         volatile union e1000_adv_rx_desc *rxdp;
1446         struct igb_rx_queue *rxq;
1447         uint32_t desc = 0;
1448
1449         if (rx_queue_id >= dev->data->nb_rx_queues) {
1450                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1451                 return 0;
1452         }
1453
1454         rxq = dev->data->rx_queues[rx_queue_id];
1455         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1456
1457         while ((desc < rxq->nb_rx_desc) &&
1458                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1459                 desc += IGB_RXQ_SCAN_INTERVAL;
1460                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1461                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1462                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1463                                 desc - rxq->nb_rx_desc]);
1464         }
1465
1466         return desc;
1467 }
1468
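/*
 * Check whether the RX descriptor at position rx_tail + offset (modulo the
 * ring size) has been written back by the hardware, i.e. whether its DD bit
 * is set.  Applications reach this through rte_eth_rx_descriptor_done();
 * a minimal sketch with a placeholder "port_id":
 *
 *     if (rte_eth_rx_descriptor_done(port_id, 0, 0))
 *             ...  at least one packet is ready on queue 0 ...
 */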
1469 int
1470 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1471 {
1472         volatile union e1000_adv_rx_desc *rxdp;
1473         struct igb_rx_queue *rxq = rx_queue;
1474         uint32_t desc;
1475
1476         if (unlikely(offset >= rxq->nb_rx_desc))
1477                 return 0;
1478         desc = rxq->rx_tail + offset;
1479         if (desc >= rxq->nb_rx_desc)
1480                 desc -= rxq->nb_rx_desc;
1481
1482         rxdp = &rxq->rx_ring[desc];
1483         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1484 }
1485
1486 void
1487 igb_dev_clear_queues(struct rte_eth_dev *dev)
1488 {
1489         uint16_t i;
1490         struct igb_tx_queue *txq;
1491         struct igb_rx_queue *rxq;
1492
1493         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1494                 txq = dev->data->tx_queues[i];
1495                 if (txq != NULL) {
1496                         igb_tx_queue_release_mbufs(txq);
1497                         igb_reset_tx_queue(txq, dev);
1498                 }
1499         }
1500
1501         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1502                 rxq = dev->data->rx_queues[i];
1503                 if (rxq != NULL) {
1504                         igb_rx_queue_release_mbufs(rxq);
1505                         igb_reset_rx_queue(rxq);
1506                 }
1507         }
1508 }
1509
1510 /**
1511  * Receive Side Scaling (RSS).
1512  * See section 7.1.1.7 in the following document:
1513  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1514  *
1515  * Principles:
1516  * The source and destination IP addresses of the IP header and the source and
1517  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1518  * against a configurable random key to compute a 32-bit RSS hash result.
1519  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1520  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1521  * RSS output index, which is used as the index of the RX queue in which to
1522  * store the received packet.
1523  * The following output is supplied in the RX write-back descriptor:
1524  *     - 32-bit result of the Microsoft RSS hash function,
1525  *     - 4-bit RSS type field.
1526  */
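/*
 * Illustrative sketch (not driver code) of the RETA lookup described above:
 * given the 32-bit hash "rss_hash" computed by the NIC and the 128-entry
 * table programmed in igb_rss_configure() below, the destination queue is
 * roughly:
 *
 *     reta_idx = rss_hash & 0x7F;        7 LSBs select the RETA entry
 *     rx_queue = reta[reta_idx];         3-bit RSS output index
 *
 * where "reta" stands for the table held in the E1000_RETA registers.
 */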
1527
1528 /*
1529  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1530  * Used as the default key.
1531  */
1532 static uint8_t rss_intel_key[40] = {
1533         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1534         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1535         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1536         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1537         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1538 };
1539
1540 static void
1541 igb_rss_disable(struct rte_eth_dev *dev)
1542 {
1543         struct e1000_hw *hw;
1544         uint32_t mrqc;
1545
1546         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1547         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1548         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1549         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1550 }
1551
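/*
 * Program the RSS key and the set of enabled hash functions into the
 * hardware.  The 40-byte key is packed into ten 32-bit RSSRK registers,
 * four key bytes per register in little-endian order; for example
 * (illustrative), key bytes {0x6D, 0x5A, 0x56, 0xDA} are written as
 * RSSRK(0) = 0xDA565A6D.
 */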
1552 static void
1553 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1554 {
1555         uint8_t  *hash_key;
1556         uint32_t rss_key;
1557         uint32_t mrqc;
1558         uint64_t rss_hf;
1559         uint16_t i;
1560
1561         hash_key = rss_conf->rss_key;
1562         if (hash_key != NULL) {
1563                 /* Fill in RSS hash key */
1564                 for (i = 0; i < 10; i++) {
1565                         rss_key  = hash_key[(i * 4)];
1566                         rss_key |= hash_key[(i * 4) + 1] << 8;
1567                         rss_key |= hash_key[(i * 4) + 2] << 16;
1568                         rss_key |= hash_key[(i * 4) + 3] << 24;
1569                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1570                 }
1571         }
1572
1573         /* Set configured hashing protocols in MRQC register */
1574         rss_hf = rss_conf->rss_hf;
1575         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1576         if (rss_hf & ETH_RSS_IPV4)
1577                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1578         if (rss_hf & ETH_RSS_IPV4_TCP)
1579                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1580         if (rss_hf & ETH_RSS_IPV6)
1581                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1582         if (rss_hf & ETH_RSS_IPV6_EX)
1583                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1584         if (rss_hf & ETH_RSS_IPV6_TCP)
1585                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1586         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1587                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1588         if (rss_hf & ETH_RSS_IPV4_UDP)
1589                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1590         if (rss_hf & ETH_RSS_IPV6_UDP)
1591                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1592         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1593                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1594         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1595 }
1596
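/*
 * Illustrative usage: applications reach this function through
 * rte_eth_dev_rss_hash_update().  A minimal sketch, assuming a placeholder
 * "port_id" and keeping the current RSS key:
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,
 *             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
 *     };
 *     ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * As enforced below, the update may only change the key and hash functions;
 * it cannot switch RSS on or off after initialization.
 */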
1597 int
1598 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1599                         struct rte_eth_rss_conf *rss_conf)
1600 {
1601         struct e1000_hw *hw;
1602         uint32_t mrqc;
1603         uint64_t rss_hf;
1604
1605         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1606
1607         /*
1608          * Before changing anything, first check that the update RSS operation
1609          * does not attempt to disable RSS, if RSS was enabled at
1610          * initialization time, or does not attempt to enable RSS, if RSS was
1611          * disabled at initialization time.
1612          */
1613         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1614         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1615         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1616                 if (rss_hf != 0) /* Enable RSS */
1617                         return -(EINVAL);
1618                 return 0; /* Nothing to do */
1619         }
1620         /* RSS enabled */
1621         if (rss_hf == 0) /* Disable RSS */
1622                 return -(EINVAL);
1623         igb_hw_rss_hash_set(hw, rss_conf);
1624         return 0;
1625 }
1626
1627 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1628                               struct rte_eth_rss_conf *rss_conf)
1629 {
1630         struct e1000_hw *hw;
1631         uint8_t *hash_key;
1632         uint32_t rss_key;
1633         uint32_t mrqc;
1634         uint64_t rss_hf;
1635         uint16_t i;
1636
1637         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1638         hash_key = rss_conf->rss_key;
1639         if (hash_key != NULL) {
1640                 /* Return RSS hash key */
1641                 for (i = 0; i < 10; i++) {
1642                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1643                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1644                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1645                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1646                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1647                 }
1648         }
1649
1650         /* Get RSS functions configured in MRQC register */
1651         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1652         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1653                 rss_conf->rss_hf = 0;
1654                 return 0;
1655         }
1656         rss_hf = 0;
1657         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1658                 rss_hf |= ETH_RSS_IPV4;
1659         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1660                 rss_hf |= ETH_RSS_IPV4_TCP;
1661         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1662                 rss_hf |= ETH_RSS_IPV6;
1663         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1664                 rss_hf |= ETH_RSS_IPV6_EX;
1665         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1666                 rss_hf |= ETH_RSS_IPV6_TCP;
1667         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1668                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1669         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1670                 rss_hf |= ETH_RSS_IPV4_UDP;
1671         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1672                 rss_hf |= ETH_RSS_IPV6_UDP;
1673         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1674                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1675         rss_conf->rss_hf = rss_hf;
1676         return 0;
1677 }
1678
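/*
 * Configure RSS for the device: fill the 128-entry redirection table by
 * spreading entries round-robin over the configured RX queues (four RETA
 * entries are packed per 32-bit register write), then program the hash key
 * (the Intel default key when none was supplied) and the requested hash
 * functions.  When no supported hash function is requested, RSS is simply
 * disabled.
 */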
1679 static void
1680 igb_rss_configure(struct rte_eth_dev *dev)
1681 {
1682         struct rte_eth_rss_conf rss_conf;
1683         struct e1000_hw *hw;
1684         uint32_t shift;
1685         uint16_t i;
1686
1687         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1688
1689         /* Fill in redirection table. */
1690         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1691         for (i = 0; i < 128; i++) {
1692                 union e1000_reta {
1693                         uint32_t dword;
1694                         uint8_t  bytes[4];
1695                 } reta;
1696                 uint8_t q_idx;
1697
1698                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1699                                    i % dev->data->nb_rx_queues : 0);
1700                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1701                 if ((i & 3) == 3)
1702                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1703         }
1704
1705         /*
1706          * Configure the RSS key and the RSS protocols used to compute
1707          * the RSS hash of input packets.
1708          */
1709         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1710         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1711                 igb_rss_disable(dev);
1712                 return;
1713         }
1714         if (rss_conf.rss_key == NULL)
1715                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1716         igb_hw_rss_hash_set(hw, &rss_conf);
1717 }
1718
1719 /*
1720  * Check whether the MAC type supports VMDq.
1721  * Return 1 if it does, 0 otherwise.
1722  */
1723 static int
1724 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1725 {
1726         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1727
1728         switch (hw->mac.type) {
1729         case e1000_82576:
1730         case e1000_82580:
1731         case e1000_i350:
1732                 return 1;
1733         case e1000_82540:
1734         case e1000_82541:
1735         case e1000_82542:
1736         case e1000_82543:
1737         case e1000_82544:
1738         case e1000_82545:
1739         case e1000_82546:
1740         case e1000_82547:
1741         case e1000_82571:
1742         case e1000_82572:
1743         case e1000_82573:
1744         case e1000_82574:
1745         case e1000_82583:
1746         case e1000_i210:
1747         case e1000_i211:
1748         default:
1749                 PMD_INIT_LOG(ERR, "VMDq is not supported on this MAC type\n");
1750                 return 0;
1751         }
1752 }
1753
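/*
 * Illustrative usage: VMDq receive is selected through the port
 * configuration rather than by calling this function directly.  A minimal
 * sketch, assuming a placeholder "port_conf" later passed to
 * rte_eth_dev_configure():
 *
 *     port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
 *     port_conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_8_POOLS;
 *     port_conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
 *     port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
 *     port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 1 << 0;
 *
 * igb_dev_mq_rx_configure() then calls into this function when the RX unit
 * is initialized.
 */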
1754 static int
1755 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1756 {
1757         struct rte_eth_vmdq_rx_conf *cfg;
1758         struct e1000_hw *hw;
1759         uint32_t mrqc, vt_ctl, vmolr, rctl;
1760         int i;
1761
1762         PMD_INIT_LOG(DEBUG, ">>");
1763         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1764         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1765
1766         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1767         if (igb_is_vmdq_supported(dev) == 0)
1768                 return -1;
1769
1770         igb_rss_disable(dev);
1771
1772         /* RCTL: enable VLAN filter */
1773         rctl = E1000_READ_REG(hw, E1000_RCTL);
1774         rctl |= E1000_RCTL_VFE;
1775         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1776
1777         /* MRQC: enable vmdq */
1778         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1779         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1780         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1781
1782         /* VTCTL:  pool selection according to VLAN tag */
1783         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1784         if (cfg->enable_default_pool)
1785                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1786         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1787         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1788
1789         /*
1790          * VMOLR: set the STRVLAN bit when IGMAC in VT_CTL is set.
1791          * Both 82576 and 82580 support it.
1792          */
1793         if (hw->mac.type != e1000_i350) {
1794                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1795                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1796                         vmolr |= E1000_VMOLR_STRVLAN;
1797                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1798                 }
1799         }
1800
1801         /* VFTA - enable all vlan filters */
1802         for (i = 0; i < IGB_VFTA_SIZE; i++)
1803                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1804
1805         /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1806         if (hw->mac.type != e1000_82580)
1807                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1808
1809         /*
1810          * RAH/RAL - allow pools to read specific mac addresses
1811          * In this case, all pools should be able to read from mac addr 0
1812          */
1813         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1814         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1815
1816         /* VLVF: set up filters for vlan tags as configured */
1817         for (i = 0; i < cfg->nb_pool_maps; i++) {
1818                 /* set vlan id in VF register and set the valid bit */
1819                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1820                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1821                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1822                         E1000_VLVF_POOLSEL_MASK)));
1823         }
1824
1825         E1000_WRITE_FLUSH(hw);
1826
1827         return 0;
1828 }
1829
1830
1831 /*********************************************************************
1832  *
1833  *  Enable receive unit.
1834  *
1835  **********************************************************************/
1836
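/*
 * Populate the RX ring: allocate one mbuf per descriptor from the queue's
 * mempool and write its buffer DMA address into both the packet and header
 * address fields of the corresponding advanced RX descriptor.
 */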
1837 static int
1838 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1839 {
1840         struct igb_rx_entry *rxe = rxq->sw_ring;
1841         uint64_t dma_addr;
1842         unsigned i;
1843
1844         /* Initialize software ring entries. */
1845         for (i = 0; i < rxq->nb_rx_desc; i++) {
1846                 volatile union e1000_adv_rx_desc *rxd;
1847                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1848
1849                 if (mbuf == NULL) {
1850                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1851                                 "queue_id=%hu\n", rxq->queue_id);
1852                         return (-ENOMEM);
1853                 }
1854                 dma_addr =
1855                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1856                 rxd = &rxq->rx_ring[i];
1857                 rxd->read.hdr_addr = dma_addr;
1858                 rxd->read.pkt_addr = dma_addr;
1859                 rxe[i].mbuf = mbuf;
1860         }
1861
1862         return 0;
1863 }
1864
1865 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1866 static int
1867 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1868 {
1869         struct e1000_hw *hw =
1870                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1871         uint32_t mrqc;
1872
1873         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1874                 /*
1875                  * SRIOV active scheme
1876                  * FIXME: add support for RSS together with VMDq & SR-IOV
1877                  */
1878                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1879                 /* 011b: Def_Q is ignored; the default queue follows VT_CTL.DEF_PL */
1880                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1881                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1882         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1883                 /*
1884                  * SRIOV inactive scheme
1885                  */
1886                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1887                         case ETH_MQ_RX_RSS:
1888                                 igb_rss_configure(dev);
1889                                 break;
1890                         case ETH_MQ_RX_VMDQ_ONLY:
1891                                 /*Configure general VMDQ only RX parameters*/
1892                                 igb_vmdq_rx_hw_configure(dev);
1893                                 break;
1894                         case ETH_MQ_RX_NONE:
1895                                 /* if mq_mode is none, disable RSS. */
1896                         default:
1897                                 igb_rss_disable(dev);
1898                                 break;
1899                 }
1900         }
1901
1902         return 0;
1903 }
1904
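/*
 * Full RX-side initialization, performed with receives disabled: program
 * the per-queue ring base/length, buffer sizes (SRRCTL) and thresholds
 * (RXDCTL), select the scattered receive function when a single mbuf
 * cannot hold a maximum-sized frame, configure RSS/VMDq and checksum
 * offload, then enable reception in RCTL and set the head/tail pointers.
 */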
1905 int
1906 eth_igb_rx_init(struct rte_eth_dev *dev)
1907 {
1908         struct e1000_hw     *hw;
1909         struct igb_rx_queue *rxq;
1910         struct rte_pktmbuf_pool_private *mbp_priv;
1911         uint32_t rctl;
1912         uint32_t rxcsum;
1913         uint32_t srrctl;
1914         uint16_t buf_size;
1915         uint16_t rctl_bsize;
1916         uint16_t i;
1917         int ret;
1918
1919         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1920         srrctl = 0;
1921
1922         /*
1923          * Make sure receives are disabled while setting
1924          * up the descriptor ring.
1925          */
1926         rctl = E1000_READ_REG(hw, E1000_RCTL);
1927         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1928
1929         /*
1930          * Configure support of jumbo frames, if any.
1931          */
1932         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1933                 rctl |= E1000_RCTL_LPE;
1934
1935                 /*
1936                  * Set maximum packet length by default, and might be updated
1937                  * together with enabling/disabling dual VLAN.
1938                  */
1939                 E1000_WRITE_REG(hw, E1000_RLPML,
1940                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1941                                                 VLAN_TAG_SIZE);
1942         } else
1943                 rctl &= ~E1000_RCTL_LPE;
1944
1945         /* Configure and enable each RX queue. */
1946         rctl_bsize = 0;
1947         dev->rx_pkt_burst = eth_igb_recv_pkts;
1948         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1949                 uint64_t bus_addr;
1950                 uint32_t rxdctl;
1951
1952                 rxq = dev->data->rx_queues[i];
1953
1954                 /* Allocate buffers for descriptor rings and set up queue */
1955                 ret = igb_alloc_rx_queue_mbufs(rxq);
1956                 if (ret)
1957                         return ret;
1958
1959                 /*
1960                  * Reset crc_len in case it was changed after queue setup by a
1961                  *  call to configure
1962                  */
1963                 rxq->crc_len =
1964                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1965                                                         0 : ETHER_CRC_LEN);
1966
1967                 bus_addr = rxq->rx_ring_phys_addr;
1968                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1969                                 rxq->nb_rx_desc *
1970                                 sizeof(union e1000_adv_rx_desc));
1971                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1972                                 (uint32_t)(bus_addr >> 32));
1973                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1974
1975                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1976
1977                 /*
1978                  * Configure RX buffer size.
1979                  */
1980                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1981                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1982                                        RTE_PKTMBUF_HEADROOM);
1983                 if (buf_size >= 1024) {
1984                         /*
1985                          * Configure the BSIZEPACKET field of the SRRCTL
1986                          * register of the queue.
1987                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1988                          * If this field is equal to 0b, then RCTL.BSIZE
1989                          * determines the RX packet buffer size.
1990                          */
1991                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1992                                    E1000_SRRCTL_BSIZEPKT_MASK);
1993                         buf_size = (uint16_t) ((srrctl &
1994                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1995                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1996
1997                         /* Add dual VLAN tag length to support dual VLAN frames */
1998                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1999                                                 2 * VLAN_TAG_SIZE) > buf_size) {
2000                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2001                                 dev->data->scattered_rx = 1;
2002                         }
2003                 } else {
2004                         /*
2005                          * Use BSIZE field of the device RCTL register.
2006                          */
2007                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2008                                 rctl_bsize = buf_size;
2009                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2010                         dev->data->scattered_rx = 1;
2011                 }
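                /*
                 * Worked example (illustrative): with a mempool data room of
                 * 2176 bytes and RTE_PKTMBUF_HEADROOM of 128, buf_size is
                 * 2048, so BSIZEPACKET is programmed to 2 (2 KB); a standard
                 * 1518-byte frame plus two VLAN tags then fits in a single
                 * buffer and the non-scattered receive path is kept.
                 */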
2012
2013                 /* Set if packets are dropped when no descriptors available */
2014                 if (rxq->drop_en)
2015                         srrctl |= E1000_SRRCTL_DROP_EN;
2016
2017                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2018
2019                 /* Enable this RX queue. */
2020                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2021                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2022                 rxdctl &= 0xFFF00000;
2023                 rxdctl |= (rxq->pthresh & 0x1F);
2024                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2025                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2026                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2027         }
2028
2029         if (dev->data->dev_conf.rxmode.enable_scatter) {
2030                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2031                 dev->data->scattered_rx = 1;
2032         }
2033
2034         /*
2035          * Set up the BSIZE field of the RCTL register, if needed.
2036          * Buffer sizes >= 1024 are not set up in the RCTL
2037          * register, since the code above configures the SRRCTL register of
2038          * the RX queue in such a case.
2039          * All configurable sizes are:
2040          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2041          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2042          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2043          *  2048: rctl |= E1000_RCTL_SZ_2048;
2044          *  1024: rctl |= E1000_RCTL_SZ_1024;
2045          *   512: rctl |= E1000_RCTL_SZ_512;
2046          *   256: rctl |= E1000_RCTL_SZ_256;
2047          */
2048         if (rctl_bsize > 0) {
2049                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2050                         rctl |= E1000_RCTL_SZ_512;
2051                 else /* 256 <= buf_size < 512 - use 256 */
2052                         rctl |= E1000_RCTL_SZ_256;
2053         }
2054
2055         /*
2056          * Configure RSS if device configured with multiple RX queues.
2057          */
2058         igb_dev_mq_rx_configure(dev);
2059
2060         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2061         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2062
2063         /*
2064          * Setup the Checksum Register.
2065          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2066          */
2067         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2068         rxcsum |= E1000_RXCSUM_PCSD;
2069
2070         /* Enable both L3/L4 rx checksum offload */
2071         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2072                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2073         else
2074                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2075         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2076
2077         /* Setup the Receive Control Register. */
2078         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2079                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2080
2081                 /* set STRCRC bit in all queues */
2082                 if (hw->mac.type == e1000_i350 ||
2083                     hw->mac.type == e1000_i210 ||
2084                     hw->mac.type == e1000_i211 ||
2085                     hw->mac.type == e1000_i354) {
2086                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2087                                 rxq = dev->data->rx_queues[i];
2088                                 uint32_t dvmolr = E1000_READ_REG(hw,
2089                                         E1000_DVMOLR(rxq->reg_idx));
2090                                 dvmolr |= E1000_DVMOLR_STRCRC;
2091                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2092                         }
2093                 }
2094         } else {
2095                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2096
2097                 /* clear STRCRC bit in all queues */
2098                 if (hw->mac.type == e1000_i350 ||
2099                     hw->mac.type == e1000_i210 ||
2100                     hw->mac.type == e1000_i211 ||
2101                     hw->mac.type == e1000_i354) {
2102                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2103                                 rxq = dev->data->rx_queues[i];
2104                                 uint32_t dvmolr = E1000_READ_REG(hw,
2105                                         E1000_DVMOLR(rxq->reg_idx));
2106                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2107                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2108                         }
2109                 }
2110         }
2111
2112         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2113         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2114                 E1000_RCTL_RDMTS_HALF |
2115                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2116
2117         /* Make sure VLAN Filters are off. */
2118         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2119                 rctl &= ~E1000_RCTL_VFE;
2120         /* Don't store bad packets. */
2121         rctl &= ~E1000_RCTL_SBP;
2122
2123         /* Enable Receives. */
2124         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2125
2126         /*
2127          * Setup the HW Rx Head and Tail Descriptor Pointers.
2128          * This needs to be done after enable.
2129          */
2130         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2131                 rxq = dev->data->rx_queues[i];
2132                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2133                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2134         }
2135
2136         return 0;
2137 }
2138
2139 /*********************************************************************
2140  *
2141  *  Enable transmit unit.
2142  *
2143  **********************************************************************/
2144 void
2145 eth_igb_tx_init(struct rte_eth_dev *dev)
2146 {
2147         struct e1000_hw     *hw;
2148         struct igb_tx_queue *txq;
2149         uint32_t tctl;
2150         uint32_t txdctl;
2151         uint16_t i;
2152
2153         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2154
2155         /* Setup the Base and Length of the Tx Descriptor Rings. */
2156         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2157                 uint64_t bus_addr;
2158                 txq = dev->data->tx_queues[i];
2159                 bus_addr = txq->tx_ring_phys_addr;
2160
2161                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2162                                 txq->nb_tx_desc *
2163                                 sizeof(union e1000_adv_tx_desc));
2164                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2165                                 (uint32_t)(bus_addr >> 32));
2166                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2167
2168                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2169                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2170                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2171
2172                 /* Setup Transmit threshold registers. */
2173                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2174                 txdctl |= txq->pthresh & 0x1F;
2175                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2176                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2177                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2178                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2179         }
2180
2181         /* Program the Transmit Control Register. */
2182         tctl = E1000_READ_REG(hw, E1000_TCTL);
2183         tctl &= ~E1000_TCTL_CT;
2184         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2185                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2186
2187         e1000_config_collision_dist(hw);
2188
2189         /* This write will effectively turn on the transmit unit. */
2190         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2191 }
2192
2193 /*********************************************************************
2194  *
2195  *  Enable VF receive unit.
2196  *
2197  **********************************************************************/
2198 int
2199 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2200 {
2201         struct e1000_hw     *hw;
2202         struct igb_rx_queue *rxq;
2203         struct rte_pktmbuf_pool_private *mbp_priv;
2204         uint32_t srrctl;
2205         uint16_t buf_size;
2206         uint16_t rctl_bsize;
2207         uint16_t i;
2208         int ret;
2209
2210         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2211
2212         /* setup MTU */
2213         e1000_rlpml_set_vf(hw,
2214                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2215                 VLAN_TAG_SIZE));
2216
2217         /* Configure and enable each RX queue. */
2218         rctl_bsize = 0;
2219         dev->rx_pkt_burst = eth_igb_recv_pkts;
2220         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2221                 uint64_t bus_addr;
2222                 uint32_t rxdctl;
2223
2224                 rxq = dev->data->rx_queues[i];
2225
2226                 /* Allocate buffers for descriptor rings and set up queue */
2227                 ret = igb_alloc_rx_queue_mbufs(rxq);
2228                 if (ret)
2229                         return ret;
2230
2231                 bus_addr = rxq->rx_ring_phys_addr;
2232                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2233                                 rxq->nb_rx_desc *
2234                                 sizeof(union e1000_adv_rx_desc));
2235                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2236                                 (uint32_t)(bus_addr >> 32));
2237                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2238
2239                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2240
2241                 /*
2242                  * Configure RX buffer size.
2243                  */
2244                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2245                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2246                                        RTE_PKTMBUF_HEADROOM);
2247                 if (buf_size >= 1024) {
2248                         /*
2249                          * Configure the BSIZEPACKET field of the SRRCTL
2250                          * register of the queue.
2251                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2252                          * If this field is equal to 0b, then RCTL.BSIZE
2253                          * determines the RX packet buffer size.
2254                          */
2255                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2256                                    E1000_SRRCTL_BSIZEPKT_MASK);
2257                         buf_size = (uint16_t) ((srrctl &
2258                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2259                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2260
2261                         /* Add dual VLAN tag length to support dual VLAN frames */
2262                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2263                                                 2 * VLAN_TAG_SIZE) > buf_size) {
2264                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2265                                 dev->data->scattered_rx = 1;
2266                         }
2267                 } else {
2268                         /*
2269                          * Use BSIZE field of the device RCTL register.
2270                          */
2271                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2272                                 rctl_bsize = buf_size;
2273                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2274                         dev->data->scattered_rx = 1;
2275                 }
2276
2277                 /* Set if packets are dropped when no descriptors available */
2278                 if (rxq->drop_en)
2279                         srrctl |= E1000_SRRCTL_DROP_EN;
2280
2281                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2282
2283                 /* Enable this RX queue. */
2284                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2285                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2286                 rxdctl &= 0xFFF00000;
2287                 rxdctl |= (rxq->pthresh & 0x1F);
2288                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2289                 if (hw->mac.type == e1000_vfadapt) {
2290                         /*
2291                          * Workaround of 82576 VF Erratum
2292                          * Workaround for 82576 VF erratum:
2293                          * force WTHRESH to 1 to avoid descriptor
2294                          * write-back occasionally not being triggered.
2295                          */
2296                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1!\n");
2297                 }
2298                 else
2299                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2300                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2301         }
2302
2303         if (dev->data->dev_conf.rxmode.enable_scatter) {
2304                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2305                 dev->data->scattered_rx = 1;
2306         }
2307
2308         /*
2309          * Setup the HW Rx Head and Tail Descriptor Pointers.
2310          * This needs to be done after enable.
2311          */
2312         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2313                 rxq = dev->data->rx_queues[i];
2314                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2315                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2316         }
2317
2318         return 0;
2319 }
2320
2321 /*********************************************************************
2322  *
2323  *  Enable VF transmit unit.
2324  *
2325  **********************************************************************/
2326 void
2327 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2328 {
2329         struct e1000_hw     *hw;
2330         struct igb_tx_queue *txq;
2331         uint32_t txdctl;
2332         uint16_t i;
2333
2334         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2335
2336         /* Setup the Base and Length of the Tx Descriptor Rings. */
2337         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2338                 uint64_t bus_addr;
2339
2340                 txq = dev->data->tx_queues[i];
2341                 bus_addr = txq->tx_ring_phys_addr;
2342                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2343                                 txq->nb_tx_desc *
2344                                 sizeof(union e1000_adv_tx_desc));
2345                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2346                                 (uint32_t)(bus_addr >> 32));
2347                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2348
2349                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2350                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2351                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2352
2353                 /* Setup Transmit threshold registers. */
2354                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2355                 txdctl |= txq->pthresh & 0x1F;
2356                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2357                 if (hw->mac.type == e1000_82576) {
2358                         /*
2359                          * Workaround for 82576 VF erratum:
2360                          * force WTHRESH to 1 to avoid descriptor
2361                          * write-back occasionally not being triggered.
2362                          */
2363                         txdctl |= 0x10000;
2364                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1!\n");
2365                 }
2366                 else
2367                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2368                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2369                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2370         }
2371
2372 }
2373