[dpdk.git] / lib / librte_pmd_e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "e1000/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit mask indicating which offload bits require building a TX context */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK)
80
81 static inline struct rte_mbuf *
82 rte_rxmbuf_alloc(struct rte_mempool *mp)
83 {
84         struct rte_mbuf *m;
85
86         m = __rte_mbuf_raw_alloc(mp);
87         __rte_mbuf_sanity_check_raw(m, 0);
88         return (m);
89 }
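
/*
 * Note: unlike rte_pktmbuf_alloc(), the raw allocation above does not reset
 * the packet metadata (pkt_len, data_len, nb_segs, next, ol_flags, ...).
 * The RX functions below initialize the fields they hand back to the
 * application themselves, which saves a few cycles per mbuf on the hot path.
 */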
90
91 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
92         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
93
94 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
95         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
96
97 /**
98  * Structure associated with each descriptor of the RX ring of a RX queue.
99  */
100 struct igb_rx_entry {
101         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
102 };
103
104 /**
105  * Structure associated with each descriptor of the TX ring of a TX queue.
106  */
107 struct igb_tx_entry {
108         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
109         uint16_t next_id; /**< Index of next descriptor in ring. */
110         uint16_t last_id; /**< Index of last scattered descriptor. */
111 };
112
113 /**
114  * Structure associated with each RX queue.
115  */
116 struct igb_rx_queue {
117         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
118         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
119         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
120         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
121         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
122         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
123         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
124         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
125         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
126         uint16_t            rx_tail;    /**< current value of RDT register. */
127         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
128         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
129         uint16_t            queue_id;   /**< RX queue index. */
130         uint16_t            reg_idx;    /**< RX queue register index. */
131         uint8_t             port_id;    /**< Device port identifier. */
132         uint8_t             pthresh;    /**< Prefetch threshold register. */
133         uint8_t             hthresh;    /**< Host threshold register. */
134         uint8_t             wthresh;    /**< Write-back threshold register. */
135         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
136         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
137 };
138
139 /**
140  * Hardware context number
141  */
142 enum igb_advctx_num {
143         IGB_CTX_0    = 0, /**< CTX0    */
144         IGB_CTX_1    = 1, /**< CTX1    */
145         IGB_CTX_NUM  = 2, /**< CTX_NUM */
146 };
147
148 /** Offload features */
149 union igb_vlan_macip {
150         uint32_t data;
151         struct {
152                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
153                 uint16_t vlan_tci;
154                 /**< VLAN Tag Control Identifier (CPU order). */
155         } f;
156 };
157
158 /*
159  * Compare mask for vlan_macip_lens.data;
160  * must be kept in sync with the igb_vlan_macip.f layout.
161  */
162 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN TCI - 16 bits. */
163 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
164 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
165 /** MAC+IP  length. */
166 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
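
/*
 * Illustrative layout of igb_vlan_macip.data on a little-endian host
 * (a sketch for reference only; the code below never hard-codes it):
 *
 *   bits  0..8   L3 (IP) header length   -> TX_IP_LEN_CMP_MASK
 *   bits  9..15  L2 (MAC) header length  -> TX_MAC_LEN_CMP_MASK
 *   bits 16..31  VLAN TCI                -> TX_VLAN_CMP_MASK
 *
 * For example, an untagged packet with a 14-byte Ethernet header and a
 * 20-byte IPv4 header is encoded as (14 << 9) | 20 = 0x1c14 in the lower
 * 16 bits, with the upper 16 bits left at zero.
 */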
167
168 /**
169  * Structure used to check whether a new context descriptor needs to be built.
170  */
171 struct igb_advctx_info {
172         uint64_t flags;           /**< ol_flags related to context build. */
173         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
174         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
175 };
176
177 /**
178  * Structure associated with each TX queue.
179  */
180 struct igb_tx_queue {
181         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
182         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
183         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
184         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
185         uint32_t               txd_type;      /**< Device-specific TXD type */
186         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
187         uint16_t               tx_tail; /**< Current value of TDT register. */
188         uint16_t               tx_head;
189         /**< Index of first used TX descriptor. */
190         uint16_t               queue_id; /**< TX queue index. */
191         uint16_t               reg_idx;  /**< TX queue register index. */
192         uint8_t                port_id;  /**< Device port identifier. */
193         uint8_t                pthresh;  /**< Prefetch threshold register. */
194         uint8_t                hthresh;  /**< Host threshold register. */
195         uint8_t                wthresh;  /**< Write-back threshold register. */
196         uint32_t               ctx_curr;
197         /**< Currently used hardware context. */
198         uint32_t               ctx_start;
199         /**< Start context position for transmit queue. */
200         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
201         /**< Hardware context history.*/
202 };
203
204 #if 1
205 #define RTE_PMD_USE_PREFETCH
206 #endif
207
208 #ifdef RTE_PMD_USE_PREFETCH
209 #define rte_igb_prefetch(p)     rte_prefetch0(p)
210 #else
211 #define rte_igb_prefetch(p)     do {} while(0)
212 #endif
213
214 #ifdef RTE_PMD_PACKET_PREFETCH
215 #define rte_packet_prefetch(p) rte_prefetch1(p)
216 #else
217 #define rte_packet_prefetch(p)  do {} while(0)
218 #endif
219
220 /*
221  * Macro for VMDq feature for 1 GbE NIC.
222  */
223 #define E1000_VMOLR_SIZE                        (8)
224
225 /*********************************************************************
226  *
227  *  TX function
228  *
229  **********************************************************************/
230
231 /*
232  * Advanced context descriptors are almost the same between igb and ixgbe.
233  * This is kept as a separate function; there may be an optimization opportunity here.
234  * Rework is required to go with the pre-defined values.
235  */
236
237 static inline void
238 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
239                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
240                 uint64_t ol_flags, uint32_t vlan_macip_lens)
241 {
242         uint32_t type_tucmd_mlhl;
243         uint32_t mss_l4len_idx;
244         uint32_t ctx_idx, ctx_curr;
245         uint32_t cmp_mask;
246
247         ctx_curr = txq->ctx_curr;
248         ctx_idx = ctx_curr + txq->ctx_start;
249
250         cmp_mask = 0;
251         type_tucmd_mlhl = 0;
252
253         if (ol_flags & PKT_TX_VLAN_PKT) {
254                 cmp_mask |= TX_VLAN_CMP_MASK;
255         }
256
257         if (ol_flags & PKT_TX_IP_CKSUM) {
258                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
259                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
260         }
261
262         /* Specify which HW CTX to upload. */
263         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264         switch (ol_flags & PKT_TX_L4_MASK) {
265         case PKT_TX_UDP_CKSUM:
266                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
267                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
268                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
269                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
270                 break;
271         case PKT_TX_TCP_CKSUM:
272                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
273                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
276                 break;
277         case PKT_TX_SCTP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         default:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 break;
287         }
288
289         txq->ctx_cache[ctx_curr].flags           = ol_flags;
290         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
291         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
292                 vlan_macip_lens & cmp_mask;
293
294         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
295         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
296         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
297         ctx_txd->seqnum_seed     = 0;
298 }
299
300 /*
301  * Check which hardware context can be used. Use the existing match
302  * or create a new context descriptor.
303  */
304 static inline uint32_t
305 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
306                 uint32_t vlan_macip_lens)
307 {
308         /* If match with the current context */
309         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
310                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
311                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
312                         return txq->ctx_curr;
313         }
314
315         /* If match with the second context */
316         txq->ctx_curr ^= 1;
317         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320                         return txq->ctx_curr;
321         }
322
323         /* No match in either cache entry: a new context descriptor is needed. */
324         return (IGB_CTX_NUM);
325 }
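
/*
 * The two cache entries above mirror the two hardware context slots (CTX0
 * and CTX1).  A return value of IGB_CTX_NUM therefore means "no slot holds
 * a matching context": the transmit path then writes a fresh context
 * descriptor into the slot selected by txq->ctx_curr before emitting the
 * data descriptors that reference it.
 */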
326
327 static inline uint32_t
328 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
329 {
330         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
331         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
332         uint32_t tmp;
333
334         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
335         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
336         return tmp;
337 }
338
339 static inline uint32_t
340 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
341 {
342         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
343         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
344 }
345
346 uint16_t
347 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
348                uint16_t nb_pkts)
349 {
350         struct igb_tx_queue *txq;
351         struct igb_tx_entry *sw_ring;
352         struct igb_tx_entry *txe, *txn;
353         volatile union e1000_adv_tx_desc *txr;
354         volatile union e1000_adv_tx_desc *txd;
355         struct rte_mbuf     *tx_pkt;
356         struct rte_mbuf     *m_seg;
357         union igb_vlan_macip vlan_macip_lens;
358         union {
359                 uint16_t u16;
360                 struct {
361                         uint16_t l3_len:9;
362                         uint16_t l2_len:7;
363                 };
364         } l2_l3_len;
365         uint64_t buf_dma_addr;
366         uint32_t olinfo_status;
367         uint32_t cmd_type_len;
368         uint32_t pkt_len;
369         uint16_t slen;
370         uint64_t ol_flags;
371         uint16_t tx_end;
372         uint16_t tx_id;
373         uint16_t tx_last;
374         uint16_t nb_tx;
375         uint64_t tx_ol_req;
376         uint32_t new_ctx = 0;
377         uint32_t ctx = 0;
378
379         txq = tx_queue;
380         sw_ring = txq->sw_ring;
381         txr     = txq->tx_ring;
382         tx_id   = txq->tx_tail;
383         txe = &sw_ring[tx_id];
384
385         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
386                 tx_pkt = *tx_pkts++;
387                 pkt_len = tx_pkt->pkt_len;
388
389                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
390
391                 /*
392                  * The number of descriptors that must be allocated for a
393                  * packet is the number of segments of that packet, plus 1
394                  * Context Descriptor for the VLAN Tag Identifier, if any.
395                  * Determine the last TX descriptor to allocate in the TX ring
396                  * for the packet, starting from the current position (tx_id)
397                  * in the ring.
398                  */
399                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
400
401                 ol_flags = tx_pkt->ol_flags;
402                 l2_l3_len.l2_len = tx_pkt->l2_len;
403                 l2_l3_len.l3_len = tx_pkt->l3_len;
404                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
405                 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
406                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
407
408                 /* Check whether a Context Descriptor needs to be built. */
409                 if (tx_ol_req) {
410                         ctx = what_advctx_update(txq, tx_ol_req,
411                                 vlan_macip_lens.data);
412                         /* Only allocate a context descriptor if required. */
413                         new_ctx = (ctx == IGB_CTX_NUM);
414                         ctx = txq->ctx_curr;
415                         tx_last = (uint16_t) (tx_last + new_ctx);
416                 }
417                 if (tx_last >= txq->nb_tx_desc)
418                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
419
420                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
421                            " tx_first=%u tx_last=%u",
422                            (unsigned) txq->port_id,
423                            (unsigned) txq->queue_id,
424                            (unsigned) pkt_len,
425                            (unsigned) tx_id,
426                            (unsigned) tx_last);
427
428                 /*
429                  * Check if there are enough free descriptors in the TX ring
430                  * to transmit the next packet.
431                  * This operation is based on the two following rules:
432                  *
433                  *   1- Only check that the last needed TX descriptor can be
434                  *      allocated (by construction, if that descriptor is free,
435                  *      all intermediate ones are also free).
436                  *
437                  *      For this purpose, the index of the last TX descriptor
438                  *      used for a packet (the "last descriptor" of a packet)
439                  *      is recorded in the TX entries (the last one included)
440                  *      that are associated with all TX descriptors allocated
441                  *      for that packet.
442                  *
443                  *   2- Avoid allocating the last free TX descriptor of the
444                  *      ring, in order to never set the TDT register with the
445                  *      same value stored in parallel by the NIC in the TDH
446                  *      register, which would make the TX engine of the NIC enter
447                  *      a deadlock situation.
448                  *
449                  *      By extension, avoid allocating a free descriptor that
450                  *      belongs to the last set of free descriptors allocated
451                  *      to the same packet previously transmitted.
452                  */
453
454                 /*
455                  * The "last descriptor" of the packet, if any, that previously
456                  * used the last descriptor we need to allocate.
457                  */
458                 tx_end = sw_ring[tx_last].last_id;
459
460                 /*
461                  * The next descriptor following that "last descriptor" in the
462                  * ring.
463                  */
464                 tx_end = sw_ring[tx_end].next_id;
465
466                 /*
467                  * The "last descriptor" associated with that next descriptor.
468                  */
469                 tx_end = sw_ring[tx_end].last_id;
470
471                 /*
472                  * Check that this descriptor is free.
473                  */
474                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
475                         if (nb_tx == 0)
476                                 return (0);
477                         goto end_of_tx;
478                 }
479
480                 /*
481                  * Set common flags of all TX Data Descriptors.
482                  *
483                  * The following bits must be set in all Data Descriptors:
484                  *   - E1000_ADVTXD_DTYP_DATA
485                  *   - E1000_ADVTXD_DCMD_DEXT
486                  *
487                  * The following bits must be set in the first Data Descriptor
488                  * and are ignored in the other ones:
489                  *   - E1000_ADVTXD_DCMD_IFCS
490                  *   - E1000_ADVTXD_MAC_1588
491                  *   - E1000_ADVTXD_DCMD_VLE
492                  *
493                  * The following bits must only be set in the last Data
494                  * Descriptor:
495                  *   - E1000_TXD_CMD_EOP
496                  *
497                  * The following bits can be set in any Data Descriptor, but
498                  * are only set in the last Data Descriptor:
499                  *   - E1000_TXD_CMD_RS
500                  */
501                 cmd_type_len = txq->txd_type |
502                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
503                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
504 #if defined(RTE_LIBRTE_IEEE1588)
505                 if (ol_flags & PKT_TX_IEEE1588_TMST)
506                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
507 #endif
508                 if (tx_ol_req) {
509                         /* Setup TX Advanced context descriptor if required */
510                         if (new_ctx) {
511                                 volatile struct e1000_adv_tx_context_desc *
512                                     ctx_txd;
513
514                                 ctx_txd = (volatile struct
515                                     e1000_adv_tx_context_desc *)
516                                     &txr[tx_id];
517
518                                 txn = &sw_ring[txe->next_id];
519                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
520
521                                 if (txe->mbuf != NULL) {
522                                         rte_pktmbuf_free_seg(txe->mbuf);
523                                         txe->mbuf = NULL;
524                                 }
525
526                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
527                                     vlan_macip_lens.data);
528
529                                 txe->last_id = tx_last;
530                                 tx_id = txe->next_id;
531                                 txe = txn;
532                         }
533
534                         /* Setup the TX Advanced Data Descriptor */
535                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
536                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
537                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
538                 }
539
540                 m_seg = tx_pkt;
541                 do {
542                         txn = &sw_ring[txe->next_id];
543                         txd = &txr[tx_id];
544
545                         if (txe->mbuf != NULL)
546                                 rte_pktmbuf_free_seg(txe->mbuf);
547                         txe->mbuf = m_seg;
548
549                         /*
550                          * Set up transmit descriptor.
551                          */
552                         slen = (uint16_t) m_seg->data_len;
553                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
554                         txd->read.buffer_addr =
555                                 rte_cpu_to_le_64(buf_dma_addr);
556                         txd->read.cmd_type_len =
557                                 rte_cpu_to_le_32(cmd_type_len | slen);
558                         txd->read.olinfo_status =
559                                 rte_cpu_to_le_32(olinfo_status);
560                         txe->last_id = tx_last;
561                         tx_id = txe->next_id;
562                         txe = txn;
563                         m_seg = m_seg->next;
564                 } while (m_seg != NULL);
565
566                 /*
567                  * The last packet data descriptor needs End Of Packet (EOP)
568                  * and Report Status (RS).
569                  */
570                 txd->read.cmd_type_len |=
571                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
572         }
573  end_of_tx:
574         rte_wmb();
575
576         /*
577          * Set the Transmit Descriptor Tail (TDT).
578          */
579         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
580         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
581                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
582                    (unsigned) tx_id, (unsigned) nb_tx);
583         txq->tx_tail = tx_id;
584
585         return (nb_tx);
586 }
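
/*
 * Typical usage (an illustrative sketch, not part of this driver):
 * applications do not call eth_igb_xmit_pkts() directly; it is installed as
 * dev->tx_pkt_burst by eth_igb_tx_queue_setup() and reached through the
 * generic burst API.  The port/queue identifiers and burst size below are
 * examples only:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb_ready, nb_sent;
 *
 *     nb_ready = ...;  // mbufs prepared by the application in pkts[]
 *     nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ready);
 *     // mbufs not accepted (nb_sent < nb_ready) remain owned by the caller
 */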
587
588 /*********************************************************************
589  *
590  *  RX functions
591  *
592  **********************************************************************/
593 static inline uint64_t
594 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
595 {
596         uint64_t pkt_flags;
597
598         static uint64_t ip_pkt_types_map[16] = {
599                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
600                 PKT_RX_IPV6_HDR, 0, 0, 0,
601                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
602                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
603         };
604
605 #if defined(RTE_LIBRTE_IEEE1588)
606         static uint32_t ip_pkt_etqf_map[8] = {
607                 0, 0, 0, PKT_RX_IEEE1588_PTP,
608                 0, 0, 0, 0,
609         };
610
611         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
612                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
613                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
614 #else
615         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
616                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
617 #endif
618         return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH);
619 }
620
621 static inline uint64_t
622 rx_desc_status_to_pkt_flags(uint32_t rx_status)
623 {
624         uint64_t pkt_flags;
625
626         /* Check if VLAN present */
627         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
628
629 #if defined(RTE_LIBRTE_IEEE1588)
630         if (rx_status & E1000_RXD_STAT_TMST)
631                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
632 #endif
633         return pkt_flags;
634 }
635
636 static inline uint64_t
637 rx_desc_error_to_pkt_flags(uint32_t rx_status)
638 {
639         /*
640          * Bit 30: IPE, IPv4 checksum error
641          * Bit 29: L4I, L4I integrity error
642          */
643
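        /*
         * These two error bits form a 2-bit index into the table below, so
         * both the IP and L4 checksum error flags can be reported without
         * any branches on the RX hot path.
         */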
644         static uint64_t error_to_pkt_flags_map[4] = {
645                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
646                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
647         };
648         return error_to_pkt_flags_map[(rx_status >>
649                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
650 }
651
652 uint16_t
653 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
654                uint16_t nb_pkts)
655 {
656         struct igb_rx_queue *rxq;
657         volatile union e1000_adv_rx_desc *rx_ring;
658         volatile union e1000_adv_rx_desc *rxdp;
659         struct igb_rx_entry *sw_ring;
660         struct igb_rx_entry *rxe;
661         struct rte_mbuf *rxm;
662         struct rte_mbuf *nmb;
663         union e1000_adv_rx_desc rxd;
664         uint64_t dma_addr;
665         uint32_t staterr;
666         uint32_t hlen_type_rss;
667         uint16_t pkt_len;
668         uint16_t rx_id;
669         uint16_t nb_rx;
670         uint16_t nb_hold;
671         uint64_t pkt_flags;
672
673         nb_rx = 0;
674         nb_hold = 0;
675         rxq = rx_queue;
676         rx_id = rxq->rx_tail;
677         rx_ring = rxq->rx_ring;
678         sw_ring = rxq->sw_ring;
679         while (nb_rx < nb_pkts) {
680                 /*
681                  * The order of operations here is important as the DD status
682                  * bit must not be read after any other descriptor fields.
683                  * rx_ring and rxdp are pointing to volatile data so the order
684                  * of accesses cannot be reordered by the compiler. If they were
685                  * not volatile, they could be reordered which could lead to
686                  * using invalid descriptor fields when read from rxd.
687                  */
688                 rxdp = &rx_ring[rx_id];
689                 staterr = rxdp->wb.upper.status_error;
690                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
691                         break;
692                 rxd = *rxdp;
693
694                 /*
695                  * End of packet.
696                  *
697                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
698                  * likely to be invalid and to be dropped by the various
699                  * validation checks performed by the network stack.
700                  *
701                  * Allocate a new mbuf to replenish the RX ring descriptor.
702                  * If the allocation fails:
703                  *    - arrange for that RX descriptor to be the first one
704                  *      being parsed the next time the receive function is
705                  *      invoked [on the same queue].
706                  *
707                  *    - Stop parsing the RX ring and return immediately.
708                  *
709                  * This policy does not drop the packet received in the RX
710                  * descriptor for which the allocation of a new mbuf failed.
711                  * Thus, it allows that packet to be later retrieved if
712                  * mbufs have been freed in the meantime.
713                  * As a side effect, holding RX descriptors instead of
714                  * systematically giving them back to the NIC may lead to
715                  * RX ring exhaustion situations.
716                  * However, the NIC can gracefully prevent such situations
717                  * from happening by sending specific "back-pressure" flow control
718                  * frames to its peer(s).
719                  */
720                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
721                            "staterr=0x%x pkt_len=%u",
722                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
723                            (unsigned) rx_id, (unsigned) staterr,
724                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
725
726                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
727                 if (nmb == NULL) {
728                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
729                                    "queue_id=%u", (unsigned) rxq->port_id,
730                                    (unsigned) rxq->queue_id);
731                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
732                         break;
733                 }
734
735                 nb_hold++;
736                 rxe = &sw_ring[rx_id];
737                 rx_id++;
738                 if (rx_id == rxq->nb_rx_desc)
739                         rx_id = 0;
740
741                 /* Prefetch next mbuf while processing current one. */
742                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
743
744                 /*
745                  * When next RX descriptor is on a cache-line boundary,
746                  * prefetch the next 4 RX descriptors and the next 8 pointers
747                  * to mbufs.
748                  */
749                 if ((rx_id & 0x3) == 0) {
750                         rte_igb_prefetch(&rx_ring[rx_id]);
751                         rte_igb_prefetch(&sw_ring[rx_id]);
752                 }
753
754                 rxm = rxe->mbuf;
755                 rxe->mbuf = nmb;
756                 dma_addr =
757                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
758                 rxdp->read.hdr_addr = dma_addr;
759                 rxdp->read.pkt_addr = dma_addr;
760
761                 /*
762                  * Initialize the returned mbuf.
763                  * 1) setup generic mbuf fields:
764                  *    - number of segments,
765                  *    - next segment,
766                  *    - packet length,
767                  *    - RX port identifier.
768                  * 2) integrate hardware offload data, if any:
769                  *    - RSS flag & hash,
770                  *    - IP checksum flag,
771                  *    - VLAN TCI, if any,
772                  *    - error flags.
773                  */
774                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
775                                       rxq->crc_len);
776                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
777                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
778                 rxm->nb_segs = 1;
779                 rxm->next = NULL;
780                 rxm->pkt_len = pkt_len;
781                 rxm->data_len = pkt_len;
782                 rxm->port = rxq->port_id;
783
784                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
785                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
786                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
787                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
788
789                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
790                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
791                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
792                 rxm->ol_flags = pkt_flags;
793
794                 /*
795                  * Store the mbuf address into the next entry of the array
796                  * of returned packets.
797                  */
798                 rx_pkts[nb_rx++] = rxm;
799         }
800         rxq->rx_tail = rx_id;
801
802         /*
803          * If the number of free RX descriptors is greater than the RX free
804          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
805          * register.
806          * Update the RDT with the value of the last processed RX descriptor
807          * minus 1, to guarantee that the RDT register is never equal to the
808          * RDH register, which creates a "full" ring situation from the
809          * hardware point of view...
810          */
811         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
812         if (nb_hold > rxq->rx_free_thresh) {
813                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
814                            "nb_hold=%u nb_rx=%u",
815                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
816                            (unsigned) rx_id, (unsigned) nb_hold,
817                            (unsigned) nb_rx);
818                 rx_id = (uint16_t) ((rx_id == 0) ?
819                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
820                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
821                 nb_hold = 0;
822         }
823         rxq->nb_rx_hold = nb_hold;
824         return (nb_rx);
825 }
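
/*
 * Typical usage (an illustrative sketch, not part of this driver): this
 * routine is installed as dev->rx_pkt_burst and reached through the generic
 * burst API.  Port/queue identifiers and the burst size are examples only:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t i, nb_rx;
 *
 *     nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *     for (i = 0; i < nb_rx; i++) {
 *             // process pkts[i] ...
 *             rte_pktmbuf_free(pkts[i]);
 *     }
 */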
826
827 uint16_t
828 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
829                          uint16_t nb_pkts)
830 {
831         struct igb_rx_queue *rxq;
832         volatile union e1000_adv_rx_desc *rx_ring;
833         volatile union e1000_adv_rx_desc *rxdp;
834         struct igb_rx_entry *sw_ring;
835         struct igb_rx_entry *rxe;
836         struct rte_mbuf *first_seg;
837         struct rte_mbuf *last_seg;
838         struct rte_mbuf *rxm;
839         struct rte_mbuf *nmb;
840         union e1000_adv_rx_desc rxd;
841         uint64_t dma; /* Physical address of mbuf data buffer */
842         uint32_t staterr;
843         uint32_t hlen_type_rss;
844         uint16_t rx_id;
845         uint16_t nb_rx;
846         uint16_t nb_hold;
847         uint16_t data_len;
848         uint64_t pkt_flags;
849
850         nb_rx = 0;
851         nb_hold = 0;
852         rxq = rx_queue;
853         rx_id = rxq->rx_tail;
854         rx_ring = rxq->rx_ring;
855         sw_ring = rxq->sw_ring;
856
857         /*
858          * Retrieve RX context of current packet, if any.
859          */
860         first_seg = rxq->pkt_first_seg;
861         last_seg = rxq->pkt_last_seg;
862
863         while (nb_rx < nb_pkts) {
864         next_desc:
865                 /*
866                  * The order of operations here is important as the DD status
867                  * bit must not be read after any other descriptor fields.
868                  * rx_ring and rxdp are pointing to volatile data so the order
869                  * of accesses cannot be reordered by the compiler. If they were
870                  * not volatile, they could be reordered which could lead to
871                  * using invalid descriptor fields when read from rxd.
872                  */
873                 rxdp = &rx_ring[rx_id];
874                 staterr = rxdp->wb.upper.status_error;
875                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
876                         break;
877                 rxd = *rxdp;
878
879                 /*
880                  * Descriptor done.
881                  *
882                  * Allocate a new mbuf to replenish the RX ring descriptor.
883                  * If the allocation fails:
884                  *    - arrange for that RX descriptor to be the first one
885                  *      being parsed the next time the receive function is
886                  *      invoked [on the same queue].
887                  *
888                  *    - Stop parsing the RX ring and return immediately.
889                  *
890                  * This policy does not drop the packet received in the RX
891                  * descriptor for which the allocation of a new mbuf failed.
892                  * Thus, it allows that packet to be later retrieved if
893                  * mbufs have been freed in the meantime.
894                  * As a side effect, holding RX descriptors instead of
895                  * systematically giving them back to the NIC may lead to
896                  * RX ring exhaustion situations.
897                  * However, the NIC can gracefully prevent such situations
898                  * from happening by sending specific "back-pressure" flow control
899                  * frames to its peer(s).
900                  */
901                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
902                            "staterr=0x%x data_len=%u",
903                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
904                            (unsigned) rx_id, (unsigned) staterr,
905                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
906
907                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
908                 if (nmb == NULL) {
909                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
910                                    "queue_id=%u", (unsigned) rxq->port_id,
911                                    (unsigned) rxq->queue_id);
912                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
913                         break;
914                 }
915
916                 nb_hold++;
917                 rxe = &sw_ring[rx_id];
918                 rx_id++;
919                 if (rx_id == rxq->nb_rx_desc)
920                         rx_id = 0;
921
922                 /* Prefetch next mbuf while processing current one. */
923                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
924
925                 /*
926                  * When next RX descriptor is on a cache-line boundary,
927                  * prefetch the next 4 RX descriptors and the next 8 pointers
928                  * to mbufs.
929                  */
930                 if ((rx_id & 0x3) == 0) {
931                         rte_igb_prefetch(&rx_ring[rx_id]);
932                         rte_igb_prefetch(&sw_ring[rx_id]);
933                 }
934
935                 /*
936                  * Update RX descriptor with the physical address of the new
937                  * data buffer of the new allocated mbuf.
938                  */
939                 rxm = rxe->mbuf;
940                 rxe->mbuf = nmb;
941                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
942                 rxdp->read.pkt_addr = dma;
943                 rxdp->read.hdr_addr = dma;
944
945                 /*
946                  * Set data length & data buffer address of mbuf.
947                  */
948                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
949                 rxm->data_len = data_len;
950                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
951
952                 /*
953                  * If this is the first buffer of the received packet,
954                  * set the pointer to the first mbuf of the packet and
955                  * initialize its context.
956                  * Otherwise, update the total length and the number of segments
957                  * of the current scattered packet, and update the pointer to
958                  * the last mbuf of the current packet.
959                  */
960                 if (first_seg == NULL) {
961                         first_seg = rxm;
962                         first_seg->pkt_len = data_len;
963                         first_seg->nb_segs = 1;
964                 } else {
965                         first_seg->pkt_len += data_len;
966                         first_seg->nb_segs++;
967                         last_seg->next = rxm;
968                 }
969
970                 /*
971                  * If this is not the last buffer of the received packet,
972                  * update the pointer to the last mbuf of the current scattered
973                  * packet and continue to parse the RX ring.
974                  */
975                 if (! (staterr & E1000_RXD_STAT_EOP)) {
976                         last_seg = rxm;
977                         goto next_desc;
978                 }
979
980                 /*
981                  * This is the last buffer of the received packet.
982                  * If the CRC is not stripped by the hardware:
983                  *   - Subtract the CRC length from the total packet length.
984                  *   - If the last buffer only contains the whole CRC or a part
985                  *     of it, free the mbuf associated to the last buffer.
986                  *     If part of the CRC is also contained in the previous
987                  *     mbuf, subtract the length of that CRC part from the
988                  *     data length of the previous mbuf.
989                  */
990                 rxm->next = NULL;
991                 if (unlikely(rxq->crc_len > 0)) {
992                         first_seg->pkt_len -= ETHER_CRC_LEN;
993                         if (data_len <= ETHER_CRC_LEN) {
994                                 rte_pktmbuf_free_seg(rxm);
995                                 first_seg->nb_segs--;
996                                 last_seg->data_len = (uint16_t)
997                                         (last_seg->data_len -
998                                          (ETHER_CRC_LEN - data_len));
999                                 last_seg->next = NULL;
1000                         } else
1001                                 rxm->data_len =
1002                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1003                 }
1004
1005                 /*
1006                  * Initialize the first mbuf of the returned packet:
1007                  *    - RX port identifier,
1008                  *    - hardware offload data, if any:
1009                  *      - RSS flag & hash,
1010                  *      - IP checksum flag,
1011                  *      - VLAN TCI, if any,
1012                  *      - error flags.
1013                  */
1014                 first_seg->port = rxq->port_id;
1015                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1016
1017                 /*
1018                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1019                  * set in the pkt_flags field.
1020                  */
1021                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1022                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1023                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1024                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1025                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1026                 first_seg->ol_flags = pkt_flags;
1027
1028                 /* Prefetch data of first segment, if configured to do so. */
1029                 rte_packet_prefetch((char *)first_seg->buf_addr +
1030                         first_seg->data_off);
1031
1032                 /*
1033                  * Store the mbuf address into the next entry of the array
1034                  * of returned packets.
1035                  */
1036                 rx_pkts[nb_rx++] = first_seg;
1037
1038                 /*
1039                  * Setup receipt context for a new packet.
1040                  */
1041                 first_seg = NULL;
1042         }
1043
1044         /*
1045          * Record index of the next RX descriptor to probe.
1046          */
1047         rxq->rx_tail = rx_id;
1048
1049         /*
1050          * Save receive context.
1051          */
1052         rxq->pkt_first_seg = first_seg;
1053         rxq->pkt_last_seg = last_seg;
1054
1055         /*
1056          * If the number of free RX descriptors is greater than the RX free
1057          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1058          * register.
1059          * Update the RDT with the value of the last processed RX descriptor
1060          * minus 1, to guarantee that the RDT register is never equal to the
1061          * RDH register, which creates a "full" ring situation from the
1062          * hardware point of view...
1063          */
1064         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1065         if (nb_hold > rxq->rx_free_thresh) {
1066                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1067                            "nb_hold=%u nb_rx=%u",
1068                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1069                            (unsigned) rx_id, (unsigned) nb_hold,
1070                            (unsigned) nb_rx);
1071                 rx_id = (uint16_t) ((rx_id == 0) ?
1072                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1073                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1074                 nb_hold = 0;
1075         }
1076         rxq->nb_rx_hold = nb_hold;
1077         return (nb_rx);
1078 }
1079
1080 /*
1081  * Rings setup and release.
1082  *
1083  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1084  * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary instead.
1085  * This also optimizes the cache line usage.
1086  * The hardware supports cache line sizes of up to 128 bytes.
1087  */
1088 #define IGB_ALIGN 128
1089
1090 /*
1091  * Maximum number of Ring Descriptors.
1092  *
1093  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1094  * descriptors should meet the following condition:
1095  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1096  */
1097 #define IGB_MIN_RING_DESC 32
1098 #define IGB_MAX_RING_DESC 4096
1099
1100 static const struct rte_memzone *
1101 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1102                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1103 {
1104         char z_name[RTE_MEMZONE_NAMESIZE];
1105         const struct rte_memzone *mz;
1106
1107         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1108                         dev->driver->pci_drv.name, ring_name,
1109                                 dev->data->port_id, queue_id);
1110         mz = rte_memzone_lookup(z_name);
1111         if (mz)
1112                 return mz;
1113
1114 #ifdef RTE_LIBRTE_XEN_DOM0
1115         return rte_memzone_reserve_bounded(z_name, ring_size,
1116                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1117 #else
1118         return rte_memzone_reserve_aligned(z_name, ring_size,
1119                         socket_id, 0, IGB_ALIGN);
1120 #endif
1121 }
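
/*
 * The lookup-before-reserve above lets a queue be torn down and set up again
 * without leaking memory: the ring memzone is never freed, so a zone created
 * by an earlier call with the same name is simply reused.
 */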
1122
1123 static void
1124 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1125 {
1126         unsigned i;
1127
1128         if (txq->sw_ring != NULL) {
1129                 for (i = 0; i < txq->nb_tx_desc; i++) {
1130                         if (txq->sw_ring[i].mbuf != NULL) {
1131                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1132                                 txq->sw_ring[i].mbuf = NULL;
1133                         }
1134                 }
1135         }
1136 }
1137
1138 static void
1139 igb_tx_queue_release(struct igb_tx_queue *txq)
1140 {
1141         if (txq != NULL) {
1142                 igb_tx_queue_release_mbufs(txq);
1143                 rte_free(txq->sw_ring);
1144                 rte_free(txq);
1145         }
1146 }
1147
1148 void
1149 eth_igb_tx_queue_release(void *txq)
1150 {
1151         igb_tx_queue_release(txq);
1152 }
1153
1154 static void
1155 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1156 {
1157         txq->tx_head = 0;
1158         txq->tx_tail = 0;
1159         txq->ctx_curr = 0;
1160         memset((void*)&txq->ctx_cache, 0,
1161                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1162 }
1163
1164 static void
1165 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1166 {
1167         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1168                         .buffer_addr = 0}};
1169         struct igb_tx_entry *txe = txq->sw_ring;
1170         uint16_t i, prev;
1171         struct e1000_hw *hw;
1172
1173         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1174         /* Zero out HW ring memory */
1175         for (i = 0; i < txq->nb_tx_desc; i++) {
1176                 txq->tx_ring[i] = zeroed_desc;
1177         }
1178
1179         /* Initialize ring entries */
1180         prev = (uint16_t)(txq->nb_tx_desc - 1);
1181         for (i = 0; i < txq->nb_tx_desc; i++) {
1182                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1183
1184                 txd->wb.status = E1000_TXD_STAT_DD;
1185                 txe[i].mbuf = NULL;
1186                 txe[i].last_id = i;
1187                 txe[prev].next_id = i;
1188                 prev = i;
1189         }
1190
1191         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1192         /* 82575 specific, each tx queue will use 2 hw contexts */
1193         if (hw->mac.type == e1000_82575)
1194                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1195
1196         igb_reset_tx_queue_stat(txq);
1197 }
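
/*
 * Note that every descriptor is initialized with its DD status bit set:
 * eth_igb_xmit_pkts() treats a set DD bit as "descriptor is free", so a
 * freshly reset ring is seen as fully available for transmission.
 */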
1198
1199 int
1200 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1201                          uint16_t queue_idx,
1202                          uint16_t nb_desc,
1203                          unsigned int socket_id,
1204                          const struct rte_eth_txconf *tx_conf)
1205 {
1206         const struct rte_memzone *tz;
1207         struct igb_tx_queue *txq;
1208         struct e1000_hw     *hw;
1209         uint32_t size;
1210
1211         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1212
1213         /*
1214          * Validate number of transmit descriptors.
1215          * It must not exceed the hardware maximum, and the total ring size
1216          * must be a multiple of IGB_ALIGN.
1217          */
1218         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1219             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1220                 return -EINVAL;
1221         }
1222
1223         /*
1224          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1225          * driver.
1226          */
1227         if (tx_conf->tx_free_thresh != 0)
1228                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1229                              "used for the 1G driver.");
1230         if (tx_conf->tx_rs_thresh != 0)
1231                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1232                              "used for the 1G driver.");
1233         if (tx_conf->tx_thresh.wthresh == 0)
1234                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1235                              "consider setting the TX WTHRESH value to 4, 8, "
1236                              "or 16.");
1237
1238         /* Free memory prior to re-allocation if needed */
1239         if (dev->data->tx_queues[queue_idx] != NULL) {
1240                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1241                 dev->data->tx_queues[queue_idx] = NULL;
1242         }
1243
1244         /* First allocate the tx queue data structure */
1245         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1246                                                         RTE_CACHE_LINE_SIZE);
1247         if (txq == NULL)
1248                 return (-ENOMEM);
1249
1250         /*
1251          * Allocate TX ring hardware descriptors. A memzone large enough to
1252          * handle the maximum ring size is allocated in order to allow for
1253          * resizing in later calls to the queue setup function.
1254          */
1255         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1256         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1257                                         size, socket_id);
1258         if (tz == NULL) {
1259                 igb_tx_queue_release(txq);
1260                 return (-ENOMEM);
1261         }
1262
1263         txq->nb_tx_desc = nb_desc;
1264         txq->pthresh = tx_conf->tx_thresh.pthresh;
1265         txq->hthresh = tx_conf->tx_thresh.hthresh;
1266         txq->wthresh = tx_conf->tx_thresh.wthresh;
1267         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1268                 txq->wthresh = 1;
1269         txq->queue_id = queue_idx;
1270         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1271                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1272         txq->port_id = dev->data->port_id;
1273
1274         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1275 #ifndef RTE_LIBRTE_XEN_DOM0
1276         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1277 #else
1278         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1279 #endif
1280          txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1281         /* Allocate software ring */
1282         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1283                                    sizeof(struct igb_tx_entry) * nb_desc,
1284                                    RTE_CACHE_LINE_SIZE);
1285         if (txq->sw_ring == NULL) {
1286                 igb_tx_queue_release(txq);
1287                 return (-ENOMEM);
1288         }
1289         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1290                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1291
1292         igb_reset_tx_queue(txq, dev);
1293         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1294         dev->data->tx_queues[queue_idx] = txq;
1295
1296         return (0);
1297 }
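
/*
 * Typical configuration sequence (an illustrative sketch; the parameter
 * values are examples only): this function is reached through
 * rte_eth_tx_queue_setup(), after rte_eth_dev_configure() and before
 * rte_eth_dev_start():
 *
 *     struct rte_eth_txconf txconf = {
 *             .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
 *     };
 *
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &txconf) != 0)
 *             rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */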
1298
1299 static void
1300 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1301 {
1302         unsigned i;
1303
1304         if (rxq->sw_ring != NULL) {
1305                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1306                         if (rxq->sw_ring[i].mbuf != NULL) {
1307                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1308                                 rxq->sw_ring[i].mbuf = NULL;
1309                         }
1310                 }
1311         }
1312 }
1313
1314 static void
1315 igb_rx_queue_release(struct igb_rx_queue *rxq)
1316 {
1317         if (rxq != NULL) {
1318                 igb_rx_queue_release_mbufs(rxq);
1319                 rte_free(rxq->sw_ring);
1320                 rte_free(rxq);
1321         }
1322 }
1323
1324 void
1325 eth_igb_rx_queue_release(void *rxq)
1326 {
1327         igb_rx_queue_release(rxq);
1328 }
1329
1330 static void
1331 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1332 {
1333         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1334                         .pkt_addr = 0}};
1335         unsigned i;
1336
1337         /* Zero out HW ring memory */
1338         for (i = 0; i < rxq->nb_rx_desc; i++) {
1339                 rxq->rx_ring[i] = zeroed_desc;
1340         }
1341
1342         rxq->rx_tail = 0;
1343         rxq->pkt_first_seg = NULL;
1344         rxq->pkt_last_seg = NULL;
1345 }
1346
1347 int
1348 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1349                          uint16_t queue_idx,
1350                          uint16_t nb_desc,
1351                          unsigned int socket_id,
1352                          const struct rte_eth_rxconf *rx_conf,
1353                          struct rte_mempool *mp)
1354 {
1355         const struct rte_memzone *rz;
1356         struct igb_rx_queue *rxq;
1357         struct e1000_hw     *hw;
1358         unsigned int size;
1359
1360         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1361
1362         /*
1363          * Validate number of receive descriptors.
1364          * It must not exceed hardware maximum, and must be multiple
1365          * of IGB_ALIGN.
1366          */
1367         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1368             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1369                 return (-EINVAL);
1370         }
1371
1372         /* Free memory prior to re-allocation if needed */
1373         if (dev->data->rx_queues[queue_idx] != NULL) {
1374                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1375                 dev->data->rx_queues[queue_idx] = NULL;
1376         }
1377
1378         /* First allocate the RX queue data structure. */
1379         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1380                           RTE_CACHE_LINE_SIZE);
1381         if (rxq == NULL)
1382                 return (-ENOMEM);
1383         rxq->mb_pool = mp;
1384         rxq->nb_rx_desc = nb_desc;
1385         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1386         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1387         rxq->wthresh = rx_conf->rx_thresh.wthresh;
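             /* Same 82576 WTHRESH clamp as on the TX queue setup path above. */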
1388         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1389                 rxq->wthresh = 1;
1390         rxq->drop_en = rx_conf->rx_drop_en;
1391         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1392         rxq->queue_id = queue_idx;
1393         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1394                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1395         rxq->port_id = dev->data->port_id;
1396         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1397                                   ETHER_CRC_LEN);
1398
1399         /*
1400          *  Allocate RX ring hardware descriptors. A memzone large enough to
1401          *  handle the maximum ring size is allocated in order to allow for
1402          *  resizing in later calls to the queue setup function.
1403          */
1404         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1405         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1406         if (rz == NULL) {
1407                 igb_rx_queue_release(rxq);
1408                 return (-ENOMEM);
1409         }
1410         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1411         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1412 #ifndef RTE_LIBRTE_XEN_DOM0
1413         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1414 #else
1415         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1416 #endif
1417         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1418
1419         /* Allocate software ring. */
1420         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1421                                    sizeof(struct igb_rx_entry) * nb_desc,
1422                                    RTE_CACHE_LINE_SIZE);
1423         if (rxq->sw_ring == NULL) {
1424                 igb_rx_queue_release(rxq);
1425                 return (-ENOMEM);
1426         }
1427         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1428                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1429
1430         dev->data->rx_queues[queue_idx] = rxq;
1431         igb_reset_rx_queue(rxq);
1432
1433         return 0;
1434 }
1435
1436 uint32_t
1437 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1438 {
1439 #define IGB_RXQ_SCAN_INTERVAL 4
1440         volatile union e1000_adv_rx_desc *rxdp;
1441         struct igb_rx_queue *rxq;
1442         uint32_t desc = 0;
1443
1444         if (rx_queue_id >= dev->data->nb_rx_queues) {
1445                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1446                 return 0;
1447         }
1448
1449         rxq = dev->data->rx_queues[rx_queue_id];
1450         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1451
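             /*
              * Scan the ring in steps of IGB_RXQ_SCAN_INTERVAL, counting
              * descriptors whose DD (descriptor done) bit has been set by
              * hardware, and wrap back to the ring start when the scan runs
              * past the end of the ring.
              */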
1452         while ((desc < rxq->nb_rx_desc) &&
1453                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1454                 desc += IGB_RXQ_SCAN_INTERVAL;
1455                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1456                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1457                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1458                                 desc - rxq->nb_rx_desc]);
1459         }
1460
1461         return desc;
1462 }
1463
1464 int
1465 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1466 {
1467         volatile union e1000_adv_rx_desc *rxdp;
1468         struct igb_rx_queue *rxq = rx_queue;
1469         uint32_t desc;
1470
1471         if (unlikely(offset >= rxq->nb_rx_desc))
1472                 return 0;
1473         desc = rxq->rx_tail + offset;
1474         if (desc >= rxq->nb_rx_desc)
1475                 desc -= rxq->nb_rx_desc;
1476
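             /* DD is set by hardware once the descriptor has been written back. */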
1477         rxdp = &rxq->rx_ring[desc];
1478         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1479 }
1480
1481 void
1482 igb_dev_clear_queues(struct rte_eth_dev *dev)
1483 {
1484         uint16_t i;
1485         struct igb_tx_queue *txq;
1486         struct igb_rx_queue *rxq;
1487
1488         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1489                 txq = dev->data->tx_queues[i];
1490                 if (txq != NULL) {
1491                         igb_tx_queue_release_mbufs(txq);
1492                         igb_reset_tx_queue(txq, dev);
1493                 }
1494         }
1495
1496         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1497                 rxq = dev->data->rx_queues[i];
1498                 if (rxq != NULL) {
1499                         igb_rx_queue_release_mbufs(rxq);
1500                         igb_reset_rx_queue(rxq);
1501                 }
1502         }
1503 }
1504
1505 /**
1506  * Receive Side Scaling (RSS).
1507  * See section 7.1.1.7 in the following document:
1508  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1509  *
1510  * Principles:
1511  * The source and destination IP addresses of the IP header and the source and
1512  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1513  * against a configurable random key to compute a 32-bit RSS hash result.
1514  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1515  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1516  * RSS output index which is used as the RX queue index where to store the
1517  * received packets.
1518  * The following output is supplied in the RX write-back descriptor:
1519  *     - 32-bit result of the Microsoft RSS hash function,
1520  *     - 4-bit RSS type field.
1521  */
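     /*
      * Illustratively, the queue selection described above amounts to:
      *     rx_queue = RETA[rss_hash & 0x7F]
      * where rss_hash is the 32-bit hash reported in the write-back descriptor.
      */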
1522
1523 /*
1524  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1525  * Used as the default key.
1526  */
1527 static uint8_t rss_intel_key[40] = {
1528         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1529         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1530         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1531         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1532         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1533 };
1534
1535 static void
1536 igb_rss_disable(struct rte_eth_dev *dev)
1537 {
1538         struct e1000_hw *hw;
1539         uint32_t mrqc;
1540
1541         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1543         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1544         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1545 }
1546
1547 static void
1548 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1549 {
1550         uint8_t  *hash_key;
1551         uint32_t rss_key;
1552         uint32_t mrqc;
1553         uint64_t rss_hf;
1554         uint16_t i;
1555
1556         hash_key = rss_conf->rss_key;
1557         if (hash_key != NULL) {
1558                 /* Fill in RSS hash key */
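                     /* Each RSSRK[i] packs key bytes 4*i..4*i+3, LSB first. */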
1559                 for (i = 0; i < 10; i++) {
1560                         rss_key  = hash_key[(i * 4)];
1561                         rss_key |= hash_key[(i * 4) + 1] << 8;
1562                         rss_key |= hash_key[(i * 4) + 2] << 16;
1563                         rss_key |= hash_key[(i * 4) + 3] << 24;
1564                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1565                 }
1566         }
1567
1568         /* Set configured hashing protocols in MRQC register */
1569         rss_hf = rss_conf->rss_hf;
1570         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1571         if (rss_hf & ETH_RSS_IPV4)
1572                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1573         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1574                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1575         if (rss_hf & ETH_RSS_IPV6)
1576                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1577         if (rss_hf & ETH_RSS_IPV6_EX)
1578                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1579         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1580                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1581         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1582                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1583         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1584                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1585         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1586                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1587         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1588                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1589         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1590 }
1591
1592 int
1593 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1594                         struct rte_eth_rss_conf *rss_conf)
1595 {
1596         struct e1000_hw *hw;
1597         uint32_t mrqc;
1598         uint64_t rss_hf;
1599
1600         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1601
1602         /*
1603          * Before changing anything, check that the requested RSS update
1604          * neither attempts to disable RSS when RSS was enabled at
1605          * initialization time, nor attempts to enable RSS when RSS was
1606          * disabled at initialization time.
1607          */
1608         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1609         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1610         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1611                 if (rss_hf != 0) /* Enable RSS */
1612                         return -(EINVAL);
1613                 return 0; /* Nothing to do */
1614         }
1615         /* RSS enabled */
1616         if (rss_hf == 0) /* Disable RSS */
1617                 return -(EINVAL);
1618         igb_hw_rss_hash_set(hw, rss_conf);
1619         return 0;
1620 }
1621
1622 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1623                               struct rte_eth_rss_conf *rss_conf)
1624 {
1625         struct e1000_hw *hw;
1626         uint8_t *hash_key;
1627         uint32_t rss_key;
1628         uint32_t mrqc;
1629         uint64_t rss_hf;
1630         uint16_t i;
1631
1632         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1633         hash_key = rss_conf->rss_key;
1634         if (hash_key != NULL) {
1635                 /* Return RSS hash key */
1636                 for (i = 0; i < 10; i++) {
1637                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1638                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1639                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1640                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1641                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1642                 }
1643         }
1644
1645         /* Get RSS functions configured in MRQC register */
1646         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1647         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1648                 rss_conf->rss_hf = 0;
1649                 return 0;
1650         }
1651         rss_hf = 0;
1652         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1653                 rss_hf |= ETH_RSS_IPV4;
1654         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1655                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1656         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1657                 rss_hf |= ETH_RSS_IPV6;
1658         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1659                 rss_hf |= ETH_RSS_IPV6_EX;
1660         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1661                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1662         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1663                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1664         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1665                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1666         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1667                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1668         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1669                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1670         rss_conf->rss_hf = rss_hf;
1671         return 0;
1672 }
1673
1674 static void
1675 igb_rss_configure(struct rte_eth_dev *dev)
1676 {
1677         struct rte_eth_rss_conf rss_conf;
1678         struct e1000_hw *hw;
1679         uint32_t shift;
1680         uint16_t i;
1681
1682         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1683
1684         /* Fill in redirection table. */
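             /*
              * Four one-byte entries are packed into each 32-bit RETA register;
              * queue indices are assigned round-robin across the configured RX
              * queues (82575 expects the index shifted left by 6).
              */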
1685         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1686         for (i = 0; i < 128; i++) {
1687                 union e1000_reta {
1688                         uint32_t dword;
1689                         uint8_t  bytes[4];
1690                 } reta;
1691                 uint8_t q_idx;
1692
1693                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1694                                    i % dev->data->nb_rx_queues : 0);
1695                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1696                 if ((i & 3) == 3)
1697                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1698         }
1699
1700         /*
1701          * Configure the RSS key and the RSS protocols used to compute
1702          * the RSS hash of input packets.
1703          */
1704         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1705         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1706                 igb_rss_disable(dev);
1707                 return;
1708         }
1709         if (rss_conf.rss_key == NULL)
1710                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1711         igb_hw_rss_hash_set(hw, &rss_conf);
1712 }
1713
1714 /*
1715  * Check whether the MAC type supports VMDq.
1716  * Return 1 if it does, otherwise return 0.
1717  */
1718 static int
1719 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1720 {
1721         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1722
1723         switch (hw->mac.type) {
1724         case e1000_82576:
1725         case e1000_82580:
1726         case e1000_i350:
1727                 return 1;
1728         case e1000_82540:
1729         case e1000_82541:
1730         case e1000_82542:
1731         case e1000_82543:
1732         case e1000_82544:
1733         case e1000_82545:
1734         case e1000_82546:
1735         case e1000_82547:
1736         case e1000_82571:
1737         case e1000_82572:
1738         case e1000_82573:
1739         case e1000_82574:
1740         case e1000_82583:
1741         case e1000_i210:
1742         case e1000_i211:
1743         default:
1744                 PMD_INIT_LOG(ERR, "VMDq is not supported on this MAC type");
1745                 return 0;
1746         }
1747 }
1748
1749 static int
1750 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1751 {
1752         struct rte_eth_vmdq_rx_conf *cfg;
1753         struct e1000_hw *hw;
1754         uint32_t mrqc, vt_ctl, vmolr, rctl;
1755         int i;
1756
1757         PMD_INIT_FUNC_TRACE();
1758
1759         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1760         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1761
1762         /* Check whether the MAC type supports VMDq; a return value of 0 means not supported */
1763         if (igb_is_vmdq_supported(dev) == 0)
1764                 return -1;
1765
1766         igb_rss_disable(dev);
1767
1768         /* RCTL: enable VLAN filter */
1769         rctl = E1000_READ_REG(hw, E1000_RCTL);
1770         rctl |= E1000_RCTL_VFE;
1771         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1772
1773         /* MRQC: enable vmdq */
1774         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1775         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1776         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1777
1778         /* VTCTL:  pool selection according to VLAN tag */
1779         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1780         if (cfg->enable_default_pool)
1781                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1782         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1783         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1784
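             /*
              * VMOLR: per-pool receive mode. Clear the accept bits first, then
              * apply the configured rx_mode flags for every pool.
              */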
1785         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1786                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1787                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1788                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1789                         E1000_VMOLR_MPME);
1790
1791                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1792                         vmolr |= E1000_VMOLR_AUPE;
1793                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1794                         vmolr |= E1000_VMOLR_ROMPE;
1795                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1796                         vmolr |= E1000_VMOLR_ROPE;
1797                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1798                         vmolr |= E1000_VMOLR_BAM;
1799                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1800                         vmolr |= E1000_VMOLR_MPME;
1801
1802                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1803         }
1804
1805         /*
1806          * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1807          * Both 82576 and 82580 support it.
1808          */
1809         if (hw->mac.type != e1000_i350) {
1810                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1811                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1812                         vmolr |= E1000_VMOLR_STRVLAN;
1813                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1814                 }
1815         }
1816
1817         /* VFTA - enable all vlan filters */
1818         for (i = 0; i < IGB_VFTA_SIZE; i++)
1819                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1820
1821         /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1822         if (hw->mac.type != e1000_82580)
1823                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1824
1825         /*
1826          * RAH/RAL - allow pools to read specific mac addresses
1827          * In this case, all pools should be able to read from mac addr 0
1828          */
1829         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1830         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1831
1832         /* VLVF: set up filters for vlan tags as configured */
1833         for (i = 0; i < cfg->nb_pool_maps; i++) {
1834                 /* set vlan id in VF register and set the valid bit */
1835                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1836                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1837                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1838                         E1000_VLVF_POOLSEL_MASK)));
1839         }
1840
1841         E1000_WRITE_FLUSH(hw);
1842
1843         return 0;
1844 }
1845
1846
1847 /*********************************************************************
1848  *
1849  *  Enable receive unit.
1850  *
1851  **********************************************************************/
1852
1853 static int
1854 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1855 {
1856         struct igb_rx_entry *rxe = rxq->sw_ring;
1857         uint64_t dma_addr;
1858         unsigned i;
1859
1860         /* Initialize software ring entries. */
1861         for (i = 0; i < rxq->nb_rx_desc; i++) {
1862                 volatile union e1000_adv_rx_desc *rxd;
1863                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1864
1865                 if (mbuf == NULL) {
1866                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1867                                      "queue_id=%hu", rxq->queue_id);
1868                         return (-ENOMEM);
1869                 }
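                     /*
                      * Advanced one-buffer layout (see SRRCTL DESCTYPE below):
                      * both header and packet addresses point at the mbuf data.
                      */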
1870                 dma_addr =
1871                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1872                 rxd = &rxq->rx_ring[i];
1873                 rxd->read.hdr_addr = dma_addr;
1874                 rxd->read.pkt_addr = dma_addr;
1875                 rxe[i].mbuf = mbuf;
1876         }
1877
1878         return 0;
1879 }
1880
1881 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1882 static int
1883 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1884 {
1885         struct e1000_hw *hw =
1886                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1887         uint32_t mrqc;
1888
1889         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1890                 /*
1891                  * SRIOV active scheme
1892                  * FIXME if support RSS together with VMDq & SRIOV
1893                  */
1894                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1895                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1896                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1897                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1898         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1899                 /*
1900                  * SRIOV inactive scheme
1901                  */
1902                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1903                         case ETH_MQ_RX_RSS:
1904                                 igb_rss_configure(dev);
1905                                 break;
1906                         case ETH_MQ_RX_VMDQ_ONLY:
1907                                 /*Configure general VMDQ only RX parameters*/
1908                                 igb_vmdq_rx_hw_configure(dev);
1909                                 break;
1910                         case ETH_MQ_RX_NONE:
1911                                 /* if mq_mode is none, disable RSS mode. */
1912                         default:
1913                                 igb_rss_disable(dev);
1914                                 break;
1915                 }
1916         }
1917
1918         return 0;
1919 }
1920
1921 int
1922 eth_igb_rx_init(struct rte_eth_dev *dev)
1923 {
1924         struct e1000_hw     *hw;
1925         struct igb_rx_queue *rxq;
1926         struct rte_pktmbuf_pool_private *mbp_priv;
1927         uint32_t rctl;
1928         uint32_t rxcsum;
1929         uint32_t srrctl;
1930         uint16_t buf_size;
1931         uint16_t rctl_bsize;
1932         uint16_t i;
1933         int ret;
1934
1935         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1936         srrctl = 0;
1937
1938         /*
1939          * Make sure receives are disabled while setting
1940          * up the descriptor ring.
1941          */
1942         rctl = E1000_READ_REG(hw, E1000_RCTL);
1943         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1944
1945         /*
1946          * Configure support of jumbo frames, if any.
1947          */
1948         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1949                 rctl |= E1000_RCTL_LPE;
1950
1951                 /*
1952                  * Set maximum packet length by default, and might be updated
1953                  * together with enabling/disabling dual VLAN.
1954                  */
1955                 E1000_WRITE_REG(hw, E1000_RLPML,
1956                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1957                                                 VLAN_TAG_SIZE);
1958         } else
1959                 rctl &= ~E1000_RCTL_LPE;
1960
1961         /* Configure and enable each RX queue. */
1962         rctl_bsize = 0;
1963         dev->rx_pkt_burst = eth_igb_recv_pkts;
1964         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1965                 uint64_t bus_addr;
1966                 uint32_t rxdctl;
1967
1968                 rxq = dev->data->rx_queues[i];
1969
1970                 /* Allocate buffers for descriptor rings and set up queue */
1971                 ret = igb_alloc_rx_queue_mbufs(rxq);
1972                 if (ret)
1973                         return ret;
1974
1975                 /*
1976                  * Reset crc_len in case it was changed after queue setup by a
1977                  *  call to configure
1978                  */
1979                 rxq->crc_len =
1980                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1981                                                         0 : ETHER_CRC_LEN);
1982
1983                 bus_addr = rxq->rx_ring_phys_addr;
1984                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1985                                 rxq->nb_rx_desc *
1986                                 sizeof(union e1000_adv_rx_desc));
1987                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1988                                 (uint32_t)(bus_addr >> 32));
1989                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1990
1991                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1992
1993                 /*
1994                  * Configure RX buffer size.
1995                  */
1996                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1997                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1998                                        RTE_PKTMBUF_HEADROOM);
1999                 if (buf_size >= 1024) {
2000                         /*
2001                          * Configure the BSIZEPACKET field of the SRRCTL
2002                          * register of the queue.
2003                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2004                          * If this field is equal to 0b, then RCTL.BSIZE
2005                          * determines the RX packet buffer size.
2006                          */
2007                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2008                                    E1000_SRRCTL_BSIZEPKT_MASK);
2009                         buf_size = (uint16_t) ((srrctl &
2010                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2011                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
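                             /*
                              * buf_size now reflects the 1 KB granularity that
                              * was actually programmed, so the scatter check
                              * below uses the effective hardware buffer size.
                              */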
2012
2013                         /* Add dual VLAN tag length to account for double VLAN frames */
2014                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2015                                                 2 * VLAN_TAG_SIZE) > buf_size){
2016                                 if (!dev->data->scattered_rx)
2017                                         PMD_INIT_LOG(DEBUG,
2018                                                      "forcing scatter mode");
2019                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2020                                 dev->data->scattered_rx = 1;
2021                         }
2022                 } else {
2023                         /*
2024                          * Use BSIZE field of the device RCTL register.
2025                          */
2026                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2027                                 rctl_bsize = buf_size;
2028                         if (!dev->data->scattered_rx)
2029                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2030                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2031                         dev->data->scattered_rx = 1;
2032                 }
2033
2034                 /* Set if packets are dropped when no descriptors available */
2035                 if (rxq->drop_en)
2036                         srrctl |= E1000_SRRCTL_DROP_EN;
2037
2038                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2039
2040                 /* Enable this RX queue. */
2041                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2042                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2043                 rxdctl &= 0xFFF00000;
2044                 rxdctl |= (rxq->pthresh & 0x1F);
2045                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2046                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2047                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2048         }
2049
2050         if (dev->data->dev_conf.rxmode.enable_scatter) {
2051                 if (!dev->data->scattered_rx)
2052                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2053                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2054                 dev->data->scattered_rx = 1;
2055         }
2056
2057         /*
2058          * Setup BSIZE field of RCTL register, if needed.
2059          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2060          * register, since the code above configures the SRRCTL register of
2061          * the RX queue in such a case.
2062          * All configurable sizes are:
2063          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2064          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2065          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2066          *  2048: rctl |= E1000_RCTL_SZ_2048;
2067          *  1024: rctl |= E1000_RCTL_SZ_1024;
2068          *   512: rctl |= E1000_RCTL_SZ_512;
2069          *   256: rctl |= E1000_RCTL_SZ_256;
2070          */
2071         if (rctl_bsize > 0) {
2072                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2073                         rctl |= E1000_RCTL_SZ_512;
2074                 else /* 256 <= buf_size < 512 - use 256 */
2075                         rctl |= E1000_RCTL_SZ_256;
2076         }
2077
2078         /*
2079          * Configure RSS if device configured with multiple RX queues.
2080          */
2081         igb_dev_mq_rx_configure(dev);
2082
2083         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2084         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2085
2086         /*
2087          * Setup the Checksum Register.
2088          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2089          */
2090         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2091         rxcsum |= E1000_RXCSUM_PCSD;
2092
2093         /* Enable both L3/L4 rx checksum offload */
2094         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2095                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2096         else
2097                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2098         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2099
2100         /* Setup the Receive Control Register. */
2101         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2102                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2103
2104                 /* set STRCRC bit in all queues */
2105                 if (hw->mac.type == e1000_i350 ||
2106                     hw->mac.type == e1000_i210 ||
2107                     hw->mac.type == e1000_i211 ||
2108                     hw->mac.type == e1000_i354) {
2109                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2110                                 rxq = dev->data->rx_queues[i];
2111                                 uint32_t dvmolr = E1000_READ_REG(hw,
2112                                         E1000_DVMOLR(rxq->reg_idx));
2113                                 dvmolr |= E1000_DVMOLR_STRCRC;
2114                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2115                         }
2116                 }
2117         } else {
2118                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2119
2120                 /* clear STRCRC bit in all queues */
2121                 if (hw->mac.type == e1000_i350 ||
2122                     hw->mac.type == e1000_i210 ||
2123                     hw->mac.type == e1000_i211 ||
2124                     hw->mac.type == e1000_i354) {
2125                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2126                                 rxq = dev->data->rx_queues[i];
2127                                 uint32_t dvmolr = E1000_READ_REG(hw,
2128                                         E1000_DVMOLR(rxq->reg_idx));
2129                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2130                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2131                         }
2132                 }
2133         }
2134
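             /*
              * Final RCTL programming: enable the receiver, accept broadcasts,
              * disable loopback, set the RX descriptor minimum threshold to
              * half the ring, and program the multicast offset from the MAC
              * filter type.
              */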
2135         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2136         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2137                 E1000_RCTL_RDMTS_HALF |
2138                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2139
2140         /* Make sure VLAN Filters are off. */
2141         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2142                 rctl &= ~E1000_RCTL_VFE;
2143         /* Don't store bad packets. */
2144         rctl &= ~E1000_RCTL_SBP;
2145
2146         /* Enable Receives. */
2147         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2148
2149         /*
2150          * Setup the HW Rx Head and Tail Descriptor Pointers.
2151          * This needs to be done after enable.
2152          */
2153         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2154                 rxq = dev->data->rx_queues[i];
2155                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2156                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2157         }
2158
2159         return 0;
2160 }
2161
2162 /*********************************************************************
2163  *
2164  *  Enable transmit unit.
2165  *
2166  **********************************************************************/
2167 void
2168 eth_igb_tx_init(struct rte_eth_dev *dev)
2169 {
2170         struct e1000_hw     *hw;
2171         struct igb_tx_queue *txq;
2172         uint32_t tctl;
2173         uint32_t txdctl;
2174         uint16_t i;
2175
2176         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2177
2178         /* Setup the Base and Length of the Tx Descriptor Rings. */
2179         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2180                 uint64_t bus_addr;
2181                 txq = dev->data->tx_queues[i];
2182                 bus_addr = txq->tx_ring_phys_addr;
2183
2184                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2185                                 txq->nb_tx_desc *
2186                                 sizeof(union e1000_adv_tx_desc));
2187                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2188                                 (uint32_t)(bus_addr >> 32));
2189                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2190
2191                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2192                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2193                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2194
2195                 /* Setup Transmit threshold registers. */
2196                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2197                 txdctl |= txq->pthresh & 0x1F;
2198                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2199                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2200                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2201                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2202         }
2203
2204         /* Program the Transmit Control Register. */
2205         tctl = E1000_READ_REG(hw, E1000_TCTL);
2206         tctl &= ~E1000_TCTL_CT;
2207         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2208                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2209
2210         e1000_config_collision_dist(hw);
2211
2212         /* This write will effectively turn on the transmit unit. */
2213         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2214 }
2215
2216 /*********************************************************************
2217  *
2218  *  Enable VF receive unit.
2219  *
2220  **********************************************************************/
2221 int
2222 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2223 {
2224         struct e1000_hw     *hw;
2225         struct igb_rx_queue *rxq;
2226         struct rte_pktmbuf_pool_private *mbp_priv;
2227         uint32_t srrctl;
2228         uint16_t buf_size;
2229         uint16_t rctl_bsize;
2230         uint16_t i;
2231         int ret;
2232
2233         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2234
2235         /* setup MTU */
2236         e1000_rlpml_set_vf(hw,
2237                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2238                 VLAN_TAG_SIZE));
2239
2240         /* Configure and enable each RX queue. */
2241         rctl_bsize = 0;
2242         dev->rx_pkt_burst = eth_igb_recv_pkts;
2243         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2244                 uint64_t bus_addr;
2245                 uint32_t rxdctl;
2246
2247                 rxq = dev->data->rx_queues[i];
2248
2249                 /* Allocate buffers for descriptor rings and set up queue */
2250                 ret = igb_alloc_rx_queue_mbufs(rxq);
2251                 if (ret)
2252                         return ret;
2253
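                     /*
                      * Note: unlike the PF path above, the VF path indexes the
                      * queue registers by the local queue number directly.
                      */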
2254                 bus_addr = rxq->rx_ring_phys_addr;
2255                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2256                                 rxq->nb_rx_desc *
2257                                 sizeof(union e1000_adv_rx_desc));
2258                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2259                                 (uint32_t)(bus_addr >> 32));
2260                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2261
2262                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2263
2264                 /*
2265                  * Configure RX buffer size.
2266                  */
2267                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2268                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2269                                        RTE_PKTMBUF_HEADROOM);
2270                 if (buf_size >= 1024) {
2271                         /*
2272                          * Configure the BSIZEPACKET field of the SRRCTL
2273                          * register of the queue.
2274                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2275                          * If this field is equal to 0b, then RCTL.BSIZE
2276                          * determines the RX packet buffer size.
2277                          */
2278                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2279                                    E1000_SRRCTL_BSIZEPKT_MASK);
2280                         buf_size = (uint16_t) ((srrctl &
2281                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2282                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2283
2284                         /* Add dual VLAN tag length to account for double VLAN frames */
2285                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2286                                                 2 * VLAN_TAG_SIZE) > buf_size){
2287                                 if (!dev->data->scattered_rx)
2288                                         PMD_INIT_LOG(DEBUG,
2289                                                      "forcing scatter mode");
2290                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2291                                 dev->data->scattered_rx = 1;
2292                         }
2293                 } else {
2294                         /*
2295                          * Use BSIZE field of the device RCTL register.
2296                          */
2297                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2298                                 rctl_bsize = buf_size;
2299                         if (!dev->data->scattered_rx)
2300                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2301                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2302                         dev->data->scattered_rx = 1;
2303                 }
2304
2305                 /* Set if packets are dropped when no descriptors available */
2306                 if (rxq->drop_en)
2307                         srrctl |= E1000_SRRCTL_DROP_EN;
2308
2309                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2310
2311                 /* Enable this RX queue. */
2312                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2313                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2314                 rxdctl &= 0xFFF00000;
2315                 rxdctl |= (rxq->pthresh & 0x1F);
2316                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2317                 if (hw->mac.type == e1000_vfadapt) {
2318                         /*
2319                          * Workaround of 82576 VF Erratum
2320                          * force set WTHRESH to 1
2321                          * to avoid Write-Back not triggered sometimes
2322                          */
2323                         rxdctl |= 0x10000;
2324                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1!");
2325                 }
2326                 else
2327                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2328                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2329         }
2330
2331         if (dev->data->dev_conf.rxmode.enable_scatter) {
2332                 if (!dev->data->scattered_rx)
2333                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2334                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2335                 dev->data->scattered_rx = 1;
2336         }
2337
2338         /*
2339          * Setup the HW Rx Head and Tail Descriptor Pointers.
2340          * This needs to be done after enable.
2341          */
2342         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2343                 rxq = dev->data->rx_queues[i];
2344                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2345                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2346         }
2347
2348         return 0;
2349 }
2350
2351 /*********************************************************************
2352  *
2353  *  Enable VF transmit unit.
2354  *
2355  **********************************************************************/
2356 void
2357 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2358 {
2359         struct e1000_hw     *hw;
2360         struct igb_tx_queue *txq;
2361         uint32_t txdctl;
2362         uint16_t i;
2363
2364         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2365
2366         /* Setup the Base and Length of the Tx Descriptor Rings. */
2367         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2368                 uint64_t bus_addr;
2369
2370                 txq = dev->data->tx_queues[i];
2371                 bus_addr = txq->tx_ring_phys_addr;
2372                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2373                                 txq->nb_tx_desc *
2374                                 sizeof(union e1000_adv_tx_desc));
2375                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2376                                 (uint32_t)(bus_addr >> 32));
2377                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2378
2379                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2380                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2381                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2382
2383                 /* Setup Transmit threshold registers. */
2384                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2385                 txdctl |= txq->pthresh & 0x1F;
2386                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2387                 if (hw->mac.type == e1000_82576) {
2388                         /*
2389                          * Workaround of 82576 VF Erratum
2390                          * force set WTHRESH to 1
2391                          * to avoid Write-Back not triggered sometimes
2392                          */
2393                         txdctl |= 0x10000;
2394                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1!");
2395                 }
2396                 else
2397                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2398                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2399                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2400         }
2401
2402 }
2403