1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 static inline struct rte_mbuf *
77 rte_rxmbuf_alloc(struct rte_mempool *mp)
78 {
79         struct rte_mbuf *m;
80
81         m = __rte_mbuf_raw_alloc(mp);
82         __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
83         return (m);
84 }
85
86 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
87         (uint64_t) ((mb)->buf_physaddr +                   \
88                         (uint64_t) ((char *)((mb)->pkt.data) -     \
89                                 (char *)(mb)->buf_addr))
90
91 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
92         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
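
/*
 * Worked example (illustrative only, not part of the driver): assume an mbuf
 * whose buffer starts at physical address 0x100000 and whose pkt.data pointer
 * sits RTE_PKTMBUF_HEADROOM (typically 128 bytes) past buf_addr.  Then both
 * macros above evaluate to 0x100000 + 128 = 0x100080, i.e. the DMA address of
 * the first payload byte handed to the NIC in a descriptor.
 */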
93
94 /**
95  * Structure associated with each descriptor of the RX ring of a RX queue.
96  */
97 struct igb_rx_entry {
98         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
99 };
100
101 /**
102  * Structure associated with each descriptor of the TX ring of a TX queue.
103  */
104 struct igb_tx_entry {
105         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
106         uint16_t next_id; /**< Index of next descriptor in ring. */
107         uint16_t last_id; /**< Index of last scattered descriptor. */
108 };
109
110 /**
111  * Structure associated with each RX queue.
112  */
113 struct igb_rx_queue {
114         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
115         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
116         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
117         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
118         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
119         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
120         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
121         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
122         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
123         uint16_t            rx_tail;    /**< current value of RDT register. */
124         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
125         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
126         uint16_t            queue_id;   /**< RX queue index. */
127         uint16_t            reg_idx;    /**< RX queue register index. */
128         uint8_t             port_id;    /**< Device port identifier. */
129         uint8_t             pthresh;    /**< Prefetch threshold register. */
130         uint8_t             hthresh;    /**< Host threshold register. */
131         uint8_t             wthresh;    /**< Write-back threshold register. */
132         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
133         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
134 };
135
136 /**
137  * Hardware context number
138  */
139 enum igb_advctx_num {
140         IGB_CTX_0    = 0, /**< CTX0    */
141         IGB_CTX_1    = 1, /**< CTX1    */
142         IGB_CTX_NUM  = 2, /**< CTX_NUM */
143 };
144
145 /**
146  * Structure used to check whether a new context descriptor needs to be built
147  */
148 struct igb_advctx_info {
149         uint16_t flags;           /**< ol_flags related to context build. */
150         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
151         union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
152 };
153
154 /**
155  * Structure associated with each TX queue.
156  */
157 struct igb_tx_queue {
158         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
159         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
160         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
161         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
162         uint32_t               txd_type;      /**< Device-specific TXD type */
163         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
164         uint16_t               tx_tail; /**< Current value of TDT register. */
165         uint16_t               tx_head;
166         /**< Index of first used TX descriptor. */
167         uint16_t               queue_id; /**< TX queue index. */
168         uint16_t               reg_idx;  /**< TX queue register index. */
169         uint8_t                port_id;  /**< Device port identifier. */
170         uint8_t                pthresh;  /**< Prefetch threshold register. */
171         uint8_t                hthresh;  /**< Host threshold register. */
172         uint8_t                wthresh;  /**< Write-back threshold register. */
173         uint32_t               ctx_curr;
174         /**< Currently used hardware context. */
175         uint32_t               ctx_start;
176         /**< Start context position for transmit queue. */
177         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
178         /**< Hardware context history.*/
179 };
180
181 #if 1
182 #define RTE_PMD_USE_PREFETCH
183 #endif
184
185 #ifdef RTE_PMD_USE_PREFETCH
186 #define rte_igb_prefetch(p)     rte_prefetch0(p)
187 #else
188 #define rte_igb_prefetch(p)     do {} while(0)
189 #endif
190
191 #ifdef RTE_PMD_PACKET_PREFETCH
192 #define rte_packet_prefetch(p) rte_prefetch1(p)
193 #else
194 #define rte_packet_prefetch(p)  do {} while(0)
195 #endif
196
197 /*
198  * Macro for VMDq feature for 1 GbE NIC.
199  */
200 #define E1000_VMOLR_SIZE                        (8)
201
202 /*********************************************************************
203  *
204  *  TX function
205  *
206  **********************************************************************/
207
208 /*
209  * Advanced context descriptors are almost the same between igb/ixgbe.
210  * This is kept as a separate function to leave room for optimization here;
211  * rework is required to go with the pre-defined values.
212  */
213
214 static inline void
215 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
216                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
217                 uint16_t ol_flags, uint32_t vlan_macip_lens)
218 {
219         uint32_t type_tucmd_mlhl;
220         uint32_t mss_l4len_idx;
221         uint32_t ctx_idx, ctx_curr;
222         uint32_t cmp_mask;
223
224         ctx_curr = txq->ctx_curr;
225         ctx_idx = ctx_curr + txq->ctx_start;
226
227         cmp_mask = 0;
228         type_tucmd_mlhl = 0;
229
230         if (ol_flags & PKT_TX_VLAN_PKT) {
231                 cmp_mask |= TX_VLAN_CMP_MASK;
232         }
233
234         if (ol_flags & PKT_TX_IP_CKSUM) {
235                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
236                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
237         }
238
239         /* Specify which HW CTX to upload. */
240         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
241         switch (ol_flags & PKT_TX_L4_MASK) {
242         case PKT_TX_UDP_CKSUM:
243                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
244                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
245                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
246                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
247                 break;
248         case PKT_TX_TCP_CKSUM:
249                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
250                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
251                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
252                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
253                 break;
254         case PKT_TX_SCTP_CKSUM:
255                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
256                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
257                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
258                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
259                 break;
260         default:
261                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
262                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
263                 break;
264         }
265
266         txq->ctx_cache[ctx_curr].flags           = ol_flags;
267         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
268         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
269                 vlan_macip_lens & cmp_mask;
270
271         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
272         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
273         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
274         ctx_txd->seqnum_seed     = 0;
275 }
276
277 /*
278  * Check which hardware context can be used. Use the existing match
279  * or create a new context descriptor.
280  */
281 static inline uint32_t
282 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
283                 uint32_t vlan_macip_lens)
284 {
285         /* Check for a match with the current context */
286         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
287                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
288                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
289                         return txq->ctx_curr;
290         }
291
292         /* Check for a match with the second context */
293         txq->ctx_curr ^= 1;
294         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
295                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
296                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
297                         return txq->ctx_curr;
298         }
299
300         /* No match: a new context descriptor must be built */
301         return (IGB_CTX_NUM);
302 }
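
/*
 * Illustrative sketch (not compiled) of how the two-entry context cache above
 * is meant to be used by the transmit path; the names follow the definitions
 * in this file:
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
 *     new_ctx = (ctx == IGB_CTX_NUM);      // no cached match: build a new one
 *     ctx = txq->ctx_curr;                 // context index (0 or 1) to report
 *     if (new_ctx)
 *             igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, vlan_macip_lens);
 *
 * Two flows with different VLAN tags or offload flags therefore alternate
 * between IGB_CTX_0 and IGB_CTX_1 without rebuilding a context descriptor on
 * every packet.
 */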
303
304 static inline uint32_t
305 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
306 {
307         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
308         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
309         uint32_t tmp;
310
311         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
312         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
313         return tmp;
314 }
315
316 static inline uint32_t
317 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
318 {
319         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
320         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
321 }
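
/*
 * Example (illustrative only): for a packet with ol_flags set to
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_VLAN_PKT, the two helpers above
 * yield:
 *
 *     tx_desc_cksum_flags_to_olinfo() -> E1000_ADVTXD_POPTS_IXSM |
 *                                        E1000_ADVTXD_POPTS_TXSM
 *     tx_desc_vlan_flags_to_cmdtype() -> E1000_ADVTXD_DCMD_VLE
 *
 * i.e. the NIC is asked to insert the IP and TCP checksums and the VLAN tag.
 */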
322
323 uint16_t
324 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
325                uint16_t nb_pkts)
326 {
327         struct igb_tx_queue *txq;
328         struct igb_tx_entry *sw_ring;
329         struct igb_tx_entry *txe, *txn;
330         volatile union e1000_adv_tx_desc *txr;
331         volatile union e1000_adv_tx_desc *txd;
332         struct rte_mbuf     *tx_pkt;
333         struct rte_mbuf     *m_seg;
334         uint64_t buf_dma_addr;
335         uint32_t olinfo_status;
336         uint32_t cmd_type_len;
337         uint32_t pkt_len;
338         uint16_t slen;
339         uint16_t ol_flags;
340         uint16_t tx_end;
341         uint16_t tx_id;
342         uint16_t tx_last;
343         uint16_t nb_tx;
344         uint16_t tx_ol_req;
345         uint32_t new_ctx = 0;
346         uint32_t ctx = 0;
347         uint32_t vlan_macip_lens;
348
349         txq = tx_queue;
350         sw_ring = txq->sw_ring;
351         txr     = txq->tx_ring;
352         tx_id   = txq->tx_tail;
353         txe = &sw_ring[tx_id];
354
355         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
356                 tx_pkt = *tx_pkts++;
357                 pkt_len = tx_pkt->pkt.pkt_len;
358
359                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
360
361                 /*
362                  * The number of descriptors that must be allocated for a
363                  * packet is the number of segments of that packet, plus 1
364                  * Context Descriptor for the VLAN Tag Identifier, if any.
365                  * Determine the last TX descriptor to allocate in the TX ring
366                  * for the packet, starting from the current position (tx_id)
367                  * in the ring.
368                  */
369                 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
370
371                 ol_flags = tx_pkt->ol_flags;
372                 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
373                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
374
375                 /* Check whether a Context Descriptor needs to be built. */
376                 if (tx_ol_req) {
377                         ctx = what_advctx_update(txq, tx_ol_req,
378                                 vlan_macip_lens);
379                         /* Only allocate a context descriptor if required. */
380                         new_ctx = (ctx == IGB_CTX_NUM);
381                         ctx = txq->ctx_curr;
382                         tx_last = (uint16_t) (tx_last + new_ctx);
383                 }
384                 if (tx_last >= txq->nb_tx_desc)
385                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
386
387                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
388                            " tx_first=%u tx_last=%u\n",
389                            (unsigned) txq->port_id,
390                            (unsigned) txq->queue_id,
391                            (unsigned) pkt_len,
392                            (unsigned) tx_id,
393                            (unsigned) tx_last);
394
395                 /*
396                  * Check if there are enough free descriptors in the TX ring
397                  * to transmit the next packet.
398                  * This operation is based on the two following rules:
399                  *
400                  *   1- Only check that the last needed TX descriptor can be
401                  *      allocated (by construction, if that descriptor is free,
402                  *      all intermediate ones are also free).
403                  *
404                  *      For this purpose, the index of the last TX descriptor
405                  *      used for a packet (the "last descriptor" of a packet)
406                  *      is recorded in the TX entries (the last one included)
407                  *      that are associated with all TX descriptors allocated
408                  *      for that packet.
409                  *
410                  *   2- Avoid allocating the last free TX descriptor of the
411                  *      ring, so that the TDT register is never set to the
412                  *      same value stored in parallel by the NIC in the TDH
413                  *      register, which would make the TX engine of the NIC
414                  *      enter a deadlock situation.
415                  *
416                  *      By extension, avoid allocating a free descriptor that
417                  *      belongs to the last set of free descriptors allocated
418                  *      to the same previously transmitted packet.
419                  */
420
421                 /*
422                  * The "last descriptor" of the packet, if any, that
423                  * previously used the last descriptor we intend to allocate.
424                  */
425                 tx_end = sw_ring[tx_last].last_id;
426
427                 /*
428                  * The next descriptor following that "last descriptor" in the
429                  * ring.
430                  */
431                 tx_end = sw_ring[tx_end].next_id;
432
433                 /*
434                  * The "last descriptor" associated with that next descriptor.
435                  */
436                 tx_end = sw_ring[tx_end].last_id;
437
438                 /*
439                  * Check that this descriptor is free.
440                  */
441                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
442                         if (nb_tx == 0)
443                                 return (0);
444                         goto end_of_tx;
445                 }
446
447                 /*
448                  * Set common flags of all TX Data Descriptors.
449                  *
450                  * The following bits must be set in all Data Descriptors:
451                  *   - E1000_ADVTXD_DTYP_DATA
452                  *   - E1000_ADVTXD_DCMD_DEXT
453                  *
454                  * The following bits must be set in the first Data Descriptor
455                  * and are ignored in the other ones:
456                  *   - E1000_ADVTXD_DCMD_IFCS
457                  *   - E1000_ADVTXD_MAC_1588
458                  *   - E1000_ADVTXD_DCMD_VLE
459                  *
460                  * The following bits must only be set in the last Data
461                  * Descriptor:
462                  *   - E1000_TXD_CMD_EOP
463                  *
464                  * The following bits can be set in any Data Descriptor, but
465                  * are only set in the last Data Descriptor:
466                  *   - E1000_TXD_CMD_RS
467                  */
468                 cmd_type_len = txq->txd_type |
469                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
470                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
471 #if defined(RTE_LIBRTE_IEEE1588)
472                 if (ol_flags & PKT_TX_IEEE1588_TMST)
473                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
474 #endif
475                 if (tx_ol_req) {
476                         /* Setup TX Advanced context descriptor if required */
477                         if (new_ctx) {
478                                 volatile struct e1000_adv_tx_context_desc *
479                                     ctx_txd;
480
481                                 ctx_txd = (volatile struct
482                                     e1000_adv_tx_context_desc *)
483                                     &txr[tx_id];
484
485                                 txn = &sw_ring[txe->next_id];
486                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
487
488                                 if (txe->mbuf != NULL) {
489                                         rte_pktmbuf_free_seg(txe->mbuf);
490                                         txe->mbuf = NULL;
491                                 }
492
493                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
494                                     vlan_macip_lens);
495
496                                 txe->last_id = tx_last;
497                                 tx_id = txe->next_id;
498                                 txe = txn;
499                         }
500
501                         /* Setup the TX Advanced Data Descriptor */
502                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
503                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
504                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
505                 }
506
507                 m_seg = tx_pkt;
508                 do {
509                         txn = &sw_ring[txe->next_id];
510                         txd = &txr[tx_id];
511
512                         if (txe->mbuf != NULL)
513                                 rte_pktmbuf_free_seg(txe->mbuf);
514                         txe->mbuf = m_seg;
515
516                         /*
517                          * Set up transmit descriptor.
518                          */
519                         slen = (uint16_t) m_seg->pkt.data_len;
520                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
521                         txd->read.buffer_addr =
522                                 rte_cpu_to_le_64(buf_dma_addr);
523                         txd->read.cmd_type_len =
524                                 rte_cpu_to_le_32(cmd_type_len | slen);
525                         txd->read.olinfo_status =
526                                 rte_cpu_to_le_32(olinfo_status);
527                         txe->last_id = tx_last;
528                         tx_id = txe->next_id;
529                         txe = txn;
530                         m_seg = m_seg->pkt.next;
531                 } while (m_seg != NULL);
532
533                 /*
534                  * The last packet data descriptor needs End Of Packet (EOP)
535                  * and Report Status (RS).
536                  */
537                 txd->read.cmd_type_len |=
538                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
539         }
540  end_of_tx:
541         rte_wmb();
542
543         /*
544          * Set the Transmit Descriptor Tail (TDT).
545          */
546         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
547         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
548                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
549                    (unsigned) tx_id, (unsigned) nb_tx);
550         txq->tx_tail = tx_id;
551
552         return (nb_tx);
553 }
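
/*
 * Usage sketch (illustrative only, assuming a configured and started port):
 * applications do not call eth_igb_xmit_pkts() directly; it is installed as
 * dev->tx_pkt_burst and reached through the generic burst API, e.g.:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t n = ...;                        // mbufs prepared by the app
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *     while (sent < n)                         // free what could not be sent
 *             rte_pktmbuf_free(pkts[sent++]);
 */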
554
555 /*********************************************************************
556  *
557  *  RX functions
558  *
559  **********************************************************************/
560 static inline uint16_t
561 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
562 {
563         uint16_t pkt_flags;
564
565         static uint16_t ip_pkt_types_map[16] = {
566                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
567                 PKT_RX_IPV6_HDR, 0, 0, 0,
568                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
569                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
570         };
571
572 #if defined(RTE_LIBRTE_IEEE1588)
573         static uint32_t ip_pkt_etqf_map[8] = {
574                 0, 0, 0, PKT_RX_IEEE1588_PTP,
575                 0, 0, 0, 0,
576         };
577
578         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
579                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
580                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
581 #else
582         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
583                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
584 #endif
585         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
586                                                 0 : PKT_RX_RSS_HASH));
587 }
588
589 static inline uint16_t
590 rx_desc_status_to_pkt_flags(uint32_t rx_status)
591 {
592         uint16_t pkt_flags;
593
594         /* Check if VLAN present */
595         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
596                                                 PKT_RX_VLAN_PKT : 0);
597
598 #if defined(RTE_LIBRTE_IEEE1588)
599         if (rx_status & E1000_RXD_STAT_TMST)
600                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
601 #endif
602         return pkt_flags;
603 }
604
605 static inline uint16_t
606 rx_desc_error_to_pkt_flags(uint32_t rx_status)
607 {
608         /*
609          * Bit 30: IPE, IPv4 checksum error
610          * Bit 29: L4I, L4 integrity error
611          */
612
613         static uint16_t error_to_pkt_flags_map[4] = {
614                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
615                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
616         };
617         return error_to_pkt_flags_map[(rx_status >>
618                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
619 }
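
/*
 * Example (illustrative only): a received descriptor whose status/error word
 * has the IPE bit (bit 30) set and the L4I bit (bit 29) clear indexes the
 * table above with 2 and yields pkt_flags == PKT_RX_IP_CKSUM_BAD, telling the
 * application that the IPv4 header checksum was wrong while the L4 checksum
 * was not flagged.
 */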
620
621 uint16_t
622 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
623                uint16_t nb_pkts)
624 {
625         struct igb_rx_queue *rxq;
626         volatile union e1000_adv_rx_desc *rx_ring;
627         volatile union e1000_adv_rx_desc *rxdp;
628         struct igb_rx_entry *sw_ring;
629         struct igb_rx_entry *rxe;
630         struct rte_mbuf *rxm;
631         struct rte_mbuf *nmb;
632         union e1000_adv_rx_desc rxd;
633         uint64_t dma_addr;
634         uint32_t staterr;
635         uint32_t hlen_type_rss;
636         uint16_t pkt_len;
637         uint16_t rx_id;
638         uint16_t nb_rx;
639         uint16_t nb_hold;
640         uint16_t pkt_flags;
641
642         nb_rx = 0;
643         nb_hold = 0;
644         rxq = rx_queue;
645         rx_id = rxq->rx_tail;
646         rx_ring = rxq->rx_ring;
647         sw_ring = rxq->sw_ring;
648         while (nb_rx < nb_pkts) {
649                 /*
650                  * The order of operations here is important as the DD status
651                  * bit must not be read after any other descriptor fields.
652                  * rx_ring and rxdp are pointing to volatile data so the order
653                  * of accesses cannot be reordered by the compiler. If they were
654                  * not volatile, they could be reordered which could lead to
655                  * using invalid descriptor fields when read from rxd.
656                  */
657                 rxdp = &rx_ring[rx_id];
658                 staterr = rxdp->wb.upper.status_error;
659                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
660                         break;
661                 rxd = *rxdp;
662
663                 /*
664                  * End of packet.
665                  *
666                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
667                  * likely to be invalid and to be dropped by the various
668                  * validation checks performed by the network stack.
669                  *
670                  * Allocate a new mbuf to replenish the RX ring descriptor.
671                  * If the allocation fails:
672                  *    - arrange for that RX descriptor to be the first one
673                  *      being parsed the next time the receive function is
674                  *      invoked [on the same queue].
675                  *
676                  *    - Stop parsing the RX ring and return immediately.
677                  *
678                  * This policy does not drop the packet received in the RX
679                  * descriptor for which the allocation of a new mbuf failed.
680                  * Thus, it allows that packet to be retrieved later, once
681                  * mbufs have been freed in the meantime.
682                  * As a side effect, holding RX descriptors instead of
683                  * systematically giving them back to the NIC may lead to
684                  * RX ring exhaustion situations.
685                  * However, the NIC can gracefully prevent such situations
686                  * from happening by sending specific "back-pressure" flow
687                  * control frames to its peer(s).
688                  */
689                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
690                            "staterr=0x%x pkt_len=%u\n",
691                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
692                            (unsigned) rx_id, (unsigned) staterr,
693                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
694
695                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
696                 if (nmb == NULL) {
697                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
698                                    "queue_id=%u\n", (unsigned) rxq->port_id,
699                                    (unsigned) rxq->queue_id);
700                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
701                         break;
702                 }
703
704                 nb_hold++;
705                 rxe = &sw_ring[rx_id];
706                 rx_id++;
707                 if (rx_id == rxq->nb_rx_desc)
708                         rx_id = 0;
709
710                 /* Prefetch next mbuf while processing current one. */
711                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
712
713                 /*
714                  * When next RX descriptor is on a cache-line boundary,
715                  * prefetch the next 4 RX descriptors and the next 8 pointers
716                  * to mbufs.
717                  */
718                 if ((rx_id & 0x3) == 0) {
719                         rte_igb_prefetch(&rx_ring[rx_id]);
720                         rte_igb_prefetch(&sw_ring[rx_id]);
721                 }
722
723                 rxm = rxe->mbuf;
724                 rxe->mbuf = nmb;
725                 dma_addr =
726                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
727                 rxdp->read.hdr_addr = dma_addr;
728                 rxdp->read.pkt_addr = dma_addr;
729
730                 /*
731                  * Initialize the returned mbuf.
732                  * 1) setup generic mbuf fields:
733                  *    - number of segments,
734                  *    - next segment,
735                  *    - packet length,
736                  *    - RX port identifier.
737                  * 2) integrate hardware offload data, if any:
738                  *    - RSS flag & hash,
739                  *    - IP checksum flag,
740                  *    - VLAN TCI, if any,
741                  *    - error flags.
742                  */
743                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
744                                       rxq->crc_len);
745                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
746                 rte_packet_prefetch(rxm->pkt.data);
747                 rxm->pkt.nb_segs = 1;
748                 rxm->pkt.next = NULL;
749                 rxm->pkt.pkt_len = pkt_len;
750                 rxm->pkt.data_len = pkt_len;
751                 rxm->pkt.in_port = rxq->port_id;
752
753                 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
754                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
755                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
756                 rxm->pkt.vlan_macip.f.vlan_tci =
757                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
758
759                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
760                 pkt_flags = (uint16_t)(pkt_flags |
761                                 rx_desc_status_to_pkt_flags(staterr));
762                 pkt_flags = (uint16_t)(pkt_flags |
763                                 rx_desc_error_to_pkt_flags(staterr));
764                 rxm->ol_flags = pkt_flags;
765
766                 /*
767                  * Store the mbuf address into the next entry of the array
768                  * of returned packets.
769                  */
770                 rx_pkts[nb_rx++] = rxm;
771         }
772         rxq->rx_tail = rx_id;
773
774         /*
775          * If the number of free RX descriptors is greater than the RX free
776          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
777          * register.
778          * Update the RDT with the value of the last processed RX descriptor
779          * minus 1, to guarantee that the RDT register is never equal to the
780          * RDH register, which creates a "full" ring situation from the
781          * hardware point of view...
782          */
783         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
784         if (nb_hold > rxq->rx_free_thresh) {
785                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
786                            "nb_hold=%u nb_rx=%u\n",
787                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
788                            (unsigned) rx_id, (unsigned) nb_hold,
789                            (unsigned) nb_rx);
790                 rx_id = (uint16_t) ((rx_id == 0) ?
791                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
792                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
793                 nb_hold = 0;
794         }
795         rxq->nb_rx_hold = nb_hold;
796         return (nb_rx);
797 }
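
/*
 * Usage sketch (illustrative only, assuming a configured and started port):
 * this function is typically installed by the driver as dev->rx_pkt_burst and
 * reached through the generic burst API, e.g.:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t i, n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *     for (i = 0; i < n; i++) {
 *             // ... process pkts[i] ...
 *             rte_pktmbuf_free(pkts[i]);
 *     }
 */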
798
799 uint16_t
800 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
801                          uint16_t nb_pkts)
802 {
803         struct igb_rx_queue *rxq;
804         volatile union e1000_adv_rx_desc *rx_ring;
805         volatile union e1000_adv_rx_desc *rxdp;
806         struct igb_rx_entry *sw_ring;
807         struct igb_rx_entry *rxe;
808         struct rte_mbuf *first_seg;
809         struct rte_mbuf *last_seg;
810         struct rte_mbuf *rxm;
811         struct rte_mbuf *nmb;
812         union e1000_adv_rx_desc rxd;
813         uint64_t dma; /* Physical address of mbuf data buffer */
814         uint32_t staterr;
815         uint32_t hlen_type_rss;
816         uint16_t rx_id;
817         uint16_t nb_rx;
818         uint16_t nb_hold;
819         uint16_t data_len;
820         uint16_t pkt_flags;
821
822         nb_rx = 0;
823         nb_hold = 0;
824         rxq = rx_queue;
825         rx_id = rxq->rx_tail;
826         rx_ring = rxq->rx_ring;
827         sw_ring = rxq->sw_ring;
828
829         /*
830          * Retrieve RX context of current packet, if any.
831          */
832         first_seg = rxq->pkt_first_seg;
833         last_seg = rxq->pkt_last_seg;
834
835         while (nb_rx < nb_pkts) {
836         next_desc:
837                 /*
838                  * The order of operations here is important as the DD status
839                  * bit must not be read after any other descriptor fields.
840                  * rx_ring and rxdp are pointing to volatile data so the order
841                  * of accesses cannot be reordered by the compiler. If they were
842                  * not volatile, they could be reordered which could lead to
843                  * using invalid descriptor fields when read from rxd.
844                  */
845                 rxdp = &rx_ring[rx_id];
846                 staterr = rxdp->wb.upper.status_error;
847                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
848                         break;
849                 rxd = *rxdp;
850
851                 /*
852                  * Descriptor done.
853                  *
854                  * Allocate a new mbuf to replenish the RX ring descriptor.
855                  * If the allocation fails:
856                  *    - arrange for that RX descriptor to be the first one
857                  *      being parsed the next time the receive function is
858                  *      invoked [on the same queue].
859                  *
860                  *    - Stop parsing the RX ring and return immediately.
861                  *
862                  * This policy does not drop the packet received in the RX
863                  * descriptor for which the allocation of a new mbuf failed.
864                  * Thus, it allows that packet to be retrieved later, once
865                  * mbufs have been freed in the meantime.
866                  * As a side effect, holding RX descriptors instead of
867                  * systematically giving them back to the NIC may lead to
868                  * RX ring exhaustion situations.
869                  * However, the NIC can gracefully prevent such situations
870                  * from happening by sending specific "back-pressure" flow
871                  * control frames to its peer(s).
872                  */
873                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
874                            "staterr=0x%x data_len=%u\n",
875                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876                            (unsigned) rx_id, (unsigned) staterr,
877                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
878
879                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
880                 if (nmb == NULL) {
881                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882                                    "queue_id=%u\n", (unsigned) rxq->port_id,
883                                    (unsigned) rxq->queue_id);
884                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
885                         break;
886                 }
887
888                 nb_hold++;
889                 rxe = &sw_ring[rx_id];
890                 rx_id++;
891                 if (rx_id == rxq->nb_rx_desc)
892                         rx_id = 0;
893
894                 /* Prefetch next mbuf while processing current one. */
895                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
896
897                 /*
898                  * When next RX descriptor is on a cache-line boundary,
899                  * prefetch the next 4 RX descriptors and the next 8 pointers
900                  * to mbufs.
901                  */
902                 if ((rx_id & 0x3) == 0) {
903                         rte_igb_prefetch(&rx_ring[rx_id]);
904                         rte_igb_prefetch(&sw_ring[rx_id]);
905                 }
906
907                 /*
908                  * Update RX descriptor with the physical address of the new
909                  * data buffer of the new allocated mbuf.
910                  */
911                 rxm = rxe->mbuf;
912                 rxe->mbuf = nmb;
913                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
914                 rxdp->read.pkt_addr = dma;
915                 rxdp->read.hdr_addr = dma;
916
917                 /*
918                  * Set data length & data buffer address of mbuf.
919                  */
920                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
921                 rxm->pkt.data_len = data_len;
922                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
923
924                 /*
925                  * If this is the first buffer of the received packet,
926                  * set the pointer to the first mbuf of the packet and
927                  * initialize its context.
928                  * Otherwise, update the total length and the number of segments
929                  * of the current scattered packet, and update the pointer to
930                  * the last mbuf of the current packet.
931                  */
932                 if (first_seg == NULL) {
933                         first_seg = rxm;
934                         first_seg->pkt.pkt_len = data_len;
935                         first_seg->pkt.nb_segs = 1;
936                 } else {
937                         first_seg->pkt.pkt_len += data_len;
938                         first_seg->pkt.nb_segs++;
939                         last_seg->pkt.next = rxm;
940                 }
941
942                 /*
943                  * If this is not the last buffer of the received packet,
944                  * update the pointer to the last mbuf of the current scattered
945                  * packet and continue to parse the RX ring.
946                  */
947                 if (! (staterr & E1000_RXD_STAT_EOP)) {
948                         last_seg = rxm;
949                         goto next_desc;
950                 }
951
952                 /*
953                  * This is the last buffer of the received packet.
954                  * If the CRC is not stripped by the hardware:
955                  *   - Subtract the CRC length from the total packet length.
956                  *   - If the last buffer only contains the whole CRC or a part
957                  *     of it, free the mbuf associated to the last buffer.
958                  *     If part of the CRC is also contained in the previous
959                  *     mbuf, subtract the length of that CRC part from the
960                  *     data length of the previous mbuf.
961                  */
962                 rxm->pkt.next = NULL;
963                 if (unlikely(rxq->crc_len > 0)) {
964                         first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
965                         if (data_len <= ETHER_CRC_LEN) {
966                                 rte_pktmbuf_free_seg(rxm);
967                                 first_seg->pkt.nb_segs--;
968                                 last_seg->pkt.data_len = (uint16_t)
969                                         (last_seg->pkt.data_len -
970                                          (ETHER_CRC_LEN - data_len));
971                                 last_seg->pkt.next = NULL;
972                         } else
973                                 rxm->pkt.data_len =
974                                         (uint16_t) (data_len - ETHER_CRC_LEN);
975                 }
976
977                 /*
978                  * Initialize the first mbuf of the returned packet:
979                  *    - RX port identifier,
980                  *    - hardware offload data, if any:
981                  *      - RSS flag & hash,
982                  *      - IP checksum flag,
983                  *      - VLAN TCI, if any,
984                  *      - error flags.
985                  */
986                 first_seg->pkt.in_port = rxq->port_id;
987                 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
988
989                 /*
990                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
991                  * set in the pkt_flags field.
992                  */
993                 first_seg->pkt.vlan_macip.f.vlan_tci =
994                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
995                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
996                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
997                 pkt_flags = (uint16_t)(pkt_flags |
998                                 rx_desc_status_to_pkt_flags(staterr));
999                 pkt_flags = (uint16_t)(pkt_flags |
1000                                 rx_desc_error_to_pkt_flags(staterr));
1001                 first_seg->ol_flags = pkt_flags;
1002
1003                 /* Prefetch data of first segment, if configured to do so. */
1004                 rte_packet_prefetch(first_seg->pkt.data);
1005
1006                 /*
1007                  * Store the mbuf address into the next entry of the array
1008                  * of returned packets.
1009                  */
1010                 rx_pkts[nb_rx++] = first_seg;
1011
1012                 /*
1013                  * Setup receipt context for a new packet.
1014                  */
1015                 first_seg = NULL;
1016         }
1017
1018         /*
1019          * Record index of the next RX descriptor to probe.
1020          */
1021         rxq->rx_tail = rx_id;
1022
1023         /*
1024          * Save receive context.
1025          */
1026         rxq->pkt_first_seg = first_seg;
1027         rxq->pkt_last_seg = last_seg;
1028
1029         /*
1030          * If the number of free RX descriptors is greater than the RX free
1031          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1032          * register.
1033          * Update the RDT with the value of the last processed RX descriptor
1034          * minus 1, to guarantee that the RDT register is never equal to the
1035          * RDH register, which creates a "full" ring situation from the
1036          * hardware point of view...
1037          */
1038         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1039         if (nb_hold > rxq->rx_free_thresh) {
1040                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1041                            "nb_hold=%u nb_rx=%u\n",
1042                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1043                            (unsigned) rx_id, (unsigned) nb_hold,
1044                            (unsigned) nb_rx);
1045                 rx_id = (uint16_t) ((rx_id == 0) ?
1046                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1047                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1048                 nb_hold = 0;
1049         }
1050         rxq->nb_rx_hold = nb_hold;
1051         return (nb_rx);
1052 }
1053
1054 /*
1055  * Rings setup and release.
1056  *
1057  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must
1058  * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
1059  * This also optimizes for the cache line size; the hardware supports cache
1060  * line sizes of up to 128 bytes.
1061  */
1062 #define IGB_ALIGN 128
1063
1064 /*
1065  * Maximum number of Ring Descriptors.
1066  *
1067  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1068  * descriptors must meet the following condition:
1069  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1070  */
1071 #define IGB_MIN_RING_DESC 32
1072 #define IGB_MAX_RING_DESC 4096
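
/*
 * Worked example (illustrative only): both the advanced RX and TX descriptors
 * are 16 bytes, so a ring of 512 descriptors occupies 512 * 16 = 8192 bytes,
 * which is a multiple of 128 and therefore a valid RDLEN/TDLEN value.  Any
 * descriptor count that is a multiple of 8 and lies between IGB_MIN_RING_DESC
 * and IGB_MAX_RING_DESC satisfies the condition checked in the queue setup
 * functions below.
 */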
1073
1074 static const struct rte_memzone *
1075 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1076                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1077 {
1078         char z_name[RTE_MEMZONE_NAMESIZE];
1079         const struct rte_memzone *mz;
1080
1081         rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1082                         dev->driver->pci_drv.name, ring_name,
1083                                 dev->data->port_id, queue_id);
1084         mz = rte_memzone_lookup(z_name);
1085         if (mz)
1086                 return mz;
1087
1088 #ifdef RTE_LIBRTE_XEN_DOM0
1089         return rte_memzone_reserve_bounded(z_name, ring_size,
1090                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1091 #else
1092         return rte_memzone_reserve_aligned(z_name, ring_size,
1093                         socket_id, 0, IGB_ALIGN);
1094 #endif
1095 }
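
/*
 * Example (illustrative only): for "tx_ring" on port 0, queue 2, the memzone
 * name built above is "<pci_drv.name>_tx_ring_0_2".  Looking the name up
 * first makes the reservation idempotent, so a later call to the queue setup
 * function reuses the existing zone; this is why the zone is always sized for
 * IGB_MAX_RING_DESC descriptors.
 */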
1096
1097 static void
1098 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1099 {
1100         unsigned i;
1101
1102         if (txq->sw_ring != NULL) {
1103                 for (i = 0; i < txq->nb_tx_desc; i++) {
1104                         if (txq->sw_ring[i].mbuf != NULL) {
1105                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1106                                 txq->sw_ring[i].mbuf = NULL;
1107                         }
1108                 }
1109         }
1110 }
1111
1112 static void
1113 igb_tx_queue_release(struct igb_tx_queue *txq)
1114 {
1115         if (txq != NULL) {
1116                 igb_tx_queue_release_mbufs(txq);
1117                 rte_free(txq->sw_ring);
1118                 rte_free(txq);
1119         }
1120 }
1121
1122 void
1123 eth_igb_tx_queue_release(void *txq)
1124 {
1125         igb_tx_queue_release(txq);
1126 }
1127
1128 static void
1129 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1130 {
1131         txq->tx_head = 0;
1132         txq->tx_tail = 0;
1133         txq->ctx_curr = 0;
1134         memset((void*)&txq->ctx_cache, 0,
1135                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1136 }
1137
1138 static void
1139 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1140 {
1141         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1142                         .buffer_addr = 0}};
1143         struct igb_tx_entry *txe = txq->sw_ring;
1144         uint16_t i, prev;
1145         struct e1000_hw *hw;
1146
1147         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148         /* Zero out HW ring memory */
1149         for (i = 0; i < txq->nb_tx_desc; i++) {
1150                 txq->tx_ring[i] = zeroed_desc;
1151         }
1152
1153         /* Initialize ring entries */
1154         prev = (uint16_t)(txq->nb_tx_desc - 1);
1155         for (i = 0; i < txq->nb_tx_desc; i++) {
1156                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1157
1158                 txd->wb.status = E1000_TXD_STAT_DD;
1159                 txe[i].mbuf = NULL;
1160                 txe[i].last_id = i;
1161                 txe[prev].next_id = i;
1162                 prev = i;
1163         }
1164
1165         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1166         /* 82575 specific, each tx queue will use 2 hw contexts */
1167         if (hw->mac.type == e1000_82575)
1168                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1169
1170         igb_reset_tx_queue_stat(txq);
1171 }
1172
1173 int
1174 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1175                          uint16_t queue_idx,
1176                          uint16_t nb_desc,
1177                          unsigned int socket_id,
1178                          const struct rte_eth_txconf *tx_conf)
1179 {
1180         const struct rte_memzone *tz;
1181         struct igb_tx_queue *txq;
1182         struct e1000_hw     *hw;
1183         uint32_t size;
1184
1185         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186
1187         /*
1188          * Validate number of transmit descriptors.
1189          * It must not exceed hardware maximum, and must be multiple
1190          * of IGB_ALIGN.
1191          */
1192         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1193             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1194                 return -EINVAL;
1195         }
1196
1197         /*
1198          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1199          * driver.
1200          */
1201         if (tx_conf->tx_free_thresh != 0)
1202                 RTE_LOG(WARNING, PMD,
1203                         "The tx_free_thresh parameter is not "
1204                         "used for the 1G driver.\n");
1205         if (tx_conf->tx_rs_thresh != 0)
1206                 RTE_LOG(WARNING, PMD,
1207                         "The tx_rs_thresh parameter is not "
1208                         "used for the 1G driver.\n");
1209         if (tx_conf->tx_thresh.wthresh == 0)
1210                 RTE_LOG(WARNING, PMD,
1211                         "To improve 1G driver performance, consider setting "
1212                         "the TX WTHRESH value to 4, 8, or 16.\n");
1213
1214         /* Free memory prior to re-allocation if needed */
1215         if (dev->data->tx_queues[queue_idx] != NULL)
1216                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1217
1218         /* First allocate the tx queue data structure */
1219         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1220                                                         CACHE_LINE_SIZE);
1221         if (txq == NULL)
1222                 return (-ENOMEM);
1223
1224         /*
1225          * Allocate TX ring hardware descriptors. A memzone large enough to
1226          * handle the maximum ring size is allocated in order to allow for
1227          * resizing in later calls to the queue setup function.
1228          */
1229         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1230         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1231                                         size, socket_id);
1232         if (tz == NULL) {
1233                 igb_tx_queue_release(txq);
1234                 return (-ENOMEM);
1235         }
1236
1237         txq->nb_tx_desc = nb_desc;
1238         txq->pthresh = tx_conf->tx_thresh.pthresh;
1239         txq->hthresh = tx_conf->tx_thresh.hthresh;
1240         txq->wthresh = tx_conf->tx_thresh.wthresh;
1241         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1242                 txq->wthresh = 1;
1243         txq->queue_id = queue_idx;
1244         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1245                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1246         txq->port_id = dev->data->port_id;
1247
1248         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1249 #ifndef RTE_LIBRTE_XEN_DOM0
1250         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1251 #else
1252         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1253 #endif
1254         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1255         /* Allocate software ring */
1256         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1257                                    sizeof(struct igb_tx_entry) * nb_desc,
1258                                    CACHE_LINE_SIZE);
1259         if (txq->sw_ring == NULL) {
1260                 igb_tx_queue_release(txq);
1261                 return (-ENOMEM);
1262         }
1263         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1264                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1265
1266         igb_reset_tx_queue(txq, dev);
1267         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1268         dev->data->tx_queues[queue_idx] = txq;
1269
1270         return (0);
1271 }
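
/*
 * Usage sketch (illustrative only): an application configures a TX queue on
 * this driver through the generic ethdev API, which ends up in
 * eth_igb_tx_queue_setup() above, e.g.:
 *
 *     struct rte_eth_txconf tx_conf = { .tx_thresh = { .wthresh = 4 } };
 *     int ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                      &tx_conf);
 *     // queue 0, 512 descriptors; ret is 0 on success or a negative errno
 *     // matching the -EINVAL/-ENOMEM error paths above
 */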
1272
1273 static void
1274 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1275 {
1276         unsigned i;
1277
1278         if (rxq->sw_ring != NULL) {
1279                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1280                         if (rxq->sw_ring[i].mbuf != NULL) {
1281                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1282                                 rxq->sw_ring[i].mbuf = NULL;
1283                         }
1284                 }
1285         }
1286 }
1287
1288 static void
1289 igb_rx_queue_release(struct igb_rx_queue *rxq)
1290 {
1291         if (rxq != NULL) {
1292                 igb_rx_queue_release_mbufs(rxq);
1293                 rte_free(rxq->sw_ring);
1294                 rte_free(rxq);
1295         }
1296 }
1297
1298 void
1299 eth_igb_rx_queue_release(void *rxq)
1300 {
1301         igb_rx_queue_release(rxq);
1302 }
1303
1304 static void
1305 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1306 {
1307         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1308                         .pkt_addr = 0}};
1309         unsigned i;
1310
1311         /* Zero out HW ring memory */
1312         for (i = 0; i < rxq->nb_rx_desc; i++) {
1313                 rxq->rx_ring[i] = zeroed_desc;
1314         }
1315
1316         rxq->rx_tail = 0;
1317         rxq->pkt_first_seg = NULL;
1318         rxq->pkt_last_seg = NULL;
1319 }
1320
1321 int
1322 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1323                          uint16_t queue_idx,
1324                          uint16_t nb_desc,
1325                          unsigned int socket_id,
1326                          const struct rte_eth_rxconf *rx_conf,
1327                          struct rte_mempool *mp)
1328 {
1329         const struct rte_memzone *rz;
1330         struct igb_rx_queue *rxq;
1331         struct e1000_hw     *hw;
1332         unsigned int size;
1333
1334         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1335
1336         /*
1337          * Validate number of receive descriptors.
1338          * It must not exceed the hardware maximum and the resulting ring
1339          * size in bytes must be a multiple of IGB_ALIGN.
1340          */
1341         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1342             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1343                 return (-EINVAL);
1344         }
1345
1346         /* Free memory prior to re-allocation if needed */
1347         if (dev->data->rx_queues[queue_idx] != NULL) {
1348                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1349                 dev->data->rx_queues[queue_idx] = NULL;
1350         }
1351
1352         /* First allocate the RX queue data structure. */
1353         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1354                           CACHE_LINE_SIZE);
1355         if (rxq == NULL)
1356                 return (-ENOMEM);
1357         rxq->mb_pool = mp;
1358         rxq->nb_rx_desc = nb_desc;
1359         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1360         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1361         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1362         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1363                 rxq->wthresh = 1;
1364         rxq->drop_en = rx_conf->rx_drop_en;
1365         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1366         rxq->queue_id = queue_idx;
1367         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1368                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1369         rxq->port_id = dev->data->port_id;
1370         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1371                                   ETHER_CRC_LEN);
1372
1373         /*
1374          *  Allocate RX ring hardware descriptors. A memzone large enough to
1375          *  handle the maximum ring size is allocated in order to allow for
1376          *  resizing in later calls to the queue setup function.
1377          */
1378         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1379         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1380         if (rz == NULL) {
1381                 igb_rx_queue_release(rxq);
1382                 return (-ENOMEM);
1383         }
1384         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1385         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1386 #ifndef RTE_LIBRTE_XEN_DOM0
1387         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1388 #else
1389         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1390 #endif
1391         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1392
1393         /* Allocate software ring. */
1394         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1395                                    sizeof(struct igb_rx_entry) * nb_desc,
1396                                    CACHE_LINE_SIZE);
1397         if (rxq->sw_ring == NULL) {
1398                 igb_rx_queue_release(rxq);
1399                 return (-ENOMEM);
1400         }
1401         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1402                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1403
1404         dev->data->rx_queues[queue_idx] = rxq;
1405         igb_reset_rx_queue(rxq);
1406
1407         return 0;
1408 }
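
/*
 * Illustrative sketch, not part of the driver: the application-side call that
 * ends up in eth_igb_rx_queue_setup(). The mempool is assumed to have been
 * created by the caller; descriptor count and thresholds are example values.
 */
static int __attribute__((unused))
example_app_setup_rx_queue(uint8_t port_id, unsigned int socket_id,
                           struct rte_mempool *mb_pool)
{
        struct rte_eth_rxconf rx_conf = {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 4, /* forced to 1 on 82576, see above */
                },
                .rx_drop_en = 0, /* do not set SRRCTL.Drop_En for this queue */
        };

        /* 512 descriptors on RX queue 0; dispatches to eth_igb_rx_queue_setup(). */
        return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
                                      &rx_conf, mb_pool);
}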
1409
1410 uint32_t
1411 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1412 {
1413 #define IGB_RXQ_SCAN_INTERVAL 4
1414         volatile union e1000_adv_rx_desc *rxdp;
1415         struct igb_rx_queue *rxq;
1416         uint32_t desc = 0;
1417
1418         if (rx_queue_id >= dev->data->nb_rx_queues) {
1419                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1420                 return 0;
1421         }
1422
1423         rxq = dev->data->rx_queues[rx_queue_id];
1424         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1425
1426         while ((desc < rxq->nb_rx_desc) &&
1427                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1428                 desc += IGB_RXQ_SCAN_INTERVAL;
1429                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1430                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1431                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1432                                 desc - rxq->nb_rx_desc]);
1433         }
1434
1435         return desc;
1436 }
1437
1438 int
1439 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1440 {
1441         volatile union e1000_adv_rx_desc *rxdp;
1442         struct igb_rx_queue *rxq = rx_queue;
1443         uint32_t desc;
1444
1445         if (unlikely(offset >= rxq->nb_rx_desc))
1446                 return 0;
1447         desc = rxq->rx_tail + offset;
1448         if (desc >= rxq->nb_rx_desc)
1449                 desc -= rxq->nb_rx_desc;
1450
1451         rxdp = &rxq->rx_ring[desc];
1452         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1453 }
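
/*
 * Illustrative sketch, not part of the driver: the ring-index arithmetic used
 * by eth_igb_rx_queue_count() and eth_igb_rx_descriptor_done() above. The
 * offset from the software tail is wrapped with a single conditional
 * subtraction instead of a modulo, which is valid because the offset is
 * already known to be smaller than the ring size.
 */
static inline uint32_t __attribute__((unused))
example_ring_index(uint32_t rx_tail, uint32_t offset, uint32_t nb_rx_desc)
{
        uint32_t idx = rx_tail + offset; /* offset < nb_rx_desc is assumed */

        if (idx >= nb_rx_desc)
                idx -= nb_rx_desc;
        return idx;
}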
1454
1455 void
1456 igb_dev_clear_queues(struct rte_eth_dev *dev)
1457 {
1458         uint16_t i;
1459         struct igb_tx_queue *txq;
1460         struct igb_rx_queue *rxq;
1461
1462         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1463                 txq = dev->data->tx_queues[i];
1464                 if (txq != NULL) {
1465                         igb_tx_queue_release_mbufs(txq);
1466                         igb_reset_tx_queue(txq, dev);
1467                 }
1468         }
1469
1470         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1471                 rxq = dev->data->rx_queues[i];
1472                 if (rxq != NULL) {
1473                         igb_rx_queue_release_mbufs(rxq);
1474                         igb_reset_rx_queue(rxq);
1475                 }
1476         }
1477 }
1478
1479 /**
1480  * Receive Side Scaling (RSS).
1481  * See section 7.1.1.7 in the following document:
1482  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1483  *
1484  * Principles:
1485  * The source and destination IP addresses of the IP header and the source and
1486  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1487  * against a configurable random key to compute a 32-bit RSS hash result.
1488  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1489  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1490  * RSS output index, which is used as the RX queue index in which to store
1491  * the received packets.
1492  * The following output is supplied in the RX write-back descriptor:
1493  *     - 32-bit result of the Microsoft RSS hash function,
1494  *     - 4-bit RSS type field.
1495  */
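
/*
 * Illustrative sketch, not part of the driver: how the RSS result described
 * above selects an RX queue. The hash value and the redirection table
 * contents are example inputs supplied by the caller.
 */
static inline uint8_t __attribute__((unused))
example_rss_queue_from_hash(uint32_t rss_hash, const uint8_t reta[128])
{
        /*
         * The 7 least significant bits of the 32-bit hash index the 128-entry
         * redirection table; each entry holds the target RX queue index.
         */
        return reta[rss_hash & 0x7F];
}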
1496
1497 /*
1498  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1499  * Used as the default key.
1500  */
1501 static uint8_t rss_intel_key[40] = {
1502         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1503         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1504         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1505         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1506         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1507 };
1508
1509 static void
1510 igb_rss_disable(struct rte_eth_dev *dev)
1511 {
1512         struct e1000_hw *hw;
1513         uint32_t mrqc;
1514
1515         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1517         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1518         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1519 }
1520
1521 static void
1522 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1523 {
1524         uint8_t  *hash_key;
1525         uint32_t rss_key;
1526         uint32_t mrqc;
1527         uint16_t rss_hf;
1528         uint16_t i;
1529
1530         hash_key = rss_conf->rss_key;
1531         if (hash_key != NULL) {
1532                 /* Fill in RSS hash key */
1533                 for (i = 0; i < 10; i++) {
1534                         rss_key  = hash_key[(i * 4)];
1535                         rss_key |= hash_key[(i * 4) + 1] << 8;
1536                         rss_key |= hash_key[(i * 4) + 2] << 16;
1537                         rss_key |= hash_key[(i * 4) + 3] << 24;
1538                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1539                 }
1540         }
1541
1542         /* Set configured hashing protocols in MRQC register */
1543         rss_hf = rss_conf->rss_hf;
1544         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1545         if (rss_hf & ETH_RSS_IPV4)
1546                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1547         if (rss_hf & ETH_RSS_IPV4_TCP)
1548                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1549         if (rss_hf & ETH_RSS_IPV6)
1550                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1551         if (rss_hf & ETH_RSS_IPV6_EX)
1552                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1553         if (rss_hf & ETH_RSS_IPV6_TCP)
1554                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1555         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1556                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1557         if (rss_hf & ETH_RSS_IPV4_UDP)
1558                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1559         if (rss_hf & ETH_RSS_IPV6_UDP)
1560                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1561         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1562                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1563         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1564 }
1565
1566 int
1567 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1568                         struct rte_eth_rss_conf *rss_conf)
1569 {
1570         struct e1000_hw *hw;
1571         uint32_t mrqc;
1572         uint16_t rss_hf;
1573
1574         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1575
1576         /*
1577          * Before changing anything, check that the requested update does
1578          * not attempt to disable RSS if RSS was enabled at initialization
1579          * time, nor attempt to enable RSS if RSS was disabled at
1580          * initialization time.
1581          */
1582         rss_hf = rss_conf->rss_hf;
1583         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1584         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1585                 if (rss_hf != 0) /* Enable RSS */
1586                         return -(EINVAL);
1587                 return 0; /* Nothing to do */
1588         }
1589         /* RSS enabled */
1590         if (rss_hf == 0) /* Disable RSS */
1591                 return -(EINVAL);
1592         igb_hw_rss_hash_set(hw, rss_conf);
1593         return 0;
1594 }
1595
1596 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1597                               struct rte_eth_rss_conf *rss_conf)
1598 {
1599         struct e1000_hw *hw;
1600         uint8_t *hash_key;
1601         uint32_t rss_key;
1602         uint32_t mrqc;
1603         uint16_t rss_hf;
1604         uint16_t i;
1605
1606         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1607         hash_key = rss_conf->rss_key;
1608         if (hash_key != NULL) {
1609                 /* Return RSS hash key */
1610                 for (i = 0; i < 10; i++) {
1611                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1612                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1613                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1614                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1615                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1616                 }
1617         }
1618
1619         /* Get RSS functions configured in MRQC register */
1620         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1621         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1622                 rss_conf->rss_hf = 0;
1623                 return 0;
1624         }
1625         rss_hf = 0;
1626         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1627                 rss_hf |= ETH_RSS_IPV4;
1628         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1629                 rss_hf |= ETH_RSS_IPV4_TCP;
1630         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1631                 rss_hf |= ETH_RSS_IPV6;
1632         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1633                 rss_hf |= ETH_RSS_IPV6_EX;
1634         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1635                 rss_hf |= ETH_RSS_IPV6_TCP;
1636         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1637                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1638         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1639                 rss_hf |= ETH_RSS_IPV4_UDP;
1640         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1641                 rss_hf |= ETH_RSS_IPV6_UDP;
1642         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1643                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1644         rss_conf->rss_hf = rss_hf;
1645         return 0;
1646 }
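
/*
 * Illustrative sketch, not part of the driver: updating and reading back the
 * RSS configuration from an application, assuming the generic
 * rte_eth_dev_rss_hash_update()/rte_eth_dev_rss_hash_conf_get() wrappers that
 * dispatch to the two functions above. The selected hash functions are
 * arbitrary example choices.
 */
static int __attribute__((unused))
example_app_update_rss(uint8_t port_id)
{
        uint8_t key[40];
        struct rte_eth_rss_conf conf = {
                .rss_key = NULL, /* keep the currently programmed key */
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
        };
        int ret;

        ret = rte_eth_dev_rss_hash_update(port_id, &conf);
        if (ret != 0)
                return ret;

        /* Read back the 40-byte key and the enabled hash functions. */
        conf.rss_key = key;
        return rte_eth_dev_rss_hash_conf_get(port_id, &conf);
}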
1647
1648 static void
1649 igb_rss_configure(struct rte_eth_dev *dev)
1650 {
1651         struct rte_eth_rss_conf rss_conf;
1652         struct e1000_hw *hw;
1653         uint32_t shift;
1654         uint16_t i;
1655
1656         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1657
1658         /* Fill in redirection table. */
1659         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1660         for (i = 0; i < 128; i++) {
1661                 union e1000_reta {
1662                         uint32_t dword;
1663                         uint8_t  bytes[4];
1664                 } reta;
1665                 uint8_t q_idx;
1666
1667                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1668                                    i % dev->data->nb_rx_queues : 0);
1669                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1670                 if ((i & 3) == 3)
1671                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1672         }
1673
1674         /*
1675          * Configure the RSS key and the RSS protocols used to compute
1676          * the RSS hash of input packets.
1677          */
1678         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1679         if (rss_conf.rss_hf == 0) {
1680                 igb_rss_disable(dev);
1681                 return;
1682         }
1683         if (rss_conf.rss_key == NULL)
1684                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1685         igb_hw_rss_hash_set(hw, &rss_conf);
1686 }
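
/*
 * Illustrative sketch, not part of the driver: the RETA packing performed by
 * igb_rss_configure() above. Four consecutive one-byte redirection entries
 * are packed into each of the 32 RETA registers, so entry i ends up in byte
 * (i & 3) of register (i >> 2); on 82575 the queue index is shifted by 6.
 */
static inline uint32_t __attribute__((unused))
example_pack_reta_dword(const uint8_t q_idx[4], uint32_t shift)
{
        union {
                uint32_t dword;
                uint8_t  bytes[4];
        } reta;
        unsigned j;

        for (j = 0; j < 4; j++)
                reta.bytes[j] = (uint8_t)(q_idx[j] << shift);
        return reta.dword; /* value written to E1000_RETA(i >> 2) */
}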
1687
1688 /*
1689  * Check whether the mac type supports VMDq or not.
1690  * Return 1 if it does, otherwise return 0.
1691  */
1692 static int
1693 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1694 {
1695         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1696
1697         switch (hw->mac.type) {
1698         case e1000_82576:
1699         case e1000_82580:
1700         case e1000_i350:
1701                 return 1;
1702         case e1000_82540:
1703         case e1000_82541:
1704         case e1000_82542:
1705         case e1000_82543:
1706         case e1000_82544:
1707         case e1000_82545:
1708         case e1000_82546:
1709         case e1000_82547:
1710         case e1000_82571:
1711         case e1000_82572:
1712         case e1000_82573:
1713         case e1000_82574:
1714         case e1000_82583:
1715         case e1000_i210:
1716         case e1000_i211:
1717         default:
1718                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1719                 return 0;
1720         }
1721 }
1722
1723 static int
1724 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1725 {
1726         struct rte_eth_vmdq_rx_conf *cfg;
1727         struct e1000_hw *hw;
1728         uint32_t mrqc, vt_ctl, vmolr, rctl;
1729         int i;
1730
1731         PMD_INIT_LOG(DEBUG, ">>");
1732         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1733         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1734
1735         /* Check if the mac type supports VMDq; a return value of 0 means no support */
1736         if (igb_is_vmdq_supported(dev) == 0)
1737                 return -1;
1738
1739         igb_rss_disable(dev);
1740
1741         /* RCTL: enable VLAN filter */
1742         rctl = E1000_READ_REG(hw, E1000_RCTL);
1743         rctl |= E1000_RCTL_VFE;
1744         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1745
1746         /* MRQC: enable vmdq */
1747         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1748         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1749         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1750
1751         /* VTCTL: pool selection according to VLAN tag */
1752         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1753         if (cfg->enable_default_pool)
1754                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1755         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1756         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1757
1758         /*
1759          * VMOLR: set STRVLAN to 1 when IGMAC in VT_CTL is set to 1.
1760          * Both 82576 and 82580 support it.
1761          */
1762         if (hw->mac.type != e1000_i350) {
1763                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1764                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1765                         vmolr |= E1000_VMOLR_STRVLAN;
1766                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1767                 }
1768         }
1769
1770         /* VFTA - enable all vlan filters */
1771         for (i = 0; i < IGB_VFTA_SIZE; i++)
1772                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1773
1774         /* VFRE: enable the 8 RX pools; both 82576 and i350 support it */
1775         if (hw->mac.type != e1000_82580)
1776                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1777
1778         /*
1779          * RAH/RAL - allow pools to read specific mac addresses
1780          * In this case, all pools should be able to read from mac addr 0
1781          */
1782         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1783         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1784
1785         /* VLVF: set up filters for vlan tags as configured */
1786         for (i = 0; i < cfg->nb_pool_maps; i++) {
1787                 /* set vlan id in VF register and set the valid bit */
1788                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1789                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1790                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1791                         E1000_VLVF_POOLSEL_MASK)));
1792         }
1793
1794         E1000_WRITE_FLUSH(hw);
1795
1796         return 0;
1797 }
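
/*
 * Illustrative sketch, not part of the driver: the kind of configuration an
 * application would place in dev_conf.rx_adv_conf.vmdq_rx_conf (together with
 * rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY) before igb_vmdq_rx_hw_configure()
 * runs. The VLAN ids and pool bitmaps are arbitrary example values, and the
 * nb_queue_pools assignment assumes the usual ethdev VMDq config layout.
 */
static void __attribute__((unused))
example_fill_vmdq_rx_conf(struct rte_eth_vmdq_rx_conf *cfg)
{
        memset(cfg, 0, sizeof(*cfg));
        cfg->nb_queue_pools = ETH_8_POOLS;
        cfg->enable_default_pool = 1;
        cfg->default_pool = 0;          /* unmatched traffic goes to pool 0 */
        cfg->nb_pool_maps = 2;
        cfg->pool_map[0].vlan_id = 100; /* VLAN 100 -> pool 1 */
        cfg->pool_map[0].pools = 1 << 1;
        cfg->pool_map[1].vlan_id = 200; /* VLAN 200 -> pool 2 */
        cfg->pool_map[1].pools = 1 << 2;
}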
1798
1799
1800 /*********************************************************************
1801  *
1802  *  Enable receive unit.
1803  *
1804  **********************************************************************/
1805
1806 static int
1807 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1808 {
1809         struct igb_rx_entry *rxe = rxq->sw_ring;
1810         uint64_t dma_addr;
1811         unsigned i;
1812
1813         /* Initialize software ring entries. */
1814         for (i = 0; i < rxq->nb_rx_desc; i++) {
1815                 volatile union e1000_adv_rx_desc *rxd;
1816                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1817
1818                 if (mbuf == NULL) {
1819                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1820                                 "queue_id=%hu\n", rxq->queue_id);
1821                         igb_rx_queue_release(rxq);
1822                         return (-ENOMEM);
1823                 }
1824                 dma_addr =
1825                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1826                 rxd = &rxq->rx_ring[i];
1827                 rxd->read.hdr_addr = dma_addr;
1828                 rxd->read.pkt_addr = dma_addr;
1829                 rxe[i].mbuf = mbuf;
1830         }
1831
1832         return 0;
1833 }
1834
1835 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1836 static int
1837 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1838 {
1839         struct e1000_hw *hw =
1840                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1841         uint32_t mrqc;
1842
1843         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1844                 /*
1845                  * SRIOV active scheme
1846                  * FIXME: add support for RSS together with VMDq & SRIOV
1847                  */
1848                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1849                 /* 011b: ignore Def_Q, use the default pool from VT_CTL.DEF_PL */
1850                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1851                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1852         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1853                 /*
1854                  * SRIOV inactive scheme
1855                  */
1856                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1857                         case ETH_MQ_RX_RSS:
1858                                 igb_rss_configure(dev);
1859                                 break;
1860                         case ETH_MQ_RX_VMDQ_ONLY:
1861                                 /* Configure general VMDq-only RX parameters */
1862                                 igb_vmdq_rx_hw_configure(dev);
1863                                 break;
1864                         case ETH_MQ_RX_NONE:
1865                                 /* if mq_mode is none, disable RSS mode. */
1866                         default:
1867                                 igb_rss_disable(dev);
1868                                 break;
1869                 }
1870         }
1871
1872         return 0;
1873 }
1874
1875 int
1876 eth_igb_rx_init(struct rte_eth_dev *dev)
1877 {
1878         struct e1000_hw     *hw;
1879         struct igb_rx_queue *rxq;
1880         struct rte_pktmbuf_pool_private *mbp_priv;
1881         uint32_t rctl;
1882         uint32_t rxcsum;
1883         uint32_t srrctl;
1884         uint16_t buf_size;
1885         uint16_t rctl_bsize;
1886         uint16_t i;
1887         int ret;
1888
1889         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1890         srrctl = 0;
1891
1892         /*
1893          * Make sure receives are disabled while setting
1894          * up the descriptor ring.
1895          */
1896         rctl = E1000_READ_REG(hw, E1000_RCTL);
1897         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1898
1899         /*
1900          * Configure support of jumbo frames, if any.
1901          */
1902         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1903                 rctl |= E1000_RCTL_LPE;
1904
1905                 /*
1906                  * Set maximum packet length by default, and might be updated
1907                  * together with enabling/disabling dual VLAN.
1908                  */
1909                 E1000_WRITE_REG(hw, E1000_RLPML,
1910                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1911                                                 VLAN_TAG_SIZE);
1912         } else
1913                 rctl &= ~E1000_RCTL_LPE;
1914
1915         /* Configure and enable each RX queue. */
1916         rctl_bsize = 0;
1917         dev->rx_pkt_burst = eth_igb_recv_pkts;
1918         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1919                 uint64_t bus_addr;
1920                 uint32_t rxdctl;
1921
1922                 rxq = dev->data->rx_queues[i];
1923
1924                 /* Allocate buffers for descriptor rings and set up queue */
1925                 ret = igb_alloc_rx_queue_mbufs(rxq);
1926                 if (ret)
1927                         return ret;
1928
1929                 /*
1930                  * Reset crc_len in case it was changed after queue setup by a
1931                  * call to configure.
1932                  */
1933                 rxq->crc_len =
1934                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1935                                                         0 : ETHER_CRC_LEN);
1936
1937                 bus_addr = rxq->rx_ring_phys_addr;
1938                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1939                                 rxq->nb_rx_desc *
1940                                 sizeof(union e1000_adv_rx_desc));
1941                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1942                                 (uint32_t)(bus_addr >> 32));
1943                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1944
1945                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1946
1947                 /*
1948                  * Configure RX buffer size.
1949                  */
1950                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1951                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1952                                        RTE_PKTMBUF_HEADROOM);
1953                 if (buf_size >= 1024) {
1954                         /*
1955                          * Configure the BSIZEPACKET field of the SRRCTL
1956                          * register of the queue.
1957                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1958                          * If this field is equal to 0b, then RCTL.BSIZE
1959                          * determines the RX packet buffer size.
1960                          */
1961                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1962                                    E1000_SRRCTL_BSIZEPKT_MASK);
1963                         buf_size = (uint16_t) ((srrctl &
1964                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1965                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1966
1967                         /* Add dual VLAN tag length to support dual VLAN */
1968                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1969                                                 2 * VLAN_TAG_SIZE) > buf_size){
1970                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1971                                 dev->data->scattered_rx = 1;
1972                         }
1973                 } else {
1974                         /*
1975                          * Use BSIZE field of the device RCTL register.
1976                          */
1977                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1978                                 rctl_bsize = buf_size;
1979                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1980                         dev->data->scattered_rx = 1;
1981                 }
1982
1983                 /* Set if packets are dropped when no descriptors available */
1984                 if (rxq->drop_en)
1985                         srrctl |= E1000_SRRCTL_DROP_EN;
1986
1987                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
1988
1989                 /* Enable this RX queue. */
1990                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
1991                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1992                 rxdctl &= 0xFFF00000;
1993                 rxdctl |= (rxq->pthresh & 0x1F);
1994                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1995                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1996                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
1997         }
1998
1999         /*
2000          * Setup BSIZE field of RCTL register, if needed.
2001          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2002          * register, since the code above configures the SRRCTL register of
2003          * the RX queue in such a case.
2004          * All configurable sizes are:
2005          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2006          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2007          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2008          *  2048: rctl |= E1000_RCTL_SZ_2048;
2009          *  1024: rctl |= E1000_RCTL_SZ_1024;
2010          *   512: rctl |= E1000_RCTL_SZ_512;
2011          *   256: rctl |= E1000_RCTL_SZ_256;
2012          */
2013         if (rctl_bsize > 0) {
2014                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2015                         rctl |= E1000_RCTL_SZ_512;
2016                 else /* 256 <= buf_size < 512 - use 256 */
2017                         rctl |= E1000_RCTL_SZ_256;
2018         }
2019
2020         /*
2021          * Configure RSS if device configured with multiple RX queues.
2022          */
2023         igb_dev_mq_rx_configure(dev);
2024
2025         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2026         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2027
2028         /*
2029          * Setup the Checksum Register.
2030          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2031          */
2032         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2033         rxcsum |= E1000_RXCSUM_PCSD;
2034
2035         /* Enable both L3/L4 rx checksum offload */
2036         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2037                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2038         else
2039                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2040         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2041
2042         /* Setup the Receive Control Register. */
2043         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2044                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2045
2046                 /* set STRCRC bit in all queues */
2047                 if (hw->mac.type == e1000_i350 ||
2048                     hw->mac.type == e1000_i210 ||
2049                     hw->mac.type == e1000_i211 ||
2050                     hw->mac.type == e1000_i354) {
2051                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2052                                 rxq = dev->data->rx_queues[i];
2053                                 uint32_t dvmolr = E1000_READ_REG(hw,
2054                                         E1000_DVMOLR(rxq->reg_idx));
2055                                 dvmolr |= E1000_DVMOLR_STRCRC;
2056                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2057                         }
2058                 }
2059         } else {
2060                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2061
2062                 /* clear STRCRC bit in all queues */
2063                 if (hw->mac.type == e1000_i350 ||
2064                     hw->mac.type == e1000_i210 ||
2065                     hw->mac.type == e1000_i211 ||
2066                     hw->mac.type == e1000_i354) {
2067                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2068                                 rxq = dev->data->rx_queues[i];
2069                                 uint32_t dvmolr = E1000_READ_REG(hw,
2070                                         E1000_DVMOLR(rxq->reg_idx));
2071                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2072                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2073                         }
2074                 }
2075         }
2076
2077         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2078         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2079                 E1000_RCTL_RDMTS_HALF |
2080                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2081
2082         /* Make sure VLAN Filters are off. */
2083         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2084                 rctl &= ~E1000_RCTL_VFE;
2085         /* Don't store bad packets. */
2086         rctl &= ~E1000_RCTL_SBP;
2087
2088         /* Enable Receives. */
2089         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2090
2091         /*
2092          * Setup the HW Rx Head and Tail Descriptor Pointers.
2093          * This needs to be done after enable.
2094          */
2095         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2096                 rxq = dev->data->rx_queues[i];
2097                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2098                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2099         }
2100
2101         return 0;
2102 }
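
/*
 * Illustrative sketch, not part of the driver: the per-queue buffer sizing
 * logic used by eth_igb_rx_init() above (and by eth_igbvf_rx_init() below).
 * Given the data room of the mbuf pool, it derives the 1 KB granular
 * SRRCTL.BSIZEPACKET value and reports whether scattered RX would be needed
 * for the configured maximum packet length.
 */
static int __attribute__((unused))
example_need_scattered_rx(uint16_t mbuf_data_room_size, uint32_t max_rx_pkt_len)
{
        uint16_t buf_size = (uint16_t)(mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
        uint32_t bsizepkt;

        if (buf_size < 1024)
                return 1; /* RCTL.BSIZE (512/256 bytes) is used: always scatter */

        /* Round down to the 1 KB granularity programmed into SRRCTL. */
        bsizepkt = (buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                   E1000_SRRCTL_BSIZEPKT_MASK;
        buf_size = (uint16_t)(bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT);

        /* Account for two VLAN tags (dual VLAN) on top of the frame. */
        return (max_rx_pkt_len + 2 * VLAN_TAG_SIZE) > buf_size;
}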
2103
2104 /*********************************************************************
2105  *
2106  *  Enable transmit unit.
2107  *
2108  **********************************************************************/
2109 void
2110 eth_igb_tx_init(struct rte_eth_dev *dev)
2111 {
2112         struct e1000_hw     *hw;
2113         struct igb_tx_queue *txq;
2114         uint32_t tctl;
2115         uint32_t txdctl;
2116         uint16_t i;
2117
2118         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2119
2120         /* Setup the Base and Length of the Tx Descriptor Rings. */
2121         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2122                 uint64_t bus_addr;
2123                 txq = dev->data->tx_queues[i];
2124                 bus_addr = txq->tx_ring_phys_addr;
2125
2126                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2127                                 txq->nb_tx_desc *
2128                                 sizeof(union e1000_adv_tx_desc));
2129                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2130                                 (uint32_t)(bus_addr >> 32));
2131                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2132
2133                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2134                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2135                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2136
2137                 /* Setup Transmit threshold registers. */
2138                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2139                 txdctl |= txq->pthresh & 0x1F;
2140                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2141                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2142                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2143                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2144         }
2145
2146         /* Program the Transmit Control Register. */
2147         tctl = E1000_READ_REG(hw, E1000_TCTL);
2148         tctl &= ~E1000_TCTL_CT;
2149         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2150                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2151
2152         e1000_config_collision_dist(hw);
2153
2154         /* This write will effectively turn on the transmit unit. */
2155         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2156 }
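
/*
 * Illustrative sketch, not part of the driver: the threshold packing used for
 * the per-queue TXDCTL/RXDCTL registers throughout this file. PTHRESH lives
 * in bits 4:0, HTHRESH in bits 12:8 and WTHRESH in bits 20:16; the RX init
 * code above also clears the old low-order fields first, which this mirrors.
 */
static inline uint32_t __attribute__((unused))
example_pack_thresholds(uint32_t reg, uint8_t pthresh, uint8_t hthresh,
                        uint8_t wthresh)
{
        reg &= 0xFFF00000;              /* clear the low threshold bits, as RX init does */
        reg |= pthresh & 0x1F;
        reg |= (uint32_t)(hthresh & 0x1F) << 8;
        reg |= (uint32_t)(wthresh & 0x1F) << 16;
        return reg;
}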
2157
2158 /*********************************************************************
2159  *
2160  *  Enable VF receive unit.
2161  *
2162  **********************************************************************/
2163 int
2164 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2165 {
2166         struct e1000_hw     *hw;
2167         struct igb_rx_queue *rxq;
2168         struct rte_pktmbuf_pool_private *mbp_priv;
2169         uint32_t srrctl;
2170         uint16_t buf_size;
2171         uint16_t rctl_bsize;
2172         uint16_t i;
2173         int ret;
2174
2175         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2176
2177         /* setup MTU */
2178         e1000_rlpml_set_vf(hw,
2179                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2180                 VLAN_TAG_SIZE));
2181
2182         /* Configure and enable each RX queue. */
2183         rctl_bsize = 0;
2184         dev->rx_pkt_burst = eth_igb_recv_pkts;
2185         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2186                 uint64_t bus_addr;
2187                 uint32_t rxdctl;
2188
2189                 rxq = dev->data->rx_queues[i];
2190
2191                 /* Allocate buffers for descriptor rings and set up queue */
2192                 ret = igb_alloc_rx_queue_mbufs(rxq);
2193                 if (ret)
2194                         return ret;
2195
2196                 bus_addr = rxq->rx_ring_phys_addr;
2197                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2198                                 rxq->nb_rx_desc *
2199                                 sizeof(union e1000_adv_rx_desc));
2200                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2201                                 (uint32_t)(bus_addr >> 32));
2202                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2203
2204                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2205
2206                 /*
2207                  * Configure RX buffer size.
2208                  */
2209                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2210                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2211                                        RTE_PKTMBUF_HEADROOM);
2212                 if (buf_size >= 1024) {
2213                         /*
2214                          * Configure the BSIZEPACKET field of the SRRCTL
2215                          * register of the queue.
2216                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2217                          * If this field is equal to 0b, then RCTL.BSIZE
2218                          * determines the RX packet buffer size.
2219                          */
2220                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2221                                    E1000_SRRCTL_BSIZEPKT_MASK);
2222                         buf_size = (uint16_t) ((srrctl &
2223                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2224                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2225
2226                         /* Add dual VLAN tag length to support dual VLAN */
2227                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2228                                                 2 * VLAN_TAG_SIZE) > buf_size){
2229                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2230                                 dev->data->scattered_rx = 1;
2231                         }
2232                 } else {
2233                         /*
2234                          * Use BSIZE field of the device RCTL register.
2235                          */
2236                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2237                                 rctl_bsize = buf_size;
2238                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2239                         dev->data->scattered_rx = 1;
2240                 }
2241
2242                 /* Set if packets are dropped when no descriptors available */
2243                 if (rxq->drop_en)
2244                         srrctl |= E1000_SRRCTL_DROP_EN;
2245
2246                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2247
2248                 /* Enable this RX queue. */
2249                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2250                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2251                 rxdctl &= 0xFFF00000;
2252                 rxdctl |= (rxq->pthresh & 0x1F);
2253                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2254                 if (hw->mac.type == e1000_vfadapt) {
2255                         /*
2256                          * Workaround for the 82576 VF erratum:
2257                          * force WTHRESH to 1 to avoid the write-back
2258                          * sometimes not being triggered
2259                          */
2260                         rxdctl |= 0x10000;
2261                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
2262                 }
2263                 else
2264                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2265                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2266         }
2267
2268         /*
2269          * Setup the HW Rx Head and Tail Descriptor Pointers.
2270          * This needs to be done after enable.
2271          */
2272         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2273                 rxq = dev->data->rx_queues[i];
2274                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2275                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2276         }
2277
2278         return 0;
2279 }
2280
2281 /*********************************************************************
2282  *
2283  *  Enable VF transmit unit.
2284  *
2285  **********************************************************************/
2286 void
2287 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2288 {
2289         struct e1000_hw     *hw;
2290         struct igb_tx_queue *txq;
2291         uint32_t txdctl;
2292         uint16_t i;
2293
2294         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2295
2296         /* Setup the Base and Length of the Tx Descriptor Rings. */
2297         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2298                 uint64_t bus_addr;
2299
2300                 txq = dev->data->tx_queues[i];
2301                 bus_addr = txq->tx_ring_phys_addr;
2302                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2303                                 txq->nb_tx_desc *
2304                                 sizeof(union e1000_adv_tx_desc));
2305                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2306                                 (uint32_t)(bus_addr >> 32));
2307                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2308
2309                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2310                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2311                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2312
2313                 /* Setup Transmit threshold registers. */
2314                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2315                 txdctl |= txq->pthresh & 0x1F;
2316                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2317                 if (hw->mac.type == e1000_82576) {
2318                         /*
2319                          * Workaround for the 82576 VF erratum:
2320                          * force WTHRESH to 1 to avoid the write-back
2321                          * sometimes not being triggered
2322                          */
2323                         txdctl |= 0x10000;
2324                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
2325                 }
2326                 else
2327                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2328                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2329                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2330         }
2331
2332 }
2333