igb/ixgbe: reset queue pointers after releasing
[dpdk.git] / lib / librte_pmd_e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 static inline struct rte_mbuf *
77 rte_rxmbuf_alloc(struct rte_mempool *mp)
78 {
79         struct rte_mbuf *m;
80
81         m = __rte_mbuf_raw_alloc(mp);
82         __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
83         return (m);
84 }
85
86 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
87         (uint64_t) ((mb)->buf_physaddr +                   \
88                         (uint64_t) ((char *)((mb)->pkt.data) -     \
89                                 (char *)(mb)->buf_addr))
90
91 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
92         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
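
/*
 * Illustration (added; values are only an example): with buf_physaddr =
 * 0x1000 and RTE_PKTMBUF_HEADROOM = 128, a freshly reset mbuf has
 * pkt.data = buf_addr + 128, so both macros evaluate to 0x1080. They
 * differ once pkt.data has been moved, e.g. by rte_pktmbuf_adj() or
 * rte_pktmbuf_prepend().
 */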
93
94 /**
95  * Structure associated with each descriptor of the RX ring of a RX queue.
96  */
97 struct igb_rx_entry {
98         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
99 };
100
101 /**
102  * Structure associated with each descriptor of the TX ring of a TX queue.
103  */
104 struct igb_tx_entry {
105         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
106         uint16_t next_id; /**< Index of next descriptor in ring. */
107         uint16_t last_id; /**< Index of last scattered descriptor. */
108 };
109
110 /**
111  * Structure associated with each RX queue.
112  */
113 struct igb_rx_queue {
114         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
115         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
116         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
117         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
118         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
119         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
120         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
121         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
122         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
123         uint16_t            rx_tail;    /**< current value of RDT register. */
124         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
125         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
126         uint16_t            queue_id;   /**< RX queue index. */
127         uint16_t            reg_idx;    /**< RX queue register index. */
128         uint8_t             port_id;    /**< Device port identifier. */
129         uint8_t             pthresh;    /**< Prefetch threshold register. */
130         uint8_t             hthresh;    /**< Host threshold register. */
131         uint8_t             wthresh;    /**< Write-back threshold register. */
132         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
133         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
134 };
135
136 /**
137  * Hardware context number
138  */
139 enum igb_advctx_num {
140         IGB_CTX_0    = 0, /**< CTX0    */
141         IGB_CTX_1    = 1, /**< CTX1    */
142         IGB_CTX_NUM  = 2, /**< CTX_NUM */
143 };
144
145 /**
146  * Structure to check if a new context needs to be built
147  */
148 struct igb_advctx_info {
149         uint16_t flags;           /**< ol_flags related to context build. */
150         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
151         union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
152 };
153
154 /**
155  * Structure associated with each TX queue.
156  */
157 struct igb_tx_queue {
158         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
159         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
160         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
161         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
162         uint32_t               txd_type;      /**< Device-specific TXD type */
163         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
164         uint16_t               tx_tail; /**< Current value of TDT register. */
165         uint16_t               tx_head;
166         /**< Index of first used TX descriptor. */
167         uint16_t               queue_id; /**< TX queue index. */
168         uint16_t               reg_idx;  /**< TX queue register index. */
169         uint8_t                port_id;  /**< Device port identifier. */
170         uint8_t                pthresh;  /**< Prefetch threshold register. */
171         uint8_t                hthresh;  /**< Host threshold register. */
172         uint8_t                wthresh;  /**< Write-back threshold register. */
173         uint32_t               ctx_curr;
174         /**< Current used hardware descriptor. */
175         uint32_t               ctx_start;
176         /**< Start context position for transmit queue. */
177         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
178         /**< Hardware context history.*/
179 };
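
/*
 * Note (added for clarity): ctx_cache[] mirrors the two hardware context
 * slots (IGB_CTX_0/IGB_CTX_1) that a queue may have programmed, ctx_curr
 * records which slot was used last, and ctx_start is a per-queue offset
 * (non-zero only on 82575, see igb_reset_tx_queue()) so that each queue
 * addresses its own pair of hardware contexts.
 */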
180
181 #if 1
182 #define RTE_PMD_USE_PREFETCH
183 #endif
184
185 #ifdef RTE_PMD_USE_PREFETCH
186 #define rte_igb_prefetch(p)     rte_prefetch0(p)
187 #else
188 #define rte_igb_prefetch(p)     do {} while(0)
189 #endif
190
191 #ifdef RTE_PMD_PACKET_PREFETCH
192 #define rte_packet_prefetch(p) rte_prefetch1(p)
193 #else
194 #define rte_packet_prefetch(p)  do {} while(0)
195 #endif
196
197 /*
198  * Macro for VMDq feature for 1 GbE NIC.
199  */
200 #define E1000_VMOLR_SIZE                        (8)
201
202 /*********************************************************************
203  *
204  *  TX function
205  *
206  **********************************************************************/
207
208 /*
209  * Advanced context descriptors are almost the same between igb and ixgbe.
210  * This is kept as a separate function, leaving room for optimization here.
211  * A rework is required to switch to the pre-defined values.
212  */
213
214 static inline void
215 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
216                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
217                 uint16_t ol_flags, uint32_t vlan_macip_lens)
218 {
219         uint32_t type_tucmd_mlhl;
220         uint32_t mss_l4len_idx;
221         uint32_t ctx_idx, ctx_curr;
222         uint32_t cmp_mask;
223
224         ctx_curr = txq->ctx_curr;
225         ctx_idx = ctx_curr + txq->ctx_start;
226
227         cmp_mask = 0;
228         type_tucmd_mlhl = 0;
229
230         if (ol_flags & PKT_TX_VLAN_PKT) {
231                 cmp_mask |= TX_VLAN_CMP_MASK;
232         }
233
234         if (ol_flags & PKT_TX_IP_CKSUM) {
235                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
236                 cmp_mask |= TX_MAC_LEN_CMP_MASK;
237         }
238
239         /* Specify which HW CTX to upload. */
240         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
241         switch (ol_flags & PKT_TX_L4_MASK) {
242         case PKT_TX_UDP_CKSUM:
243                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
244                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
245                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
246                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
247                 break;
248         case PKT_TX_TCP_CKSUM:
249                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
250                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
251                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
252                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
253                 break;
254         case PKT_TX_SCTP_CKSUM:
255                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
256                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
257                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
258                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
259                 break;
260         default:
261                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
262                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
263                 break;
264         }
265
266         txq->ctx_cache[ctx_curr].flags           = ol_flags;
267         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
268         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
269                 vlan_macip_lens & cmp_mask;
270
271         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
272         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
273         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
274         ctx_txd->seqnum_seed     = 0;
275 }
276
277 /*
278  * Check which hardware context can be used. Use the existing match
279  * or create a new context descriptor.
280  */
281 static inline uint32_t
282 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
283                 uint32_t vlan_macip_lens)
284 {
285         /* Check for a match with the current context */
286         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
287                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
288                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
289                         return txq->ctx_curr;
290         }
291
292         /* Check for a match with the other (second) context */
293         txq->ctx_curr ^= 1;
294         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
295                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
296                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
297                         return txq->ctx_curr;
298         }
299
300         /* Mismatch: a new context descriptor needs to be built */
301         return (IGB_CTX_NUM);
302 }
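
/*
 * Usage sketch (added for clarity; it mirrors the logic in
 * eth_igb_xmit_pkts() below):
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
 *     new_ctx = (ctx == IGB_CTX_NUM);   // neither cached slot matched
 *     if (new_ctx)
 *             igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, vlan_macip_lens);
 *
 * i.e. a context descriptor is only written to the ring when the requested
 * offload parameters differ from both cached hardware contexts.
 */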
303
304 static inline uint32_t
305 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
306 {
307         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
308         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
309         uint32_t tmp;
310
311         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
312         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
313         return tmp;
314 }
315
316 static inline uint32_t
317 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
318 {
319         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
320         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
321 }
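
/*
 * Note (added for clarity): the two helpers above avoid branches in the
 * hot TX path by indexing a small constant table with a boolean, e.g.
 * l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0] yields
 * E1000_ADVTXD_POPTS_IXSM only when IP checksum offload was requested.
 */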
322
323 uint16_t
324 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
325                uint16_t nb_pkts)
326 {
327         struct igb_tx_queue *txq;
328         struct igb_tx_entry *sw_ring;
329         struct igb_tx_entry *txe, *txn;
330         volatile union e1000_adv_tx_desc *txr;
331         volatile union e1000_adv_tx_desc *txd;
332         struct rte_mbuf     *tx_pkt;
333         struct rte_mbuf     *m_seg;
334         uint64_t buf_dma_addr;
335         uint32_t olinfo_status;
336         uint32_t cmd_type_len;
337         uint32_t pkt_len;
338         uint16_t slen;
339         uint16_t ol_flags;
340         uint16_t tx_end;
341         uint16_t tx_id;
342         uint16_t tx_last;
343         uint16_t nb_tx;
344         uint16_t tx_ol_req;
345         uint32_t new_ctx = 0;
346         uint32_t ctx = 0;
347         uint32_t vlan_macip_lens;
348
349         txq = tx_queue;
350         sw_ring = txq->sw_ring;
351         txr     = txq->tx_ring;
352         tx_id   = txq->tx_tail;
353         txe = &sw_ring[tx_id];
354
355         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
356                 tx_pkt = *tx_pkts++;
357                 pkt_len = tx_pkt->pkt.pkt_len;
358
359                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
360
361                 /*
362                  * The number of descriptors that must be allocated for a
363                  * packet is the number of segments of that packet, plus 1
364                  * Context Descriptor for the VLAN Tag Identifier, if any.
365                  * Determine the last TX descriptor to allocate in the TX ring
366                  * for the packet, starting from the current position (tx_id)
367                  * in the ring.
368                  */
369                 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
370
371                 ol_flags = tx_pkt->ol_flags;
372                 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
373                 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
374
375                 /* Check whether a Context Descriptor needs to be built. */
376                 if (tx_ol_req) {
377                         ctx = what_advctx_update(txq, tx_ol_req,
378                                 vlan_macip_lens);
379                         /* Only allocate a context descriptor if required. */
380                         new_ctx = (ctx == IGB_CTX_NUM);
381                         ctx = txq->ctx_curr;
382                         tx_last = (uint16_t) (tx_last + new_ctx);
383                 }
384                 if (tx_last >= txq->nb_tx_desc)
385                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
386
387                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
388                            " tx_first=%u tx_last=%u\n",
389                            (unsigned) txq->port_id,
390                            (unsigned) txq->queue_id,
391                            (unsigned) pkt_len,
392                            (unsigned) tx_id,
393                            (unsigned) tx_last);
394
395                 /*
396                  * Check if there are enough free descriptors in the TX ring
397                  * to transmit the next packet.
398                  * This operation is based on the two following rules:
399                  *
400                  *   1- Only check that the last needed TX descriptor can be
401                  *      allocated (by construction, if that descriptor is free,
402                  *      all intermediate ones are also free).
403                  *
404                  *      For this purpose, the index of the last TX descriptor
405                  *      used for a packet (the "last descriptor" of a packet)
406                  *      is recorded in the TX entries (the last one included)
407                  *      that are associated with all TX descriptors allocated
408                  *      for that packet.
409                  *
410                  *   2- Avoid allocating the last free TX descriptor of the
411                  *      ring, so that the TDT register is never set to the
412                  *      same value stored in parallel by the NIC in the TDH
413                  *      register, which would make the TX engine of the NIC
414                  *      enter a deadlock situation.
415                  *
416                  *      By extension, avoid allocating a free descriptor that
417                  *      belongs to the last set of free descriptors allocated
418                  *      to the same packet previously transmitted.
419                  */
420
421                 /*
422                  * The "last descriptor" of the packet that previously used
423                  * the descriptor we now want to allocate last (tx_last), if any.
424                  */
425                 tx_end = sw_ring[tx_last].last_id;
426
427                 /*
428                  * The next descriptor following that "last descriptor" in the
429                  * ring.
430                  */
431                 tx_end = sw_ring[tx_end].next_id;
432
433                 /*
434                  * The "last descriptor" associated with that next descriptor.
435                  */
436                 tx_end = sw_ring[tx_end].last_id;
437
438                 /*
439                  * Check that this descriptor is free.
440                  */
441                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
442                         if (nb_tx == 0)
443                                 return (0);
444                         goto end_of_tx;
445                 }
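
                /*
                 * Worked example (added for clarity): if tx_last is 100 and
                 * descriptor 100 previously belonged to a packet ending at
                 * descriptor 101, then sw_ring[100].last_id == 101,
                 * sw_ring[101].next_id == 102, and tx_end becomes
                 * sw_ring[102].last_id, i.e. the last descriptor of the
                 * packet that previously used slot 102. Its DD bit being set
                 * guarantees that every descriptor up to and including
                 * tx_last has been handed back by the hardware.
                 */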
446
447                 /*
448                  * Set common flags of all TX Data Descriptors.
449                  *
450                  * The following bits must be set in all Data Descriptors:
451                  *   - E1000_ADVTXD_DTYP_DATA
452                  *   - E1000_ADVTXD_DCMD_DEXT
453                  *
454                  * The following bits must be set in the first Data Descriptor
455                  * and are ignored in the other ones:
456                  *   - E1000_ADVTXD_DCMD_IFCS
457                  *   - E1000_ADVTXD_MAC_1588
458                  *   - E1000_ADVTXD_DCMD_VLE
459                  *
460                  * The following bits must only be set in the last Data
461                  * Descriptor:
462                  *   - E1000_TXD_CMD_EOP
463                  *
464                  * The following bits can be set in any Data Descriptor, but
465                  * are only set in the last Data Descriptor:
466                  *   - E1000_TXD_CMD_RS
467                  */
468                 cmd_type_len = txq->txd_type |
469                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
470                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
471 #if defined(RTE_LIBRTE_IEEE1588)
472                 if (ol_flags & PKT_TX_IEEE1588_TMST)
473                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
474 #endif
475                 if (tx_ol_req) {
476                         /* Setup TX Advanced context descriptor if required */
477                         if (new_ctx) {
478                                 volatile struct e1000_adv_tx_context_desc *
479                                     ctx_txd;
480
481                                 ctx_txd = (volatile struct
482                                     e1000_adv_tx_context_desc *)
483                                     &txr[tx_id];
484
485                                 txn = &sw_ring[txe->next_id];
486                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
487
488                                 if (txe->mbuf != NULL) {
489                                         rte_pktmbuf_free_seg(txe->mbuf);
490                                         txe->mbuf = NULL;
491                                 }
492
493                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
494                                     vlan_macip_lens);
495
496                                 txe->last_id = tx_last;
497                                 tx_id = txe->next_id;
498                                 txe = txn;
499                         }
500
501                         /* Setup the TX Advanced Data Descriptor */
502                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
503                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
504                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
505                 }
506
507                 m_seg = tx_pkt;
508                 do {
509                         txn = &sw_ring[txe->next_id];
510                         txd = &txr[tx_id];
511
512                         if (txe->mbuf != NULL)
513                                 rte_pktmbuf_free_seg(txe->mbuf);
514                         txe->mbuf = m_seg;
515
516                         /*
517                          * Set up transmit descriptor.
518                          */
519                         slen = (uint16_t) m_seg->pkt.data_len;
520                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
521                         txd->read.buffer_addr =
522                                 rte_cpu_to_le_64(buf_dma_addr);
523                         txd->read.cmd_type_len =
524                                 rte_cpu_to_le_32(cmd_type_len | slen);
525                         txd->read.olinfo_status =
526                                 rte_cpu_to_le_32(olinfo_status);
527                         txe->last_id = tx_last;
528                         tx_id = txe->next_id;
529                         txe = txn;
530                         m_seg = m_seg->pkt.next;
531                 } while (m_seg != NULL);
532
533                 /*
534                  * The last packet data descriptor needs End Of Packet (EOP)
535                  * and Report Status (RS).
536                  */
537                 txd->read.cmd_type_len |=
538                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
539         }
540  end_of_tx:
541         rte_wmb();
542
543         /*
544          * Set the Transmit Descriptor Tail (TDT).
545          */
546         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
547         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
548                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
549                    (unsigned) tx_id, (unsigned) nb_tx);
550         txq->tx_tail = tx_id;
551
552         return (nb_tx);
553 }
554
555 /*********************************************************************
556  *
557  *  RX functions
558  *
559  **********************************************************************/
560 static inline uint16_t
561 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
562 {
563         uint16_t pkt_flags;
564
565         static uint16_t ip_pkt_types_map[16] = {
566                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
567                 PKT_RX_IPV6_HDR, 0, 0, 0,
568                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
569                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
570         };
571
572 #if defined(RTE_LIBRTE_IEEE1588)
573         static uint32_t ip_pkt_etqf_map[8] = {
574                 0, 0, 0, PKT_RX_IEEE1588_PTP,
575                 0, 0, 0, 0,
576         };
577
578         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
579                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
580                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
581 #else
582         pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
583                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
584 #endif
585         return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
586                                                 0 : PKT_RX_RSS_HASH));
587 }
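
/*
 * Note (added for clarity): in the advanced RX descriptor write-back
 * format used here, bits 3:0 of hlen_type_rss carry the RSS type (0 means
 * no hash was computed), while the four bits above them (extracted with
 * (hl_tp_rs >> 4) & 0x0F) select an entry of ip_pkt_types_map[]; when the
 * ETQF bit is set the packet matched an EtherType filter instead and the
 * packet-type mapping does not apply.
 */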
588
589 static inline uint16_t
590 rx_desc_status_to_pkt_flags(uint32_t rx_status)
591 {
592         uint16_t pkt_flags;
593
594         /* Check if VLAN present */
595         pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
596                                                 PKT_RX_VLAN_PKT : 0);
597
598 #if defined(RTE_LIBRTE_IEEE1588)
599         if (rx_status & E1000_RXD_STAT_TMST)
600                 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
601 #endif
602         return pkt_flags;
603 }
604
605 static inline uint16_t
606 rx_desc_error_to_pkt_flags(uint32_t rx_status)
607 {
608         /*
609          * Bit 30: IPE, IPv4 checksum error
610          * Bit 29: L4I, L4 integrity error
611          */
612
613         static uint16_t error_to_pkt_flags_map[4] = {
614                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
615                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
616         };
617         return error_to_pkt_flags_map[(rx_status >>
618                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
619 }
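
/*
 * Note (added for clarity): shifting rx_status by E1000_RXD_ERR_CKSUM_BIT
 * and masking with E1000_RXD_ERR_CKSUM_MSK packs the two error bits above
 * into a 2-bit table index, so index 1 maps to a bad L4 checksum, index 2
 * to a bad IP checksum and index 3 to both.
 */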
620
621 uint16_t
622 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
623                uint16_t nb_pkts)
624 {
625         struct igb_rx_queue *rxq;
626         volatile union e1000_adv_rx_desc *rx_ring;
627         volatile union e1000_adv_rx_desc *rxdp;
628         struct igb_rx_entry *sw_ring;
629         struct igb_rx_entry *rxe;
630         struct rte_mbuf *rxm;
631         struct rte_mbuf *nmb;
632         union e1000_adv_rx_desc rxd;
633         uint64_t dma_addr;
634         uint32_t staterr;
635         uint32_t hlen_type_rss;
636         uint16_t pkt_len;
637         uint16_t rx_id;
638         uint16_t nb_rx;
639         uint16_t nb_hold;
640         uint16_t pkt_flags;
641
642         nb_rx = 0;
643         nb_hold = 0;
644         rxq = rx_queue;
645         rx_id = rxq->rx_tail;
646         rx_ring = rxq->rx_ring;
647         sw_ring = rxq->sw_ring;
648         while (nb_rx < nb_pkts) {
649                 /*
650                  * The order of operations here is important as the DD status
651                  * bit must not be read after any other descriptor fields.
652                  * rx_ring and rxdp are pointing to volatile data so the order
653                  * of accesses cannot be reordered by the compiler. If they were
654                  * not volatile, they could be reordered which could lead to
655                  * using invalid descriptor fields when read from rxd.
656                  */
657                 rxdp = &rx_ring[rx_id];
658                 staterr = rxdp->wb.upper.status_error;
659                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
660                         break;
661                 rxd = *rxdp;
662
663                 /*
664                  * End of packet.
665                  *
666                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
667                  * likely to be invalid and to be dropped by the various
668                  * validation checks performed by the network stack.
669                  *
670                  * Allocate a new mbuf to replenish the RX ring descriptor.
671                  * If the allocation fails:
672                  *    - arrange for that RX descriptor to be the first one
673                  *      being parsed the next time the receive function is
674                  *      invoked [on the same queue].
675                  *
676                  *    - Stop parsing the RX ring and return immediately.
677                  *
678                  * This policy does not drop the packet received in the RX
679                  * descriptor for which the allocation of a new mbuf failed.
680                  * Thus, it allows that packet to be retrieved later, once
681                  * mbufs have been freed in the meantime.
682                  * As a side effect, holding RX descriptors instead of
683                  * systematically giving them back to the NIC may lead to
684                  * RX ring exhaustion situations.
685                  * However, the NIC can gracefully prevent such situations
686                  * from happening by sending specific "back-pressure" flow
687                  * control frames to its peer(s).
688                  */
689                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
690                            "staterr=0x%x pkt_len=%u\n",
691                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
692                            (unsigned) rx_id, (unsigned) staterr,
693                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
694
695                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
696                 if (nmb == NULL) {
697                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
698                                    "queue_id=%u\n", (unsigned) rxq->port_id,
699                                    (unsigned) rxq->queue_id);
700                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
701                         break;
702                 }
703
704                 nb_hold++;
705                 rxe = &sw_ring[rx_id];
706                 rx_id++;
707                 if (rx_id == rxq->nb_rx_desc)
708                         rx_id = 0;
709
710                 /* Prefetch next mbuf while processing current one. */
711                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
712
713                 /*
714                  * When next RX descriptor is on a cache-line boundary,
715                  * prefetch the next 4 RX descriptors and the next 8 pointers
716                  * to mbufs.
717                  */
718                 if ((rx_id & 0x3) == 0) {
719                         rte_igb_prefetch(&rx_ring[rx_id]);
720                         rte_igb_prefetch(&sw_ring[rx_id]);
721                 }
722
723                 rxm = rxe->mbuf;
724                 rxe->mbuf = nmb;
725                 dma_addr =
726                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
727                 rxdp->read.hdr_addr = dma_addr;
728                 rxdp->read.pkt_addr = dma_addr;
729
730                 /*
731                  * Initialize the returned mbuf.
732                  * 1) setup generic mbuf fields:
733                  *    - number of segments,
734                  *    - next segment,
735                  *    - packet length,
736                  *    - RX port identifier.
737                  * 2) integrate hardware offload data, if any:
738                  *    - RSS flag & hash,
739                  *    - IP checksum flag,
740                  *    - VLAN TCI, if any,
741                  *    - error flags.
742                  */
743                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
744                                       rxq->crc_len);
745                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
746                 rte_packet_prefetch(rxm->pkt.data);
747                 rxm->pkt.nb_segs = 1;
748                 rxm->pkt.next = NULL;
749                 rxm->pkt.pkt_len = pkt_len;
750                 rxm->pkt.data_len = pkt_len;
751                 rxm->pkt.in_port = rxq->port_id;
752
753                 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
754                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
755                 /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags */
756                 rxm->pkt.vlan_macip.f.vlan_tci =
757                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
758
759                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
760                 pkt_flags = (uint16_t)(pkt_flags |
761                                 rx_desc_status_to_pkt_flags(staterr));
762                 pkt_flags = (uint16_t)(pkt_flags |
763                                 rx_desc_error_to_pkt_flags(staterr));
764                 rxm->ol_flags = pkt_flags;
765
766                 /*
767                  * Store the mbuf address into the next entry of the array
768                  * of returned packets.
769                  */
770                 rx_pkts[nb_rx++] = rxm;
771         }
772         rxq->rx_tail = rx_id;
773
774         /*
775          * If the number of free RX descriptors is greater than the RX free
776          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
777          * register.
778          * Update the RDT with the value of the last processed RX descriptor
779          * minus 1, to guarantee that the RDT register is never equal to the
780          * RDH register, which creates a "full" ring situation from the
781          * hardware point of view...
782          */
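        /*
         * Example (added for clarity): with rx_free_thresh = 32, the tail
         * register is only written once more than 32 descriptors have been
         * refilled, and it is written with rx_id - 1 (wrapping to
         * nb_rx_desc - 1 when rx_id is 0) so that RDT never becomes equal
         * to RDH.
         */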
783         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
784         if (nb_hold > rxq->rx_free_thresh) {
785                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
786                            "nb_hold=%u nb_rx=%u\n",
787                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
788                            (unsigned) rx_id, (unsigned) nb_hold,
789                            (unsigned) nb_rx);
790                 rx_id = (uint16_t) ((rx_id == 0) ?
791                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
792                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
793                 nb_hold = 0;
794         }
795         rxq->nb_rx_hold = nb_hold;
796         return (nb_rx);
797 }
798
799 uint16_t
800 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
801                          uint16_t nb_pkts)
802 {
803         struct igb_rx_queue *rxq;
804         volatile union e1000_adv_rx_desc *rx_ring;
805         volatile union e1000_adv_rx_desc *rxdp;
806         struct igb_rx_entry *sw_ring;
807         struct igb_rx_entry *rxe;
808         struct rte_mbuf *first_seg;
809         struct rte_mbuf *last_seg;
810         struct rte_mbuf *rxm;
811         struct rte_mbuf *nmb;
812         union e1000_adv_rx_desc rxd;
813         uint64_t dma; /* Physical address of mbuf data buffer */
814         uint32_t staterr;
815         uint32_t hlen_type_rss;
816         uint16_t rx_id;
817         uint16_t nb_rx;
818         uint16_t nb_hold;
819         uint16_t data_len;
820         uint16_t pkt_flags;
821
822         nb_rx = 0;
823         nb_hold = 0;
824         rxq = rx_queue;
825         rx_id = rxq->rx_tail;
826         rx_ring = rxq->rx_ring;
827         sw_ring = rxq->sw_ring;
828
829         /*
830          * Retrieve RX context of current packet, if any.
831          */
832         first_seg = rxq->pkt_first_seg;
833         last_seg = rxq->pkt_last_seg;
834
835         while (nb_rx < nb_pkts) {
836         next_desc:
837                 /*
838                  * The order of operations here is important as the DD status
839                  * bit must not be read after any other descriptor fields.
840                  * rx_ring and rxdp are pointing to volatile data so the order
841                  * of accesses cannot be reordered by the compiler. If they were
842                  * not volatile, they could be reordered which could lead to
843                  * using invalid descriptor fields when read from rxd.
844                  */
845                 rxdp = &rx_ring[rx_id];
846                 staterr = rxdp->wb.upper.status_error;
847                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
848                         break;
849                 rxd = *rxdp;
850
851                 /*
852                  * Descriptor done.
853                  *
854                  * Allocate a new mbuf to replenish the RX ring descriptor.
855                  * If the allocation fails:
856                  *    - arrange for that RX descriptor to be the first one
857                  *      being parsed the next time the receive function is
858                  *      invoked [on the same queue].
859                  *
860                  *    - Stop parsing the RX ring and return immediately.
861                  *
862                  * This policy does not drop the packet received in the RX
863                  * descriptor for which the allocation of a new mbuf failed.
864                  * Thus, it allows that packet to be retrieved later, once
865                  * mbufs have been freed in the meantime.
866                  * As a side effect, holding RX descriptors instead of
867                  * systematically giving them back to the NIC may lead to
868                  * RX ring exhaustion situations.
869                  * However, the NIC can gracefully prevent such situations
870                  * from happening by sending specific "back-pressure" flow
871                  * control frames to its peer(s).
872                  */
873                 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
874                            "staterr=0x%x data_len=%u\n",
875                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876                            (unsigned) rx_id, (unsigned) staterr,
877                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
878
879                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
880                 if (nmb == NULL) {
881                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882                                    "queue_id=%u\n", (unsigned) rxq->port_id,
883                                    (unsigned) rxq->queue_id);
884                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
885                         break;
886                 }
887
888                 nb_hold++;
889                 rxe = &sw_ring[rx_id];
890                 rx_id++;
891                 if (rx_id == rxq->nb_rx_desc)
892                         rx_id = 0;
893
894                 /* Prefetch next mbuf while processing current one. */
895                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
896
897                 /*
898                  * When next RX descriptor is on a cache-line boundary,
899                  * prefetch the next 4 RX descriptors and the next 8 pointers
900                  * to mbufs.
901                  */
902                 if ((rx_id & 0x3) == 0) {
903                         rte_igb_prefetch(&rx_ring[rx_id]);
904                         rte_igb_prefetch(&sw_ring[rx_id]);
905                 }
906
907                 /*
908                  * Update RX descriptor with the physical address of the new
909                  * data buffer of the new allocated mbuf.
910                  */
911                 rxm = rxe->mbuf;
912                 rxe->mbuf = nmb;
913                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
914                 rxdp->read.pkt_addr = dma;
915                 rxdp->read.hdr_addr = dma;
916
917                 /*
918                  * Set data length & data buffer address of mbuf.
919                  */
920                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
921                 rxm->pkt.data_len = data_len;
922                 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
923
924                 /*
925                  * If this is the first buffer of the received packet,
926                  * set the pointer to the first mbuf of the packet and
927                  * initialize its context.
928                  * Otherwise, update the total length and the number of segments
929                  * of the current scattered packet, and update the pointer to
930                  * the last mbuf of the current packet.
931                  */
932                 if (first_seg == NULL) {
933                         first_seg = rxm;
934                         first_seg->pkt.pkt_len = data_len;
935                         first_seg->pkt.nb_segs = 1;
936                 } else {
937                         first_seg->pkt.pkt_len += data_len;
938                         first_seg->pkt.nb_segs++;
939                         last_seg->pkt.next = rxm;
940                 }
941
942                 /*
943                  * If this is not the last buffer of the received packet,
944                  * update the pointer to the last mbuf of the current scattered
945                  * packet and continue to parse the RX ring.
946                  */
947                 if (! (staterr & E1000_RXD_STAT_EOP)) {
948                         last_seg = rxm;
949                         goto next_desc;
950                 }
951
952                 /*
953                  * This is the last buffer of the received packet.
954                  * If the CRC is not stripped by the hardware:
955                  *   - Subtract the CRC length from the total packet length.
956                  *   - If the last buffer only contains the whole CRC or a part
957                  *     of it, free the mbuf associated to the last buffer.
958                  *     If part of the CRC is also contained in the previous
959                  *     mbuf, subtract the length of that CRC part from the
960                  *     data length of the previous mbuf.
961                  */
962                 rxm->pkt.next = NULL;
963                 if (unlikely(rxq->crc_len > 0)) {
964                         first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
965                         if (data_len <= ETHER_CRC_LEN) {
966                                 rte_pktmbuf_free_seg(rxm);
967                                 first_seg->pkt.nb_segs--;
968                                 last_seg->pkt.data_len = (uint16_t)
969                                         (last_seg->pkt.data_len -
970                                          (ETHER_CRC_LEN - data_len));
971                                 last_seg->pkt.next = NULL;
972                         } else
973                                 rxm->pkt.data_len =
974                                         (uint16_t) (data_len - ETHER_CRC_LEN);
975                 }
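
                /*
                 * Example (added for clarity): with CRC stripping disabled
                 * (crc_len == 4), if the final segment carries only 2 bytes
                 * then those 2 bytes plus the last 2 bytes of the previous
                 * segment are CRC: the final mbuf is freed and the previous
                 * segment's data_len is reduced by 4 - 2 = 2 bytes.
                 */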
976
977                 /*
978                  * Initialize the first mbuf of the returned packet:
979                  *    - RX port identifier,
980                  *    - hardware offload data, if any:
981                  *      - RSS flag & hash,
982                  *      - IP checksum flag,
983                  *      - VLAN TCI, if any,
984                  *      - error flags.
985                  */
986                 first_seg->pkt.in_port = rxq->port_id;
987                 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
988
989                 /*
990                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
991                  * set in the pkt_flags field.
992                  */
993                 first_seg->pkt.vlan_macip.f.vlan_tci =
994                         rte_le_to_cpu_16(rxd.wb.upper.vlan);
995                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
996                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
997                 pkt_flags = (uint16_t)(pkt_flags |
998                                 rx_desc_status_to_pkt_flags(staterr));
999                 pkt_flags = (uint16_t)(pkt_flags |
1000                                 rx_desc_error_to_pkt_flags(staterr));
1001                 first_seg->ol_flags = pkt_flags;
1002
1003                 /* Prefetch data of first segment, if configured to do so. */
1004                 rte_packet_prefetch(first_seg->pkt.data);
1005
1006                 /*
1007                  * Store the mbuf address into the next entry of the array
1008                  * of returned packets.
1009                  */
1010                 rx_pkts[nb_rx++] = first_seg;
1011
1012                 /*
1013                  * Setup receipt context for a new packet.
1014                  */
1015                 first_seg = NULL;
1016         }
1017
1018         /*
1019          * Record index of the next RX descriptor to probe.
1020          */
1021         rxq->rx_tail = rx_id;
1022
1023         /*
1024          * Save receive context.
1025          */
1026         rxq->pkt_first_seg = first_seg;
1027         rxq->pkt_last_seg = last_seg;
1028
1029         /*
1030          * If the number of free RX descriptors is greater than the RX free
1031          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1032          * register.
1033          * Update the RDT with the value of the last processed RX descriptor
1034          * minus 1, to guarantee that the RDT register is never equal to the
1035          * RDH register, which creates a "full" ring situation from the
1036          * hardware point of view...
1037          */
1038         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1039         if (nb_hold > rxq->rx_free_thresh) {
1040                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1041                            "nb_hold=%u nb_rx=%u\n",
1042                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1043                            (unsigned) rx_id, (unsigned) nb_hold,
1044                            (unsigned) nb_rx);
1045                 rx_id = (uint16_t) ((rx_id == 0) ?
1046                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1047                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1048                 nb_hold = 0;
1049         }
1050         rxq->nb_rx_hold = nb_hold;
1051         return (nb_rx);
1052 }
1053
1054 /*
1055  * Rings setup and release.
1056  *
1057  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1058  * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1059  * This also optimizes for the cache line size effect.
1060  * The hardware supports cache line sizes of up to 128 bytes.
1061  */
1062 #define IGB_ALIGN 128
1063
1064 /*
1065  * Maximum number of Ring Descriptors.
1066  *
1067  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1068  * descriptors must meet the following condition:
1069  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1070  */
1071 #define IGB_MIN_RING_DESC 32
1072 #define IGB_MAX_RING_DESC 4096
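
/*
 * Example (added for clarity): both union e1000_adv_rx_desc and
 * union e1000_adv_tx_desc are 16 bytes, so the "multiple of 128 bytes"
 * rule means the descriptor count must be a multiple of 8; the queue
 * setup functions below check this explicitly against IGB_ALIGN.
 */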
1073
1074 static const struct rte_memzone *
1075 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1076                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1077 {
1078         char z_name[RTE_MEMZONE_NAMESIZE];
1079         const struct rte_memzone *mz;
1080
1081         rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1082                         dev->driver->pci_drv.name, ring_name,
1083                                 dev->data->port_id, queue_id);
1084         mz = rte_memzone_lookup(z_name);
1085         if (mz)
1086                 return mz;
1087
1088 #ifdef RTE_LIBRTE_XEN_DOM0
1089         return rte_memzone_reserve_bounded(z_name, ring_size,
1090                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1091 #else
1092         return rte_memzone_reserve_aligned(z_name, ring_size,
1093                         socket_id, 0, IGB_ALIGN);
1094 #endif
1095 }
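
/*
 * Note (added for clarity): the memzone name built above encodes the
 * driver name, ring name, port id and queue id, so a later call for the
 * same queue finds the existing zone through rte_memzone_lookup() instead
 * of reserving memory again. This is what allows a queue to be set up
 * more than once without leaking descriptor ring memory.
 */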
1096
1097 static void
1098 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1099 {
1100         unsigned i;
1101
1102         if (txq->sw_ring != NULL) {
1103                 for (i = 0; i < txq->nb_tx_desc; i++) {
1104                         if (txq->sw_ring[i].mbuf != NULL) {
1105                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1106                                 txq->sw_ring[i].mbuf = NULL;
1107                         }
1108                 }
1109         }
1110 }
1111
1112 static void
1113 igb_tx_queue_release(struct igb_tx_queue *txq)
1114 {
1115         if (txq != NULL) {
1116                 igb_tx_queue_release_mbufs(txq);
1117                 rte_free(txq->sw_ring);
1118                 rte_free(txq);
1119         }
1120 }
1121
1122 void
1123 eth_igb_tx_queue_release(void *txq)
1124 {
1125         igb_tx_queue_release(txq);
1126 }
1127
1128 static void
1129 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1130 {
1131         txq->tx_head = 0;
1132         txq->tx_tail = 0;
1133         txq->ctx_curr = 0;
1134         memset((void*)&txq->ctx_cache, 0,
1135                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1136 }
1137
1138 static void
1139 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1140 {
1141         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1142                         .buffer_addr = 0}};
1143         struct igb_tx_entry *txe = txq->sw_ring;
1144         uint16_t i, prev;
1145         struct e1000_hw *hw;
1146
1147         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148         /* Zero out HW ring memory */
1149         for (i = 0; i < txq->nb_tx_desc; i++) {
1150                 txq->tx_ring[i] = zeroed_desc;
1151         }
1152
1153         /* Initialize ring entries */
1154         prev = (uint16_t)(txq->nb_tx_desc - 1);
1155         for (i = 0; i < txq->nb_tx_desc; i++) {
1156                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1157
1158                 txd->wb.status = E1000_TXD_STAT_DD;
1159                 txe[i].mbuf = NULL;
1160                 txe[i].last_id = i;
1161                 txe[prev].next_id = i;
1162                 prev = i;
1163         }
1164
1165         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1166         /* 82575 specific, each tx queue will use 2 hw contexts */
1167         if (hw->mac.type == e1000_82575)
1168                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1169
1170         igb_reset_tx_queue_stat(txq);
1171 }
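
/*
 * Note (added for clarity): after igb_reset_tx_queue() the software ring
 * entries form a circular chain through next_id (0 -> 1 -> ... ->
 * nb_tx_desc - 1 -> 0) and every hardware descriptor has its DD bit
 * pre-set, so the free-descriptor check in eth_igb_xmit_pkts() sees the
 * whole ring as available on the first transmit.
 */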
1172
1173 int
1174 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1175                          uint16_t queue_idx,
1176                          uint16_t nb_desc,
1177                          unsigned int socket_id,
1178                          const struct rte_eth_txconf *tx_conf)
1179 {
1180         const struct rte_memzone *tz;
1181         struct igb_tx_queue *txq;
1182         struct e1000_hw     *hw;
1183         uint32_t size;
1184
1185         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186
1187         /*
1188          * Validate number of transmit descriptors.
1189          * It must not exceed hardware maximum, and must be multiple
1190          * of IGB_ALIGN.
1191          */
1192         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1193             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1194                 return -EINVAL;
1195         }
1196
1197         /*
1198          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1199          * driver.
1200          */
1201         if (tx_conf->tx_free_thresh != 0)
1202                 RTE_LOG(WARNING, PMD,
1203                         "The tx_free_thresh parameter is not "
1204                         "used for the 1G driver.\n");
1205         if (tx_conf->tx_rs_thresh != 0)
1206                 RTE_LOG(WARNING, PMD,
1207                         "The tx_rs_thresh parameter is not "
1208                         "used for the 1G driver.\n");
1209         if (tx_conf->tx_thresh.wthresh == 0)
1210                 RTE_LOG(WARNING, PMD,
1211                         "To improve 1G driver performance, consider setting "
1212                         "the TX WTHRESH value to 4, 8, or 16.\n");
1213
1214         /* Free memory prior to re-allocation if needed */
1215         if (dev->data->tx_queues[queue_idx] != NULL) {
1216                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1217                 dev->data->tx_queues[queue_idx] = NULL;
1218         }
1219
1220         /* First allocate the tx queue data structure */
1221         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1222                                                         CACHE_LINE_SIZE);
1223         if (txq == NULL)
1224                 return (-ENOMEM);
1225
1226         /*
1227          * Allocate TX ring hardware descriptors. A memzone large enough to
1228          * handle the maximum ring size is allocated in order to allow for
1229          * resizing in later calls to the queue setup function.
1230          */
1231         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1232         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1233                                         size, socket_id);
1234         if (tz == NULL) {
1235                 igb_tx_queue_release(txq);
1236                 return (-ENOMEM);
1237         }
1238
1239         txq->nb_tx_desc = nb_desc;
1240         txq->pthresh = tx_conf->tx_thresh.pthresh;
1241         txq->hthresh = tx_conf->tx_thresh.hthresh;
1242         txq->wthresh = tx_conf->tx_thresh.wthresh;
1243         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1244                 txq->wthresh = 1;
1245         txq->queue_id = queue_idx;
1246         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1247                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1248         txq->port_id = dev->data->port_id;
1249
1250         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1251 #ifndef RTE_LIBRTE_XEN_DOM0
1252         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1253 #else
1254         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1255 #endif
1256         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1257         /* Allocate software ring */
1258         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1259                                    sizeof(struct igb_tx_entry) * nb_desc,
1260                                    CACHE_LINE_SIZE);
1261         if (txq->sw_ring == NULL) {
1262                 igb_tx_queue_release(txq);
1263                 return (-ENOMEM);
1264         }
1265         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1266                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1267
1268         igb_reset_tx_queue(txq, dev);
1269         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1270         dev->data->tx_queues[queue_idx] = txq;
1271
1272         return (0);
1273 }
1274
1275 static void
1276 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1277 {
1278         unsigned i;
1279
1280         if (rxq->sw_ring != NULL) {
1281                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1282                         if (rxq->sw_ring[i].mbuf != NULL) {
1283                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1284                                 rxq->sw_ring[i].mbuf = NULL;
1285                         }
1286                 }
1287         }
1288 }
1289
1290 static void
1291 igb_rx_queue_release(struct igb_rx_queue *rxq)
1292 {
1293         if (rxq != NULL) {
1294                 igb_rx_queue_release_mbufs(rxq);
1295                 rte_free(rxq->sw_ring);
1296                 rte_free(rxq);
1297         }
1298 }
1299
1300 void
1301 eth_igb_rx_queue_release(void *rxq)
1302 {
1303         igb_rx_queue_release(rxq);
1304 }
1305
1306 static void
1307 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1308 {
1309         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1310                         .pkt_addr = 0}};
1311         unsigned i;
1312
1313         /* Zero out HW ring memory */
1314         for (i = 0; i < rxq->nb_rx_desc; i++) {
1315                 rxq->rx_ring[i] = zeroed_desc;
1316         }
1317
1318         rxq->rx_tail = 0;
1319         rxq->pkt_first_seg = NULL;
1320         rxq->pkt_last_seg = NULL;
1321 }
1322
1323 int
1324 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1325                          uint16_t queue_idx,
1326                          uint16_t nb_desc,
1327                          unsigned int socket_id,
1328                          const struct rte_eth_rxconf *rx_conf,
1329                          struct rte_mempool *mp)
1330 {
1331         const struct rte_memzone *rz;
1332         struct igb_rx_queue *rxq;
1333         struct e1000_hw     *hw;
1334         unsigned int size;
1335
1336         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337
1338         /*
1339          * Validate number of receive descriptors.
1340          * It must not exceed the hardware maximum, and the ring size
1341          * in bytes must be a multiple of IGB_ALIGN.
1342          */
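        /*
         * Worked example (assuming IGB_ALIGN is 128 bytes and the 16-byte
         * advanced RX descriptor): nb_desc must be a multiple of
         * 128 / 16 = 8, so 512 or 1024 descriptors pass the check below,
         * while 1020 would be rejected with -EINVAL.
         */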
1343         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1344             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1345                 return (-EINVAL);
1346         }
1347
1348         /* Free memory prior to re-allocation if needed */
1349         if (dev->data->rx_queues[queue_idx] != NULL) {
1350                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1351                 dev->data->rx_queues[queue_idx] = NULL;
1352         }
1353
1354         /* First allocate the RX queue data structure. */
1355         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1356                           CACHE_LINE_SIZE);
1357         if (rxq == NULL)
1358                 return (-ENOMEM);
1359         rxq->mb_pool = mp;
1360         rxq->nb_rx_desc = nb_desc;
1361         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1362         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1363         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1364         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1365                 rxq->wthresh = 1;
1366         rxq->drop_en = rx_conf->rx_drop_en;
1367         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1368         rxq->queue_id = queue_idx;
1369         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1370                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1371         rxq->port_id = dev->data->port_id;
1372         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1373                                   ETHER_CRC_LEN);
1374
1375         /*
1376          *  Allocate RX ring hardware descriptors. A memzone large enough to
1377          *  handle the maximum ring size is allocated in order to allow for
1378          *  resizing in later calls to the queue setup function.
1379          */
1380         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1381         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1382         if (rz == NULL) {
1383                 igb_rx_queue_release(rxq);
1384                 return (-ENOMEM);
1385         }
1386         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1387         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1388 #ifndef RTE_LIBRTE_XEN_DOM0
1389         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1390 #else
1391         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1392 #endif
1393         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1394
1395         /* Allocate software ring. */
1396         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1397                                    sizeof(struct igb_rx_entry) * nb_desc,
1398                                    CACHE_LINE_SIZE);
1399         if (rxq->sw_ring == NULL) {
1400                 igb_rx_queue_release(rxq);
1401                 return (-ENOMEM);
1402         }
1403         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1404                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1405
1406         dev->data->rx_queues[queue_idx] = rxq;
1407         igb_reset_rx_queue(rxq);
1408
1409         return 0;
1410 }
1411
1412 uint32_t
1413 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1414 {
1415 #define IGB_RXQ_SCAN_INTERVAL 4
1416         volatile union e1000_adv_rx_desc *rxdp;
1417         struct igb_rx_queue *rxq;
1418         uint32_t desc = 0;
1419
1420         if (rx_queue_id >= dev->data->nb_rx_queues) {
1421                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1422                 return 0;
1423         }
1424
1425         rxq = dev->data->rx_queues[rx_queue_id];
1426         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1427
1428         while ((desc < rxq->nb_rx_desc) &&
1429                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1430                 desc += IGB_RXQ_SCAN_INTERVAL;
1431                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1432                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1433                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1434                                 desc - rxq->nb_rx_desc]);
1435         }
1436
1437         return desc;
1438 }
1439
1440 int
1441 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1442 {
1443         volatile union e1000_adv_rx_desc *rxdp;
1444         struct igb_rx_queue *rxq = rx_queue;
1445         uint32_t desc;
1446
1447         if (unlikely(offset >= rxq->nb_rx_desc))
1448                 return 0;
1449         desc = rxq->rx_tail + offset;
1450         if (desc >= rxq->nb_rx_desc)
1451                 desc -= rxq->nb_rx_desc;
1452
1453         rxdp = &rxq->rx_ring[desc];
1454         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1455 }
1456
1457 void
1458 igb_dev_clear_queues(struct rte_eth_dev *dev)
1459 {
1460         uint16_t i;
1461         struct igb_tx_queue *txq;
1462         struct igb_rx_queue *rxq;
1463
1464         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1465                 txq = dev->data->tx_queues[i];
1466                 if (txq != NULL) {
1467                         igb_tx_queue_release_mbufs(txq);
1468                         igb_reset_tx_queue(txq, dev);
1469                 }
1470         }
1471
1472         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1473                 rxq = dev->data->rx_queues[i];
1474                 if (rxq != NULL) {
1475                         igb_rx_queue_release_mbufs(rxq);
1476                         igb_reset_rx_queue(rxq);
1477                 }
1478         }
1479 }
1480
1481 /**
1482  * Receive Side Scaling (RSS).
1483  * See section 7.1.1.7 in the following document:
1484  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1485  *
1486  * Principles:
1487  * The source and destination IP addresses of the IP header and the source and
1488  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1489  * against a configurable random key to compute a 32-bit RSS hash result.
1490  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1491  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1492  * RSS output index which is used as the RX queue index where to store the
1493  * received packets.
1494  * The following output is supplied in the RX write-back descriptor:
1495  *     - 32-bit result of the Microsoft RSS hash function,
1496  *     - 4-bit RSS type field.
1497  */
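/*
 * Illustrative example of the flow described above (values are arbitrary,
 * not taken from hardware): for a computed hash of 0x12345678, the RETA
 * index is 0x12345678 & 0x7F = 0x78 = 120; if entry 120 of the redirection
 * table holds the value 2, the packet is delivered to RX queue 2, and the
 * full 32-bit hash plus the RSS type are reported in the write-back
 * descriptor as noted above.
 */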
1498
1499 /*
1500  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1501  * Used as the default key.
1502  */
1503 static uint8_t rss_intel_key[40] = {
1504         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1505         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1506         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1507         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1508         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1509 };
1510
1511 static void
1512 igb_rss_disable(struct rte_eth_dev *dev)
1513 {
1514         struct e1000_hw *hw;
1515         uint32_t mrqc;
1516
1517         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1518         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1519         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1520         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1521 }
1522
1523 static void
1524 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1525 {
1526         uint8_t  *hash_key;
1527         uint32_t rss_key;
1528         uint32_t mrqc;
1529         uint16_t rss_hf;
1530         uint16_t i;
1531
1532         hash_key = rss_conf->rss_key;
1533         if (hash_key != NULL) {
1534                 /* Fill in RSS hash key */
1535                 for (i = 0; i < 10; i++) {
1536                         rss_key  = hash_key[(i * 4)];
1537                         rss_key |= hash_key[(i * 4) + 1] << 8;
1538                         rss_key |= hash_key[(i * 4) + 2] << 16;
1539                         rss_key |= hash_key[(i * 4) + 3] << 24;
1540                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1541                 }
1542         }
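        /*
         * For example, with the default rss_intel_key defined above, the
         * first iteration packs bytes 0x6D, 0x5A, 0x56, 0xDA in little-endian
         * order into 0xDA565A6D and writes that value to RSSRK(0).
         */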
1543
1544         /* Set configured hashing protocols in MRQC register */
1545         rss_hf = rss_conf->rss_hf;
1546         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1547         if (rss_hf & ETH_RSS_IPV4)
1548                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1549         if (rss_hf & ETH_RSS_IPV4_TCP)
1550                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1551         if (rss_hf & ETH_RSS_IPV6)
1552                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1553         if (rss_hf & ETH_RSS_IPV6_EX)
1554                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1555         if (rss_hf & ETH_RSS_IPV6_TCP)
1556                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1557         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1558                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1559         if (rss_hf & ETH_RSS_IPV4_UDP)
1560                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1561         if (rss_hf & ETH_RSS_IPV6_UDP)
1562                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1563         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1564                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1565         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1566 }
1567
1568 int
1569 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1570                         struct rte_eth_rss_conf *rss_conf)
1571 {
1572         struct e1000_hw *hw;
1573         uint32_t mrqc;
1574         uint16_t rss_hf;
1575
1576         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1577
1578         /*
1579          * Before changing anything, first check that the update RSS operation
1580          * does not attempt to disable RSS, if RSS was enabled at
1581          * initialization time, or does not attempt to enable RSS, if RSS was
1582          * disabled at initialization time.
1583          */
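        /*
         * For instance, a port started with RSS enabled may later switch
         * from hashing on ETH_RSS_IPV4 to ETH_RSS_IPV4 | ETH_RSS_IPV6, but a
         * call passing rss_hf == 0 is rejected with -EINVAL; likewise, if
         * RSS was left disabled at initialization, any non-zero rss_hf fails.
         */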
1584         rss_hf = rss_conf->rss_hf;
1585         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1586         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1587                 if (rss_hf != 0) /* Enable RSS */
1588                         return -(EINVAL);
1589                 return 0; /* Nothing to do */
1590         }
1591         /* RSS enabled */
1592         if (rss_hf == 0) /* Disable RSS */
1593                 return -(EINVAL);
1594         igb_hw_rss_hash_set(hw, rss_conf);
1595         return 0;
1596 }
1597
1598 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1599                               struct rte_eth_rss_conf *rss_conf)
1600 {
1601         struct e1000_hw *hw;
1602         uint8_t *hash_key;
1603         uint32_t rss_key;
1604         uint32_t mrqc;
1605         uint16_t rss_hf;
1606         uint16_t i;
1607
1608         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1609         hash_key = rss_conf->rss_key;
1610         if (hash_key != NULL) {
1611                 /* Return RSS hash key */
1612                 for (i = 0; i < 10; i++) {
1613                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1614                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1615                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1616                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1617                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1618                 }
1619         }
1620
1621         /* Get RSS functions configured in MRQC register */
1622         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1623         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1624                 rss_conf->rss_hf = 0;
1625                 return 0;
1626         }
1627         rss_hf = 0;
1628         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1629                 rss_hf |= ETH_RSS_IPV4;
1630         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1631                 rss_hf |= ETH_RSS_IPV4_TCP;
1632         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1633                 rss_hf |= ETH_RSS_IPV6;
1634         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1635                 rss_hf |= ETH_RSS_IPV6_EX;
1636         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1637                 rss_hf |= ETH_RSS_IPV6_TCP;
1638         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1639                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1640         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1641                 rss_hf |= ETH_RSS_IPV4_UDP;
1642         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1643                 rss_hf |= ETH_RSS_IPV6_UDP;
1644         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1645                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1646         rss_conf->rss_hf = rss_hf;
1647         return 0;
1648 }
1649
1650 static void
1651 igb_rss_configure(struct rte_eth_dev *dev)
1652 {
1653         struct rte_eth_rss_conf rss_conf;
1654         struct e1000_hw *hw;
1655         uint32_t shift;
1656         uint16_t i;
1657
1658         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1659
1660         /* Fill in redirection table. */
1661         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1662         for (i = 0; i < 128; i++) {
1663                 union e1000_reta {
1664                         uint32_t dword;
1665                         uint8_t  bytes[4];
1666                 } reta;
1667                 uint8_t q_idx;
1668
1669                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1670                                    i % dev->data->nb_rx_queues : 0);
1671                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1672                 if ((i & 3) == 3)
1673                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1674         }
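        /*
         * Illustration of the packing above (little-endian host assumed):
         * with 4 RX queues and shift == 0, iterations 0..3 set reta.bytes[]
         * to {0, 1, 2, 3}, so RETA(0) is written once as 0x03020100; each
         * 32-bit RETA register therefore carries four consecutive one-byte
         * entries.
         */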
1675
1676         /*
1677          * Configure the RSS key and the RSS protocols used to compute
1678          * the RSS hash of input packets.
1679          */
1680         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1681         if (rss_conf.rss_hf == 0) {
1682                 igb_rss_disable(dev);
1683                 return;
1684         }
1685         if (rss_conf.rss_key == NULL)
1686                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1687         igb_hw_rss_hash_set(hw, &rss_conf);
1688 }
1689
1690 /*
1691  * Check whether the MAC type supports VMDq.
1692  * Return 1 if it does, otherwise return 0.
1693  */
1694 static int
1695 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1696 {
1697         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1698
1699         switch (hw->mac.type) {
1700         case e1000_82576:
1701         case e1000_82580:
1702         case e1000_i350:
1703                 return 1;
1704         case e1000_82540:
1705         case e1000_82541:
1706         case e1000_82542:
1707         case e1000_82543:
1708         case e1000_82544:
1709         case e1000_82545:
1710         case e1000_82546:
1711         case e1000_82547:
1712         case e1000_82571:
1713         case e1000_82572:
1714         case e1000_82573:
1715         case e1000_82574:
1716         case e1000_82583:
1717         case e1000_i210:
1718         case e1000_i211:
1719         default:
1720                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1721                 return 0;
1722         }
1723 }
1724
1725 static int
1726 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1727 {
1728         struct rte_eth_vmdq_rx_conf *cfg;
1729         struct e1000_hw *hw;
1730         uint32_t mrqc, vt_ctl, vmolr, rctl;
1731         int i;
1732
1733         PMD_INIT_LOG(DEBUG, ">>");
1734         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1735         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1736
1737         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1738         if (igb_is_vmdq_supported(dev) == 0)
1739                 return -1;
1740
1741         igb_rss_disable(dev);
1742
1743         /* RCTL: enable VLAN filter */
1744         rctl = E1000_READ_REG(hw, E1000_RCTL);
1745         rctl |= E1000_RCTL_VFE;
1746         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1747
1748         /* MRQC: enable vmdq */
1749         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1750         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1751         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1752
1753         /* VTCTL:  pool selection according to VLAN tag */
1754         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1755         if (cfg->enable_default_pool)
1756                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1757         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1758         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1759
1760         /*
1761          * VMOLR: set STRVLAN to 1 when IGMAC in VT_CTL is set.
1762          * Both 82576 and 82580 support it.
1763          */
1764         if (hw->mac.type != e1000_i350) {
1765                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1766                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1767                         vmolr |= E1000_VMOLR_STRVLAN;
1768                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1769                 }
1770         }
1771
1772         /* VFTA - enable all vlan filters */
1773         for (i = 0; i < IGB_VFTA_SIZE; i++)
1774                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1775
1776         /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
1777         if (hw->mac.type != e1000_82580)
1778                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1779
1780         /*
1781          * RAH/RAL - allow pools to read specific mac addresses
1782          * In this case, all pools should be able to read from mac addr 0
1783          */
1784         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1785         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1786
1787         /* VLVF: set up filters for vlan tags as configured */
1788         for (i = 0; i < cfg->nb_pool_maps; i++) {
1789                 /* set vlan id in VF register and set the valid bit */
1790                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1791                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1792                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1793                         E1000_VLVF_POOLSEL_MASK)));
1794         }
1795
1796         E1000_WRITE_FLUSH(hw);
1797
1798         return 0;
1799 }
1800
1801
1802 /*********************************************************************
1803  *
1804  *  Enable receive unit.
1805  *
1806  **********************************************************************/
1807
1808 static int
1809 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1810 {
1811         struct igb_rx_entry *rxe = rxq->sw_ring;
1812         uint64_t dma_addr;
1813         unsigned i;
1814
1815         /* Initialize software ring entries. */
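        /*
         * Note: hdr_addr and pkt_addr below are armed with the same mbuf DMA
         * address; header split is not used, since the queues are programmed
         * with the advanced one-buffer descriptor type
         * (E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) in the RX init routines.
         */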
1816         for (i = 0; i < rxq->nb_rx_desc; i++) {
1817                 volatile union e1000_adv_rx_desc *rxd;
1818                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1819
1820                 if (mbuf == NULL) {
1821                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1822                                 "queue_id=%hu\n", rxq->queue_id);
1823                         return (-ENOMEM);
1824                 }
1825                 dma_addr =
1826                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1827                 rxd = &rxq->rx_ring[i];
1828                 rxd->read.hdr_addr = dma_addr;
1829                 rxd->read.pkt_addr = dma_addr;
1830                 rxe[i].mbuf = mbuf;
1831         }
1832
1833         return 0;
1834 }
1835
1836 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1837 static int
1838 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1839 {
1840         struct e1000_hw *hw =
1841                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1842         uint32_t mrqc;
1843
1844         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1845                 /*
1846                  * SRIOV active scheme
1847                  * FIXME if support RSS together with VMDq & SRIOV
1848                  */
1849                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1850                 /* Def_Q = 011b: Def_Q is ignored, default pool comes from VT_CTL.DEF_PL */
1851                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1852                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1853         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1854                 /*
1855                  * SRIOV inactive scheme
1856                  */
1857                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1858                         case ETH_MQ_RX_RSS:
1859                                 igb_rss_configure(dev);
1860                                 break;
1861                         case ETH_MQ_RX_VMDQ_ONLY:
1862                                 /*Configure general VMDQ only RX parameters*/
1863                                 igb_vmdq_rx_hw_configure(dev);
1864                                 break;
1865                         case ETH_MQ_RX_NONE:
1866                                 /* if mq_mode is none, disable rss mode.*/
1867                         default:
1868                                 igb_rss_disable(dev);
1869                                 break;
1870                 }
1871         }
1872
1873         return 0;
1874 }
1875
1876 int
1877 eth_igb_rx_init(struct rte_eth_dev *dev)
1878 {
1879         struct e1000_hw     *hw;
1880         struct igb_rx_queue *rxq;
1881         struct rte_pktmbuf_pool_private *mbp_priv;
1882         uint32_t rctl;
1883         uint32_t rxcsum;
1884         uint32_t srrctl;
1885         uint16_t buf_size;
1886         uint16_t rctl_bsize;
1887         uint16_t i;
1888         int ret;
1889
1890         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1891         srrctl = 0;
1892
1893         /*
1894          * Make sure receives are disabled while setting
1895          * up the descriptor ring.
1896          */
1897         rctl = E1000_READ_REG(hw, E1000_RCTL);
1898         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1899
1900         /*
1901          * Configure support of jumbo frames, if any.
1902          */
1903         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1904                 rctl |= E1000_RCTL_LPE;
1905
1906                 /*
1907                  * Set maximum packet length by default, and might be updated
1908                  * together with enabling/disabling dual VLAN.
1909                  */
1910                 E1000_WRITE_REG(hw, E1000_RLPML,
1911                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1912                                                 VLAN_TAG_SIZE);
1913         } else
1914                 rctl &= ~E1000_RCTL_LPE;
1915
1916         /* Configure and enable each RX queue. */
1917         rctl_bsize = 0;
1918         dev->rx_pkt_burst = eth_igb_recv_pkts;
1919         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1920                 uint64_t bus_addr;
1921                 uint32_t rxdctl;
1922
1923                 rxq = dev->data->rx_queues[i];
1924
1925                 /* Allocate buffers for descriptor rings and set up queue */
1926                 ret = igb_alloc_rx_queue_mbufs(rxq);
1927                 if (ret)
1928                         return ret;
1929
1930                 /*
1931                  * Reset crc_len in case it was changed after queue setup by a
1932                  *  call to configure
1933                  */
1934                 rxq->crc_len =
1935                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1936                                                         0 : ETHER_CRC_LEN);
1937
1938                 bus_addr = rxq->rx_ring_phys_addr;
1939                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1940                                 rxq->nb_rx_desc *
1941                                 sizeof(union e1000_adv_rx_desc));
1942                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1943                                 (uint32_t)(bus_addr >> 32));
1944                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1945
1946                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1947
1948                 /*
1949                  * Configure RX buffer size.
1950                  */
1951                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1952                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1953                                        RTE_PKTMBUF_HEADROOM);
1954                 if (buf_size >= 1024) {
1955                         /*
1956                          * Configure the BSIZEPACKET field of the SRRCTL
1957                          * register of the queue.
1958                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
1959                          * If this field is equal to 0b, then RCTL.BSIZE
1960                          * determines the RX packet buffer size.
1961                          */
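                        /*
                         * Example (illustrative; assumes the usual shift of
                         * 10 bits): a pool whose data room leaves
                         * buf_size = 1920 yields 1920 >> 10 = 1, so
                         * BSIZEPACKET is set to 1 and the effective buffer
                         * becomes 1 KB; any max_rx_pkt_len above roughly
                         * 1 KB then selects the scattered RX path below.
                         */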
1962                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1963                                    E1000_SRRCTL_BSIZEPKT_MASK);
1964                         buf_size = (uint16_t) ((srrctl &
1965                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
1966                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
1967
1968                         /* Dual VLAN tag length is added to support double VLAN frames */
1969                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1970                                                 2 * VLAN_TAG_SIZE) > buf_size){
1971                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1972                                 dev->data->scattered_rx = 1;
1973                         }
1974                 } else {
1975                         /*
1976                          * Use BSIZE field of the device RCTL register.
1977                          */
1978                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1979                                 rctl_bsize = buf_size;
1980                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1981                         dev->data->scattered_rx = 1;
1982                 }
1983
1984                 /* Set whether packets are dropped when no descriptors are available */
1985                 if (rxq->drop_en)
1986                         srrctl |= E1000_SRRCTL_DROP_EN;
1987
1988                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
1989
1990                 /* Enable this RX queue. */
1991                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
1992                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1993                 rxdctl &= 0xFFF00000;
1994                 rxdctl |= (rxq->pthresh & 0x1F);
1995                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1996                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1997                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
1998         }
1999
2000         /*
2001          * Setup BSIZE field of RCTL register, if needed.
2002          * Buffer sizes >= 1024 are not supposed to be set up in the RCTL
2003          * register, since the code above configures the SRRCTL register of
2004          * the RX queue in such a case.
2005          * All configurable sizes are:
2006          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2007          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2008          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2009          *  2048: rctl |= E1000_RCTL_SZ_2048;
2010          *  1024: rctl |= E1000_RCTL_SZ_1024;
2011          *   512: rctl |= E1000_RCTL_SZ_512;
2012          *   256: rctl |= E1000_RCTL_SZ_256;
2013          */
2014         if (rctl_bsize > 0) {
2015                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2016                         rctl |= E1000_RCTL_SZ_512;
2017                 else /* 256 <= buf_size < 512 - use 256 */
2018                         rctl |= E1000_RCTL_SZ_256;
2019         }
2020
2021         /*
2022          * Configure RSS if device configured with multiple RX queues.
2023          */
2024         igb_dev_mq_rx_configure(dev);
2025
2026         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2027         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2028
2029         /*
2030          * Setup the Checksum Register.
2031          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2032          */
2033         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2034         rxcsum |= E1000_RXCSUM_PCSD;
2035
2036         /* Enable both L3/L4 rx checksum offload */
2037         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2038                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2039         else
2040                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2041         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2042
2043         /* Setup the Receive Control Register. */
2044         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2045                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2046
2047                 /* set STRCRC bit in all queues */
2048                 if (hw->mac.type == e1000_i350 ||
2049                     hw->mac.type == e1000_i210 ||
2050                     hw->mac.type == e1000_i211 ||
2051                     hw->mac.type == e1000_i354) {
2052                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2053                                 rxq = dev->data->rx_queues[i];
2054                                 uint32_t dvmolr = E1000_READ_REG(hw,
2055                                         E1000_DVMOLR(rxq->reg_idx));
2056                                 dvmolr |= E1000_DVMOLR_STRCRC;
2057                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2058                         }
2059                 }
2060         } else {
2061                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2062
2063                 /* clear STRCRC bit in all queues */
2064                 if (hw->mac.type == e1000_i350 ||
2065                     hw->mac.type == e1000_i210 ||
2066                     hw->mac.type == e1000_i211 ||
2067                     hw->mac.type == e1000_i354) {
2068                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2069                                 rxq = dev->data->rx_queues[i];
2070                                 uint32_t dvmolr = E1000_READ_REG(hw,
2071                                         E1000_DVMOLR(rxq->reg_idx));
2072                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2073                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2074                         }
2075                 }
2076         }
2077
2078         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2079         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2080                 E1000_RCTL_RDMTS_HALF |
2081                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2082
2083         /* Make sure VLAN Filters are off. */
2084         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2085                 rctl &= ~E1000_RCTL_VFE;
2086         /* Don't store bad packets. */
2087         rctl &= ~E1000_RCTL_SBP;
2088
2089         /* Enable Receives. */
2090         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2091
2092         /*
2093          * Setup the HW Rx Head and Tail Descriptor Pointers.
2094          * This needs to be done after enable.
2095          */
2096         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2097                 rxq = dev->data->rx_queues[i];
2098                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2099                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2100         }
2101
2102         return 0;
2103 }
2104
2105 /*********************************************************************
2106  *
2107  *  Enable transmit unit.
2108  *
2109  **********************************************************************/
2110 void
2111 eth_igb_tx_init(struct rte_eth_dev *dev)
2112 {
2113         struct e1000_hw     *hw;
2114         struct igb_tx_queue *txq;
2115         uint32_t tctl;
2116         uint32_t txdctl;
2117         uint16_t i;
2118
2119         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2120
2121         /* Setup the Base and Length of the Tx Descriptor Rings. */
2122         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2123                 uint64_t bus_addr;
2124                 txq = dev->data->tx_queues[i];
2125                 bus_addr = txq->tx_ring_phys_addr;
2126
2127                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2128                                 txq->nb_tx_desc *
2129                                 sizeof(union e1000_adv_tx_desc));
2130                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2131                                 (uint32_t)(bus_addr >> 32));
2132                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2133
2134                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2135                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2136                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2137
2138                 /* Setup Transmit threshold registers. */
2139                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2140                 txdctl |= txq->pthresh & 0x1F;
2141                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2142                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2143                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2144                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2145         }
2146
2147         /* Program the Transmit Control Register. */
2148         tctl = E1000_READ_REG(hw, E1000_TCTL);
2149         tctl &= ~E1000_TCTL_CT;
2150         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2151                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2152
2153         e1000_config_collision_dist(hw);
2154
2155         /* This write will effectively turn on the transmit unit. */
2156         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2157 }
2158
2159 /*********************************************************************
2160  *
2161  *  Enable VF receive unit.
2162  *
2163  **********************************************************************/
2164 int
2165 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2166 {
2167         struct e1000_hw     *hw;
2168         struct igb_rx_queue *rxq;
2169         struct rte_pktmbuf_pool_private *mbp_priv;
2170         uint32_t srrctl;
2171         uint16_t buf_size;
2172         uint16_t rctl_bsize;
2173         uint16_t i;
2174         int ret;
2175
2176         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2177
2178         /* setup MTU */
2179         e1000_rlpml_set_vf(hw,
2180                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2181                 VLAN_TAG_SIZE));
2182
2183         /* Configure and enable each RX queue. */
2184         rctl_bsize = 0;
2185         dev->rx_pkt_burst = eth_igb_recv_pkts;
2186         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2187                 uint64_t bus_addr;
2188                 uint32_t rxdctl;
2189
2190                 rxq = dev->data->rx_queues[i];
2191
2192                 /* Allocate buffers for descriptor rings and set up queue */
2193                 ret = igb_alloc_rx_queue_mbufs(rxq);
2194                 if (ret)
2195                         return ret;
2196
2197                 bus_addr = rxq->rx_ring_phys_addr;
2198                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2199                                 rxq->nb_rx_desc *
2200                                 sizeof(union e1000_adv_rx_desc));
2201                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2202                                 (uint32_t)(bus_addr >> 32));
2203                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2204
2205                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2206
2207                 /*
2208                  * Configure RX buffer size.
2209                  */
2210                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2211                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2212                                        RTE_PKTMBUF_HEADROOM);
2213                 if (buf_size >= 1024) {
2214                         /*
2215                          * Configure the BSIZEPACKET field of the SRRCTL
2216                          * register of the queue.
2217                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2218                          * If this field is equal to 0b, then RCTL.BSIZE
2219                          * determines the RX packet buffer size.
2220                          */
2221                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2222                                    E1000_SRRCTL_BSIZEPKT_MASK);
2223                         buf_size = (uint16_t) ((srrctl &
2224                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2225                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2226
2227                         /* Dual VLAN tag length is added to support double VLAN frames */
2228                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2229                                                 2 * VLAN_TAG_SIZE) > buf_size){
2230                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2231                                 dev->data->scattered_rx = 1;
2232                         }
2233                 } else {
2234                         /*
2235                          * Use BSIZE field of the device RCTL register.
2236                          */
2237                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2238                                 rctl_bsize = buf_size;
2239                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2240                         dev->data->scattered_rx = 1;
2241                 }
2242
2243                 /* Set whether packets are dropped when no descriptors are available */
2244                 if (rxq->drop_en)
2245                         srrctl |= E1000_SRRCTL_DROP_EN;
2246
2247                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2248
2249                 /* Enable this RX queue. */
2250                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2251                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2252                 rxdctl &= 0xFFF00000;
2253                 rxdctl |= (rxq->pthresh & 0x1F);
2254                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2255                 if (hw->mac.type == e1000_vfadapt) {
2256                         /*
2257                          * Workaround for the 82576 VF erratum:
2258                          * force the WTHRESH field (bits 20:16) to 1 to avoid
2259                          * descriptor write-back sometimes not being triggered.
2260                          */
2261                         rxdctl |= 0x10000;
2262                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
2263                 }
2264                 else
2265                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2266                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2267         }
2268
2269         /*
2270          * Setup the HW Rx Head and Tail Descriptor Pointers.
2271          * This needs to be done after enable.
2272          */
2273         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2274                 rxq = dev->data->rx_queues[i];
2275                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2276                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2277         }
2278
2279         return 0;
2280 }
2281
2282 /*********************************************************************
2283  *
2284  *  Enable VF transmit unit.
2285  *
2286  **********************************************************************/
2287 void
2288 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2289 {
2290         struct e1000_hw     *hw;
2291         struct igb_tx_queue *txq;
2292         uint32_t txdctl;
2293         uint16_t i;
2294
2295         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2296
2297         /* Setup the Base and Length of the Tx Descriptor Rings. */
2298         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2299                 uint64_t bus_addr;
2300
2301                 txq = dev->data->tx_queues[i];
2302                 bus_addr = txq->tx_ring_phys_addr;
2303                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2304                                 txq->nb_tx_desc *
2305                                 sizeof(union e1000_adv_tx_desc));
2306                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2307                                 (uint32_t)(bus_addr >> 32));
2308                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2309
2310                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2311                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2312                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2313
2314                 /* Setup Transmit threshold registers. */
2315                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2316                 txdctl |= txq->pthresh & 0x1F;
2317                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2318                 if (hw->mac.type == e1000_82576) {
2319                         /*
2320                          * Workaround for the 82576 VF erratum:
2321                          * force the WTHRESH field (bits 20:16) to 1 to avoid
2322                          * descriptor write-back sometimes not being triggered.
2323                          */
2324                         txdctl |= 0x10000;
2325                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
2326                 }
2327                 else
2328                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2329                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2330                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2331         }
2332
2333 }
2334