mbuf: remove too specific flags mask
[dpdk.git] / lib / librte_pmd_e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 #define IGB_RSS_OFFLOAD_ALL ( \
77                 ETH_RSS_IPV4 | \
78                 ETH_RSS_IPV4_TCP | \
79                 ETH_RSS_IPV6 | \
80                 ETH_RSS_IPV6_EX | \
81                 ETH_RSS_IPV6_TCP | \
82                 ETH_RSS_IPV6_TCP_EX | \
83                 ETH_RSS_IPV4_UDP | \
84                 ETH_RSS_IPV6_UDP | \
85                 ETH_RSS_IPV6_UDP_EX)
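/*
 * Example (illustrative): an application typically requests a subset of these
 * hash types at configuration time, e.g.
 *     port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;
 * IGB_RSS_OFFLOAD_ALL groups every RSS hash type handled by this driver.
 */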
86
87 /* Bit mask to indicate which bits are required for building the TX context */
88 #define IGB_TX_OFFLOAD_MASK (                    \
89                 PKT_TX_VLAN_PKT |                \
90                 PKT_TX_IP_CKSUM |                \
91                 PKT_TX_L4_MASK)
92
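/*
 * Allocate a raw mbuf from the pool for RX ring replenishment. Unlike
 * rte_pktmbuf_alloc(), the packet metadata is not reset here; the RX
 * functions below initialize every field they need when a packet arrives.
 */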
93 static inline struct rte_mbuf *
94 rte_rxmbuf_alloc(struct rte_mempool *mp)
95 {
96         struct rte_mbuf *m;
97
98         m = __rte_mbuf_raw_alloc(mp);
99         __rte_mbuf_sanity_check_raw(m, 0);
100         return (m);
101 }
102
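/*
 * Bus (physical) addresses handed to the NIC for DMA: either the current
 * data offset of an mbuf, or the default headroom offset used when a fresh
 * mbuf is programmed into an RX descriptor.
 */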
103 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
104         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
105
106 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
107         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
108
109 /**
110  * Structure associated with each descriptor of the RX ring of a RX queue.
111  */
112 struct igb_rx_entry {
113         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
114 };
115
116 /**
117  * Structure associated with each descriptor of the TX ring of a TX queue.
118  */
119 struct igb_tx_entry {
120         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
121         uint16_t next_id; /**< Index of next descriptor in ring. */
122         uint16_t last_id; /**< Index of last scattered descriptor. */
123 };
124
125 /**
126  * Structure associated with each RX queue.
127  */
128 struct igb_rx_queue {
129         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
130         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
131         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
132         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
133         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
134         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
135         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
136         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
137         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
138         uint16_t            rx_tail;    /**< current value of RDT register. */
139         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
140         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
141         uint16_t            queue_id;   /**< RX queue index. */
142         uint16_t            reg_idx;    /**< RX queue register index. */
143         uint8_t             port_id;    /**< Device port identifier. */
144         uint8_t             pthresh;    /**< Prefetch threshold register. */
145         uint8_t             hthresh;    /**< Host threshold register. */
146         uint8_t             wthresh;    /**< Write-back threshold register. */
147         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
148         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
149 };
150
151 /**
152  * Hardware context number
153  */
154 enum igb_advctx_num {
155         IGB_CTX_0    = 0, /**< CTX0    */
156         IGB_CTX_1    = 1, /**< CTX1    */
157         IGB_CTX_NUM  = 2, /**< CTX_NUM */
158 };
159
160 /** Offload features */
161 union igb_vlan_macip {
162         uint32_t data;
163         struct {
164                 uint16_t l2_l3_len; /**< Combined 7-bit L2 and 9-bit L3 lengths. */
165                 uint16_t vlan_tci;
166                 /**< VLAN Tag Control Identifier (CPU order). */
167         } f;
168 };
169
170 /*
171  * Compare mask for vlan_macip_lens.data;
172  * it must be kept in sync with the igb_vlan_macip.f layout.
173  */
174 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN TCI - 16 bits. */
175 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC (L2) length - 7 bits. */
176 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP (L3) length - 9 bits. */
177 /** MAC+IP  length. */
178 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
179
180 /**
181  * Structure used to check whether a new context descriptor needs to be built
182  */
183 struct igb_advctx_info {
184         uint64_t flags;           /**< ol_flags related to context build. */
185         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
186         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
187 };
188
189 /**
190  * Structure associated with each TX queue.
191  */
192 struct igb_tx_queue {
193         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
194         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
195         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
196         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
197         uint32_t               txd_type;      /**< Device-specific TXD type */
198         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
199         uint16_t               tx_tail; /**< Current value of TDT register. */
200         uint16_t               tx_head;
201         /**< Index of first used TX descriptor. */
202         uint16_t               queue_id; /**< TX queue index. */
203         uint16_t               reg_idx;  /**< TX queue register index. */
204         uint8_t                port_id;  /**< Device port identifier. */
205         uint8_t                pthresh;  /**< Prefetch threshold register. */
206         uint8_t                hthresh;  /**< Host threshold register. */
207         uint8_t                wthresh;  /**< Write-back threshold register. */
208         uint32_t               ctx_curr;
209         /**< Index of the currently used hardware context. */
210         uint32_t               ctx_start;
211         /**< Start context position for transmit queue. */
212         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
213         /**< Hardware context history.*/
214 };
215
216 #if 1
217 #define RTE_PMD_USE_PREFETCH
218 #endif
219
220 #ifdef RTE_PMD_USE_PREFETCH
221 #define rte_igb_prefetch(p)     rte_prefetch0(p)
222 #else
223 #define rte_igb_prefetch(p)     do {} while(0)
224 #endif
225
226 #ifdef RTE_PMD_PACKET_PREFETCH
227 #define rte_packet_prefetch(p) rte_prefetch1(p)
228 #else
229 #define rte_packet_prefetch(p)  do {} while(0)
230 #endif
231
232 /*
233  * Macro for VMDq feature for 1 GbE NIC.
234  */
235 #define E1000_VMOLR_SIZE                        (8)
236
237 /*********************************************************************
238  *
239  *  TX function
240  *
241  **********************************************************************/
242
243 /*
244  * Advanced context descriptors are almost the same between igb and ixgbe.
245  * This is kept as a separate function to leave room for optimization here;
246  * rework is required to go with the pre-defined values.
247  */
248
249 static inline void
250 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
251                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
252                 uint64_t ol_flags, uint32_t vlan_macip_lens)
253 {
254         uint32_t type_tucmd_mlhl;
255         uint32_t mss_l4len_idx;
256         uint32_t ctx_idx, ctx_curr;
257         uint32_t cmp_mask;
258
259         ctx_curr = txq->ctx_curr;
260         ctx_idx = ctx_curr + txq->ctx_start;
261
262         cmp_mask = 0;
263         type_tucmd_mlhl = 0;
264
265         if (ol_flags & PKT_TX_VLAN_PKT) {
266                 cmp_mask |= TX_VLAN_CMP_MASK;
267         }
268
269         if (ol_flags & PKT_TX_IP_CKSUM) {
270                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
271                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
272         }
273
274         /* Specify which HW CTX to upload. */
275         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
276         switch (ol_flags & PKT_TX_L4_MASK) {
277         case PKT_TX_UDP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         case PKT_TX_TCP_CKSUM:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
288                 break;
289         case PKT_TX_SCTP_CKSUM:
290                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
293                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
294                 break;
295         default:
296                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
297                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298                 break;
299         }
300
301         txq->ctx_cache[ctx_curr].flags           = ol_flags;
302         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
303         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
304                 vlan_macip_lens & cmp_mask;
305
306         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
307         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
308         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
309         ctx_txd->seqnum_seed     = 0;
310 }
311
312 /*
313  * Check which hardware context can be used. Use the existing match
314  * or create a new context descriptor.
315  */
316 static inline uint32_t
317 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
318                 uint32_t vlan_macip_lens)
319 {
320         /* If match with the current context */
321         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
322                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
323                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
324                         return txq->ctx_curr;
325         }
326
327         /* If match with the second context */
328         txq->ctx_curr ^= 1;
329         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
330                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
331                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
332                         return txq->ctx_curr;
333         }
334
335         /* No cached context matches: a new context descriptor must be built */
336         return (IGB_CTX_NUM);
337 }
338
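/*
 * Translate the checksum request flags (PKT_TX_IP_CKSUM, PKT_TX_L4_MASK)
 * into the POPTS bits of the olinfo_status descriptor field, using small
 * lookup tables instead of branches.
 */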
339 static inline uint32_t
340 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
341 {
342         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
343         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
344         uint32_t tmp;
345
346         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
347         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
348         return tmp;
349 }
350
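/*
 * Translate the PKT_TX_VLAN_PKT flag into the VLE (VLAN enable) bit of the
 * descriptor command field.
 */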
351 static inline uint32_t
352 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
353 {
354         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
355         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
356 }
357
358 uint16_t
359 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
360                uint16_t nb_pkts)
361 {
362         struct igb_tx_queue *txq;
363         struct igb_tx_entry *sw_ring;
364         struct igb_tx_entry *txe, *txn;
365         volatile union e1000_adv_tx_desc *txr;
366         volatile union e1000_adv_tx_desc *txd;
367         struct rte_mbuf     *tx_pkt;
368         struct rte_mbuf     *m_seg;
369         union igb_vlan_macip vlan_macip_lens;
370         uint64_t buf_dma_addr;
371         uint32_t olinfo_status;
372         uint32_t cmd_type_len;
373         uint32_t pkt_len;
374         uint16_t slen;
375         uint64_t ol_flags;
376         uint16_t tx_end;
377         uint16_t tx_id;
378         uint16_t tx_last;
379         uint16_t nb_tx;
380         uint64_t tx_ol_req;
381         uint32_t new_ctx = 0;
382         uint32_t ctx = 0;
383
384         txq = tx_queue;
385         sw_ring = txq->sw_ring;
386         txr     = txq->tx_ring;
387         tx_id   = txq->tx_tail;
388         txe = &sw_ring[tx_id];
389
390         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
391                 tx_pkt = *tx_pkts++;
392                 pkt_len = tx_pkt->pkt_len;
393
394                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
395
396                 /*
397                  * The number of descriptors that must be allocated for a
398                  * packet is the number of segments of that packet, plus 1
399                  * Context Descriptor for the VLAN Tag Identifier, if any.
400                  * Determine the last TX descriptor to allocate in the TX ring
401                  * for the packet, starting from the current position (tx_id)
402                  * in the ring.
403                  */
404                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
405
406                 ol_flags = tx_pkt->ol_flags;
407                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
408                 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
409                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
410
411                 /* Check whether a context descriptor needs to be built. */
412                 if (tx_ol_req) {
413                         ctx = what_advctx_update(txq, tx_ol_req,
414                                 vlan_macip_lens.data);
415                         /* Only allocate a context descriptor if required. */
416                         new_ctx = (ctx == IGB_CTX_NUM);
417                         ctx = txq->ctx_curr;
418                         tx_last = (uint16_t) (tx_last + new_ctx);
419                 }
420                 if (tx_last >= txq->nb_tx_desc)
421                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
422
423                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
424                            " tx_first=%u tx_last=%u",
425                            (unsigned) txq->port_id,
426                            (unsigned) txq->queue_id,
427                            (unsigned) pkt_len,
428                            (unsigned) tx_id,
429                            (unsigned) tx_last);
430
431                 /*
432                  * Check if there are enough free descriptors in the TX ring
433                  * to transmit the next packet.
434                  * This operation is based on the two following rules:
435                  *
436                  *   1- Only check that the last needed TX descriptor can be
437                  *      allocated (by construction, if that descriptor is free,
438                  *      all intermediate ones are also free).
439                  *
440                  *      For this purpose, the index of the last TX descriptor
441                  *      used for a packet (the "last descriptor" of a packet)
442                  *      is recorded in the TX entries (the last one included)
443                  *      that are associated with all TX descriptors allocated
444                  *      for that packet.
445                  *
446                  *   2- Avoid allocating the last free TX descriptor of the
447                  *      ring, in order to never set the TDT register to the
448                  *      same value stored in parallel by the NIC in the TDH
449                  *      register, which would make the TX engine of the NIC
450                  *      enter a deadlock situation.
451                  *
452                  *      By extension, avoid allocating a free descriptor that
453                  *      belongs to the last set of free descriptors allocated
454                  *      to the same packet previously transmitted.
455                  */
456
457                 /*
458                  * The "last descriptor" of the packet, if any, that previously
459                  * used the last descriptor we want to allocate.
460                  */
461                 tx_end = sw_ring[tx_last].last_id;
462
463                 /*
464                  * The next descriptor following that "last descriptor" in the
465                  * ring.
466                  */
467                 tx_end = sw_ring[tx_end].next_id;
468
469                 /*
470                  * The "last descriptor" associated with that next descriptor.
471                  */
472                 tx_end = sw_ring[tx_end].last_id;
473
474                 /*
475                  * Check that this descriptor is free.
476                  */
477                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
478                         if (nb_tx == 0)
479                                 return (0);
480                         goto end_of_tx;
481                 }
482
483                 /*
484                  * Set common flags of all TX Data Descriptors.
485                  *
486                  * The following bits must be set in all Data Descriptors:
487                  *   - E1000_ADVTXD_DTYP_DATA
488                  *   - E1000_ADVTXD_DCMD_DEXT
489                  *
490                  * The following bits must be set in the first Data Descriptor
491                  * and are ignored in the other ones:
492                  *   - E1000_ADVTXD_DCMD_IFCS
493                  *   - E1000_ADVTXD_MAC_1588
494                  *   - E1000_ADVTXD_DCMD_VLE
495                  *
496                  * The following bits must only be set in the last Data
497                  * Descriptor:
498                  *   - E1000_TXD_CMD_EOP
499                  *
500                  * The following bits can be set in any Data Descriptor, but
501                  * are only set in the last Data Descriptor:
502                  *   - E1000_TXD_CMD_RS
503                  */
504                 cmd_type_len = txq->txd_type |
505                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
506                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
507 #if defined(RTE_LIBRTE_IEEE1588)
508                 if (ol_flags & PKT_TX_IEEE1588_TMST)
509                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
510 #endif
511                 if (tx_ol_req) {
512                         /* Setup TX Advanced context descriptor if required */
513                         if (new_ctx) {
514                                 volatile struct e1000_adv_tx_context_desc *
515                                     ctx_txd;
516
517                                 ctx_txd = (volatile struct
518                                     e1000_adv_tx_context_desc *)
519                                     &txr[tx_id];
520
521                                 txn = &sw_ring[txe->next_id];
522                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
523
524                                 if (txe->mbuf != NULL) {
525                                         rte_pktmbuf_free_seg(txe->mbuf);
526                                         txe->mbuf = NULL;
527                                 }
528
529                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
530                                     vlan_macip_lens.data);
531
532                                 txe->last_id = tx_last;
533                                 tx_id = txe->next_id;
534                                 txe = txn;
535                         }
536
537                         /* Setup the TX Advanced Data Descriptor */
538                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
539                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
540                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
541                 }
542
543                 m_seg = tx_pkt;
544                 do {
545                         txn = &sw_ring[txe->next_id];
546                         txd = &txr[tx_id];
547
548                         if (txe->mbuf != NULL)
549                                 rte_pktmbuf_free_seg(txe->mbuf);
550                         txe->mbuf = m_seg;
551
552                         /*
553                          * Set up transmit descriptor.
554                          */
555                         slen = (uint16_t) m_seg->data_len;
556                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
557                         txd->read.buffer_addr =
558                                 rte_cpu_to_le_64(buf_dma_addr);
559                         txd->read.cmd_type_len =
560                                 rte_cpu_to_le_32(cmd_type_len | slen);
561                         txd->read.olinfo_status =
562                                 rte_cpu_to_le_32(olinfo_status);
563                         txe->last_id = tx_last;
564                         tx_id = txe->next_id;
565                         txe = txn;
566                         m_seg = m_seg->next;
567                 } while (m_seg != NULL);
568
569                 /*
570                  * The last packet data descriptor needs End Of Packet (EOP)
571                  * and Report Status (RS).
572                  */
573                 txd->read.cmd_type_len |=
574                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
575         }
576  end_of_tx:
577         rte_wmb();
578
579         /*
580          * Set the Transmit Descriptor Tail (TDT).
581          */
582         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
583         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
584                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
585                    (unsigned) tx_id, (unsigned) nb_tx);
586         txq->tx_tail = tx_id;
587
588         return (nb_tx);
589 }
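/*
 * Note: eth_igb_xmit_pkts() is installed as dev->tx_pkt_burst in
 * eth_igb_tx_queue_setup() below, so applications normally reach it through
 * the generic burst API, e.g. (illustrative call):
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 */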
590
591 /*********************************************************************
592  *
593  *  RX functions
594  *
595  **********************************************************************/
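/*
 * Convert the "hlen/type/rss" word of a received descriptor into mbuf
 * ol_flags: the packet-type nibble maps to the PKT_RX_IPV4/IPV6 flags and a
 * non-zero RSS type adds PKT_RX_RSS_HASH.
 */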
596 static inline uint64_t
597 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
598 {
599         uint64_t pkt_flags;
600
601         static uint64_t ip_pkt_types_map[16] = {
602                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
603                 PKT_RX_IPV6_HDR, 0, 0, 0,
604                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
605                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
606         };
607
608 #if defined(RTE_LIBRTE_IEEE1588)
609         static uint32_t ip_pkt_etqf_map[8] = {
610                 0, 0, 0, PKT_RX_IEEE1588_PTP,
611                 0, 0, 0, 0,
612         };
613
614         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
615                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
616                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
617 #else
618         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
619                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
620 #endif
621         return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH);
622 }
623
624 static inline uint64_t
625 rx_desc_status_to_pkt_flags(uint32_t rx_status)
626 {
627         uint64_t pkt_flags;
628
629         /* Check if VLAN present */
630         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
631
632 #if defined(RTE_LIBRTE_IEEE1588)
633         if (rx_status & E1000_RXD_STAT_TMST)
634                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
635 #endif
636         return pkt_flags;
637 }
638
639 static inline uint64_t
640 rx_desc_error_to_pkt_flags(uint32_t rx_status)
641 {
642         /*
643          * Bit 30: IPE, IPv4 checksum error
644          * Bit 29: L4I, L4 integrity error
645          */
646
647         static uint64_t error_to_pkt_flags_map[4] = {
648                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
649                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
650         };
651         return error_to_pkt_flags_map[(rx_status >>
652                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
653 }
654
655 uint16_t
656 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
657                uint16_t nb_pkts)
658 {
659         struct igb_rx_queue *rxq;
660         volatile union e1000_adv_rx_desc *rx_ring;
661         volatile union e1000_adv_rx_desc *rxdp;
662         struct igb_rx_entry *sw_ring;
663         struct igb_rx_entry *rxe;
664         struct rte_mbuf *rxm;
665         struct rte_mbuf *nmb;
666         union e1000_adv_rx_desc rxd;
667         uint64_t dma_addr;
668         uint32_t staterr;
669         uint32_t hlen_type_rss;
670         uint16_t pkt_len;
671         uint16_t rx_id;
672         uint16_t nb_rx;
673         uint16_t nb_hold;
674         uint64_t pkt_flags;
675
676         nb_rx = 0;
677         nb_hold = 0;
678         rxq = rx_queue;
679         rx_id = rxq->rx_tail;
680         rx_ring = rxq->rx_ring;
681         sw_ring = rxq->sw_ring;
682         while (nb_rx < nb_pkts) {
683                 /*
684                  * The order of operations here is important as the DD status
685                  * bit must not be read after any other descriptor fields.
686                  * rx_ring and rxdp are pointing to volatile data so the order
687                  * of accesses cannot be reordered by the compiler. If they were
688                  * not volatile, they could be reordered which could lead to
689                  * using invalid descriptor fields when read from rxd.
690                  */
691                 rxdp = &rx_ring[rx_id];
692                 staterr = rxdp->wb.upper.status_error;
693                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
694                         break;
695                 rxd = *rxdp;
696
697                 /*
698                  * End of packet.
699                  *
700                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
701                  * likely to be invalid and to be dropped by the various
702                  * validation checks performed by the network stack.
703                  *
704                  * Allocate a new mbuf to replenish the RX ring descriptor.
705                  * If the allocation fails:
706                  *    - arrange for that RX descriptor to be the first one
707                  *      being parsed the next time the receive function is
708                  *      invoked [on the same queue].
709                  *
710                  *    - Stop parsing the RX ring and return immediately.
711                  *
712                  * This policy does not drop the packet received in the RX
713                  * descriptor for which the allocation of a new mbuf failed.
714                  * Thus, it allows that packet to be retrieved later if
715                  * mbufs have been freed in the meantime.
716                  * As a side effect, holding RX descriptors instead of
717                  * systematically giving them back to the NIC may lead to
718                  * RX ring exhaustion situations.
719                  * However, the NIC can gracefully prevent such situations
720                  * from happening by sending specific "back-pressure" flow
721                  * control frames to its peer(s).
722                  */
723                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
724                            "staterr=0x%x pkt_len=%u",
725                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
726                            (unsigned) rx_id, (unsigned) staterr,
727                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
728
729                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
730                 if (nmb == NULL) {
731                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
732                                    "queue_id=%u", (unsigned) rxq->port_id,
733                                    (unsigned) rxq->queue_id);
734                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
735                         break;
736                 }
737
738                 nb_hold++;
739                 rxe = &sw_ring[rx_id];
740                 rx_id++;
741                 if (rx_id == rxq->nb_rx_desc)
742                         rx_id = 0;
743
744                 /* Prefetch next mbuf while processing current one. */
745                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
746
747                 /*
748                  * When next RX descriptor is on a cache-line boundary,
749                  * prefetch the next 4 RX descriptors and the next 8 pointers
750                  * to mbufs.
751                  */
752                 if ((rx_id & 0x3) == 0) {
753                         rte_igb_prefetch(&rx_ring[rx_id]);
754                         rte_igb_prefetch(&sw_ring[rx_id]);
755                 }
756
757                 rxm = rxe->mbuf;
758                 rxe->mbuf = nmb;
759                 dma_addr =
760                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
761                 rxdp->read.hdr_addr = dma_addr;
762                 rxdp->read.pkt_addr = dma_addr;
763
764                 /*
765                  * Initialize the returned mbuf.
766                  * 1) setup generic mbuf fields:
767                  *    - number of segments,
768                  *    - next segment,
769                  *    - packet length,
770                  *    - RX port identifier.
771                  * 2) integrate hardware offload data, if any:
772                  *    - RSS flag & hash,
773                  *    - IP checksum flag,
774                  *    - VLAN TCI, if any,
775                  *    - error flags.
776                  */
777                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
778                                       rxq->crc_len);
779                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
780                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
781                 rxm->nb_segs = 1;
782                 rxm->next = NULL;
783                 rxm->pkt_len = pkt_len;
784                 rxm->data_len = pkt_len;
785                 rxm->port = rxq->port_id;
786
787                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
788                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
789                 /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags */
790                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
791
792                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
793                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
794                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
795                 rxm->ol_flags = pkt_flags;
796
797                 /*
798                  * Store the mbuf address into the next entry of the array
799                  * of returned packets.
800                  */
801                 rx_pkts[nb_rx++] = rxm;
802         }
803         rxq->rx_tail = rx_id;
804
805         /*
806          * If the number of free RX descriptors is greater than the RX free
807          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
808          * register.
809          * Update the RDT with the value of the last processed RX descriptor
810          * minus 1, to guarantee that the RDT register is never equal to the
811          * RDH register, which creates a "full" ring situation from the
812          * hardware point of view...
813          */
814         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
815         if (nb_hold > rxq->rx_free_thresh) {
816                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
817                            "nb_hold=%u nb_rx=%u",
818                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819                            (unsigned) rx_id, (unsigned) nb_hold,
820                            (unsigned) nb_rx);
821                 rx_id = (uint16_t) ((rx_id == 0) ?
822                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
823                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
824                 nb_hold = 0;
825         }
826         rxq->nb_rx_hold = nb_hold;
827         return (nb_rx);
828 }
829
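/*
 * Scattered receive: unlike eth_igb_recv_pkts() above, which returns one
 * mbuf per descriptor, this variant chains descriptors into a multi-segment
 * mbuf until the EOP (end of packet) bit is found.
 */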
830 uint16_t
831 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
832                          uint16_t nb_pkts)
833 {
834         struct igb_rx_queue *rxq;
835         volatile union e1000_adv_rx_desc *rx_ring;
836         volatile union e1000_adv_rx_desc *rxdp;
837         struct igb_rx_entry *sw_ring;
838         struct igb_rx_entry *rxe;
839         struct rte_mbuf *first_seg;
840         struct rte_mbuf *last_seg;
841         struct rte_mbuf *rxm;
842         struct rte_mbuf *nmb;
843         union e1000_adv_rx_desc rxd;
844         uint64_t dma; /* Physical address of mbuf data buffer */
845         uint32_t staterr;
846         uint32_t hlen_type_rss;
847         uint16_t rx_id;
848         uint16_t nb_rx;
849         uint16_t nb_hold;
850         uint16_t data_len;
851         uint64_t pkt_flags;
852
853         nb_rx = 0;
854         nb_hold = 0;
855         rxq = rx_queue;
856         rx_id = rxq->rx_tail;
857         rx_ring = rxq->rx_ring;
858         sw_ring = rxq->sw_ring;
859
860         /*
861          * Retrieve RX context of current packet, if any.
862          */
863         first_seg = rxq->pkt_first_seg;
864         last_seg = rxq->pkt_last_seg;
865
866         while (nb_rx < nb_pkts) {
867         next_desc:
868                 /*
869                  * The order of operations here is important as the DD status
870                  * bit must not be read after any other descriptor fields.
871                  * rx_ring and rxdp are pointing to volatile data so the order
872                  * of accesses cannot be reordered by the compiler. If they were
873                  * not volatile, they could be reordered which could lead to
874                  * using invalid descriptor fields when read from rxd.
875                  */
876                 rxdp = &rx_ring[rx_id];
877                 staterr = rxdp->wb.upper.status_error;
878                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
879                         break;
880                 rxd = *rxdp;
881
882                 /*
883                  * Descriptor done.
884                  *
885                  * Allocate a new mbuf to replenish the RX ring descriptor.
886                  * If the allocation fails:
887                  *    - arrange for that RX descriptor to be the first one
888                  *      being parsed the next time the receive function is
889                  *      invoked [on the same queue].
890                  *
891                  *    - Stop parsing the RX ring and return immediately.
892                  *
893                  * This policy does not drop the packet received in the RX
894                  * descriptor for which the allocation of a new mbuf failed.
895                  * Thus, it allows that packet to be retrieved later if
896                  * mbufs have been freed in the meantime.
897                  * As a side effect, holding RX descriptors instead of
898                  * systematically giving them back to the NIC may lead to
899                  * RX ring exhaustion situations.
900                  * However, the NIC can gracefully prevent such situations
901                  * from happening by sending specific "back-pressure" flow
902                  * control frames to its peer(s).
903                  */
904                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
905                            "staterr=0x%x data_len=%u",
906                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
907                            (unsigned) rx_id, (unsigned) staterr,
908                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
909
910                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
911                 if (nmb == NULL) {
912                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
913                                    "queue_id=%u", (unsigned) rxq->port_id,
914                                    (unsigned) rxq->queue_id);
915                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
916                         break;
917                 }
918
919                 nb_hold++;
920                 rxe = &sw_ring[rx_id];
921                 rx_id++;
922                 if (rx_id == rxq->nb_rx_desc)
923                         rx_id = 0;
924
925                 /* Prefetch next mbuf while processing current one. */
926                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
927
928                 /*
929                  * When next RX descriptor is on a cache-line boundary,
930                  * prefetch the next 4 RX descriptors and the next 8 pointers
931                  * to mbufs.
932                  */
933                 if ((rx_id & 0x3) == 0) {
934                         rte_igb_prefetch(&rx_ring[rx_id]);
935                         rte_igb_prefetch(&sw_ring[rx_id]);
936                 }
937
938                 /*
939                  * Update RX descriptor with the physical address of the new
940                  * data buffer of the newly allocated mbuf.
941                  */
942                 rxm = rxe->mbuf;
943                 rxe->mbuf = nmb;
944                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
945                 rxdp->read.pkt_addr = dma;
946                 rxdp->read.hdr_addr = dma;
947
948                 /*
949                  * Set data length & data buffer address of mbuf.
950                  */
951                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
952                 rxm->data_len = data_len;
953                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
954
955                 /*
956                  * If this is the first buffer of the received packet,
957                  * set the pointer to the first mbuf of the packet and
958                  * initialize its context.
959                  * Otherwise, update the total length and the number of segments
960                  * of the current scattered packet, and update the pointer to
961                  * the last mbuf of the current packet.
962                  */
963                 if (first_seg == NULL) {
964                         first_seg = rxm;
965                         first_seg->pkt_len = data_len;
966                         first_seg->nb_segs = 1;
967                 } else {
968                         first_seg->pkt_len += data_len;
969                         first_seg->nb_segs++;
970                         last_seg->next = rxm;
971                 }
972
973                 /*
974                  * If this is not the last buffer of the received packet,
975                  * update the pointer to the last mbuf of the current scattered
976                  * packet and continue to parse the RX ring.
977                  */
978                 if (! (staterr & E1000_RXD_STAT_EOP)) {
979                         last_seg = rxm;
980                         goto next_desc;
981                 }
982
983                 /*
984                  * This is the last buffer of the received packet.
985                  * If the CRC is not stripped by the hardware:
986                  *   - Subtract the CRC length from the total packet length.
987                  *   - If the last buffer only contains the whole CRC or a part
988                  *     of it, free the mbuf associated with the last buffer.
989                  *     If part of the CRC is also contained in the previous
990                  *     mbuf, subtract the length of that CRC part from the
991                  *     data length of the previous mbuf.
992                  */
993                 rxm->next = NULL;
994                 if (unlikely(rxq->crc_len > 0)) {
995                         first_seg->pkt_len -= ETHER_CRC_LEN;
996                         if (data_len <= ETHER_CRC_LEN) {
997                                 rte_pktmbuf_free_seg(rxm);
998                                 first_seg->nb_segs--;
999                                 last_seg->data_len = (uint16_t)
1000                                         (last_seg->data_len -
1001                                          (ETHER_CRC_LEN - data_len));
1002                                 last_seg->next = NULL;
1003                         } else
1004                                 rxm->data_len =
1005                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1006                 }
1007
1008                 /*
1009                  * Initialize the first mbuf of the returned packet:
1010                  *    - RX port identifier,
1011                  *    - hardware offload data, if any:
1012                  *      - RSS flag & hash,
1013                  *      - IP checksum flag,
1014                  *      - VLAN TCI, if any,
1015                  *      - error flags.
1016                  */
1017                 first_seg->port = rxq->port_id;
1018                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1019
1020                 /*
1021                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1022                  * set in the pkt_flags field.
1023                  */
1024                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1025                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1026                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1027                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1028                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1029                 first_seg->ol_flags = pkt_flags;
1030
1031                 /* Prefetch data of first segment, if configured to do so. */
1032                 rte_packet_prefetch((char *)first_seg->buf_addr +
1033                         first_seg->data_off);
1034
1035                 /*
1036                  * Store the mbuf address into the next entry of the array
1037                  * of returned packets.
1038                  */
1039                 rx_pkts[nb_rx++] = first_seg;
1040
1041                 /*
1042                  * Set up the receive context for a new packet.
1043                  */
1044                 first_seg = NULL;
1045         }
1046
1047         /*
1048          * Record index of the next RX descriptor to probe.
1049          */
1050         rxq->rx_tail = rx_id;
1051
1052         /*
1053          * Save receive context.
1054          */
1055         rxq->pkt_first_seg = first_seg;
1056         rxq->pkt_last_seg = last_seg;
1057
1058         /*
1059          * If the number of free RX descriptors is greater than the RX free
1060          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1061          * register.
1062          * Update the RDT with the value of the last processed RX descriptor
1063          * minus 1, to guarantee that the RDT register is never equal to the
1064          * RDH register, which creates a "full" ring situation from the
1065          * hardware point of view...
1066          */
1067         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1068         if (nb_hold > rxq->rx_free_thresh) {
1069                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1070                            "nb_hold=%u nb_rx=%u",
1071                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1072                            (unsigned) rx_id, (unsigned) nb_hold,
1073                            (unsigned) nb_rx);
1074                 rx_id = (uint16_t) ((rx_id == 0) ?
1075                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1076                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1077                 nb_hold = 0;
1078         }
1079         rxq->nb_rx_hold = nb_hold;
1080         return (nb_rx);
1081 }
1082
1083 /*
1084  * Rings setup and release.
1085  *
1086  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1087  * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1088  * This also optimizes the cache line size effect.
1089  * H/W supports cache line sizes of up to 128 bytes.
1090  */
1091 #define IGB_ALIGN 128
1092
1093 /*
1094  * Maximum number of Ring Descriptors.
1095  *
1096  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1097  * descriptors should meet the following condition:
1098  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1099  */
1100 #define IGB_MIN_RING_DESC 32
1101 #define IGB_MAX_RING_DESC 4096
1102
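/*
 * Reserve (or look up) the memzone backing a queue's descriptor ring. The
 * zone name encodes the driver, ring name, port and queue id, so a repeated
 * setup of the same queue reuses the existing zone.
 */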
1103 static const struct rte_memzone *
1104 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1105                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1106 {
1107         char z_name[RTE_MEMZONE_NAMESIZE];
1108         const struct rte_memzone *mz;
1109
1110         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1111                         dev->driver->pci_drv.name, ring_name,
1112                                 dev->data->port_id, queue_id);
1113         mz = rte_memzone_lookup(z_name);
1114         if (mz)
1115                 return mz;
1116
1117 #ifdef RTE_LIBRTE_XEN_DOM0
1118         return rte_memzone_reserve_bounded(z_name, ring_size,
1119                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1120 #else
1121         return rte_memzone_reserve_aligned(z_name, ring_size,
1122                         socket_id, 0, IGB_ALIGN);
1123 #endif
1124 }
1125
1126 static void
1127 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1128 {
1129         unsigned i;
1130
1131         if (txq->sw_ring != NULL) {
1132                 for (i = 0; i < txq->nb_tx_desc; i++) {
1133                         if (txq->sw_ring[i].mbuf != NULL) {
1134                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1135                                 txq->sw_ring[i].mbuf = NULL;
1136                         }
1137                 }
1138         }
1139 }
1140
1141 static void
1142 igb_tx_queue_release(struct igb_tx_queue *txq)
1143 {
1144         if (txq != NULL) {
1145                 igb_tx_queue_release_mbufs(txq);
1146                 rte_free(txq->sw_ring);
1147                 rte_free(txq);
1148         }
1149 }
1150
1151 void
1152 eth_igb_tx_queue_release(void *txq)
1153 {
1154         igb_tx_queue_release(txq);
1155 }
1156
1157 static void
1158 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1159 {
1160         txq->tx_head = 0;
1161         txq->tx_tail = 0;
1162         txq->ctx_curr = 0;
1163         memset((void*)&txq->ctx_cache, 0,
1164                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1165 }
1166
1167 static void
1168 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1169 {
1170         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1171                         .buffer_addr = 0}};
1172         struct igb_tx_entry *txe = txq->sw_ring;
1173         uint16_t i, prev;
1174         struct e1000_hw *hw;
1175
1176         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1177         /* Zero out HW ring memory */
1178         for (i = 0; i < txq->nb_tx_desc; i++) {
1179                 txq->tx_ring[i] = zeroed_desc;
1180         }
1181
1182         /* Initialize ring entries */
1183         prev = (uint16_t)(txq->nb_tx_desc - 1);
1184         for (i = 0; i < txq->nb_tx_desc; i++) {
1185                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1186
1187                 txd->wb.status = E1000_TXD_STAT_DD;
1188                 txe[i].mbuf = NULL;
1189                 txe[i].last_id = i;
1190                 txe[prev].next_id = i;
1191                 prev = i;
1192         }
1193
1194         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1195         /* 82575 specific, each tx queue will use 2 hw contexts */
1196         if (hw->mac.type == e1000_82575)
1197                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1198
1199         igb_reset_tx_queue_stat(txq);
1200 }
1201
1202 int
1203 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1204                          uint16_t queue_idx,
1205                          uint16_t nb_desc,
1206                          unsigned int socket_id,
1207                          const struct rte_eth_txconf *tx_conf)
1208 {
1209         const struct rte_memzone *tz;
1210         struct igb_tx_queue *txq;
1211         struct e1000_hw     *hw;
1212         uint32_t size;
1213
1214         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215
1216         /*
1217          * Validate number of transmit descriptors.
1218          * It must not exceed the hardware maximum, and the resulting ring
1219          * size must be a multiple of IGB_ALIGN.
1220          */
1221         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1222             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1223                 return -EINVAL;
1224         }
1225
1226         /*
1227          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1228          * driver.
1229          */
1230         if (tx_conf->tx_free_thresh != 0)
1231                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1232                              "used for the 1G driver.");
1233         if (tx_conf->tx_rs_thresh != 0)
1234                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1235                              "used for the 1G driver.");
1236         if (tx_conf->tx_thresh.wthresh == 0)
1237                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1238                              "consider setting the TX WTHRESH value to 4, 8, "
1239                              "or 16.");
1240
1241         /* Free memory prior to re-allocation if needed */
1242         if (dev->data->tx_queues[queue_idx] != NULL) {
1243                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1244                 dev->data->tx_queues[queue_idx] = NULL;
1245         }
1246
1247         /* First allocate the tx queue data structure */
1248         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1249                                                         CACHE_LINE_SIZE);
1250         if (txq == NULL)
1251                 return (-ENOMEM);
1252
1253         /*
1254          * Allocate TX ring hardware descriptors. A memzone large enough to
1255          * handle the maximum ring size is allocated in order to allow for
1256          * resizing in later calls to the queue setup function.
1257          */
1258         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1259         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1260                                         size, socket_id);
1261         if (tz == NULL) {
1262                 igb_tx_queue_release(txq);
1263                 return (-ENOMEM);
1264         }
1265
1266         txq->nb_tx_desc = nb_desc;
1267         txq->pthresh = tx_conf->tx_thresh.pthresh;
1268         txq->hthresh = tx_conf->tx_thresh.hthresh;
1269         txq->wthresh = tx_conf->tx_thresh.wthresh;
1270         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1271                 txq->wthresh = 1;
1272         txq->queue_id = queue_idx;
1273         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1274                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1275         txq->port_id = dev->data->port_id;
1276
1277         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1278 #ifndef RTE_LIBRTE_XEN_DOM0
1279         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1280 #else
1281         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1282 #endif
1283         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1284         /* Allocate software ring */
1285         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1286                                    sizeof(struct igb_tx_entry) * nb_desc,
1287                                    CACHE_LINE_SIZE);
1288         if (txq->sw_ring == NULL) {
1289                 igb_tx_queue_release(txq);
1290                 return (-ENOMEM);
1291         }
1292         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1293                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1294
1295         igb_reset_tx_queue(txq, dev);
1296         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1297         dev->data->tx_queues[queue_idx] = txq;
1298
1299         return (0);
1300 }
1301
1302 static void
1303 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1304 {
1305         unsigned i;
1306
1307         if (rxq->sw_ring != NULL) {
1308                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1309                         if (rxq->sw_ring[i].mbuf != NULL) {
1310                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1311                                 rxq->sw_ring[i].mbuf = NULL;
1312                         }
1313                 }
1314         }
1315 }
1316
1317 static void
1318 igb_rx_queue_release(struct igb_rx_queue *rxq)
1319 {
1320         if (rxq != NULL) {
1321                 igb_rx_queue_release_mbufs(rxq);
1322                 rte_free(rxq->sw_ring);
1323                 rte_free(rxq);
1324         }
1325 }
1326
1327 void
1328 eth_igb_rx_queue_release(void *rxq)
1329 {
1330         igb_rx_queue_release(rxq);
1331 }
1332
1333 static void
1334 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1335 {
1336         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1337                         .pkt_addr = 0}};
1338         unsigned i;
1339
1340         /* Zero out HW ring memory */
1341         for (i = 0; i < rxq->nb_rx_desc; i++) {
1342                 rxq->rx_ring[i] = zeroed_desc;
1343         }
1344
1345         rxq->rx_tail = 0;
1346         rxq->pkt_first_seg = NULL;
1347         rxq->pkt_last_seg = NULL;
1348 }
1349
1350 int
1351 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1352                          uint16_t queue_idx,
1353                          uint16_t nb_desc,
1354                          unsigned int socket_id,
1355                          const struct rte_eth_rxconf *rx_conf,
1356                          struct rte_mempool *mp)
1357 {
1358         const struct rte_memzone *rz;
1359         struct igb_rx_queue *rxq;
1360         struct e1000_hw     *hw;
1361         unsigned int size;
1362
1363         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1364
1365         /*
1366          * Validate number of receive descriptors.
1367          * It must not exceed hardware maximum, and must be multiple
1368          * of IGB_ALIGN.
1369          */
1370         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1371             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1372                 return (-EINVAL);
1373         }
1374
1375         /* Free memory prior to re-allocation if needed */
1376         if (dev->data->rx_queues[queue_idx] != NULL) {
1377                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1378                 dev->data->rx_queues[queue_idx] = NULL;
1379         }
1380
1381         /* First allocate the RX queue data structure. */
1382         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1383                           CACHE_LINE_SIZE);
1384         if (rxq == NULL)
1385                 return (-ENOMEM);
1386         rxq->mb_pool = mp;
1387         rxq->nb_rx_desc = nb_desc;
1388         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1389         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1390         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1391         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1392                 rxq->wthresh = 1;
1393         rxq->drop_en = rx_conf->rx_drop_en;
1394         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1395         rxq->queue_id = queue_idx;
1396         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1397                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1398         rxq->port_id = dev->data->port_id;
1399         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1400                                   ETHER_CRC_LEN);
1401
1402         /*
1403          *  Allocate RX ring hardware descriptors. A memzone large enough to
1404          *  handle the maximum ring size is allocated in order to allow for
1405          *  resizing in later calls to the queue setup function.
1406          */
1407         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1408         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1409         if (rz == NULL) {
1410                 igb_rx_queue_release(rxq);
1411                 return (-ENOMEM);
1412         }
1413         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1414         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1415 #ifndef RTE_LIBRTE_XEN_DOM0
1416         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1417 #else
1418         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1419 #endif
1420         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1421
1422         /* Allocate software ring. */
1423         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1424                                    sizeof(struct igb_rx_entry) * nb_desc,
1425                                    CACHE_LINE_SIZE);
1426         if (rxq->sw_ring == NULL) {
1427                 igb_rx_queue_release(rxq);
1428                 return (-ENOMEM);
1429         }
1430         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1431                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1432
1433         dev->data->rx_queues[queue_idx] = rxq;
1434         igb_reset_rx_queue(rxq);
1435
1436         return 0;
1437 }
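
/*
 * Illustrative sketch (not part of the driver): a minimal application-side
 * call into the RX setup path above.  "port_id", "socket_id" and
 * "mbuf_pool" are assumed to be provided by the caller; the descriptor
 * count must pass the IGB_MIN/MAX_RING_DESC and IGB_ALIGN checks performed
 * at the top of eth_igb_rx_queue_setup().
 *
 *   static const struct rte_eth_rxconf example_rx_conf = {
 *           .rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
 *           .rx_free_thresh = 32,
 *           .rx_drop_en = 0,
 *   };
 *
 *   static int
 *   example_setup_rx_queue(uint8_t port_id, unsigned int socket_id,
 *                          struct rte_mempool *mbuf_pool)
 *   {
 *           return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
 *                                         &example_rx_conf, mbuf_pool);
 *   }
 */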
1438
1439 uint32_t
1440 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1441 {
1442 #define IGB_RXQ_SCAN_INTERVAL 4
1443         volatile union e1000_adv_rx_desc *rxdp;
1444         struct igb_rx_queue *rxq;
1445         uint32_t desc = 0;
1446
1447         if (rx_queue_id >= dev->data->nb_rx_queues) {
1448                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1449                 return 0;
1450         }
1451
1452         rxq = dev->data->rx_queues[rx_queue_id];
1453         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1454
1455         while ((desc < rxq->nb_rx_desc) &&
1456                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1457                 desc += IGB_RXQ_SCAN_INTERVAL;
1458                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1459                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1460                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1461                                 desc - rxq->nb_rx_desc]);
1462         }
1463
1464         return desc;
1465 }
1466
1467 int
1468 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1469 {
1470         volatile union e1000_adv_rx_desc *rxdp;
1471         struct igb_rx_queue *rxq = rx_queue;
1472         uint32_t desc;
1473
1474         if (unlikely(offset >= rxq->nb_rx_desc))
1475                 return 0;
1476         desc = rxq->rx_tail + offset;
1477         if (desc >= rxq->nb_rx_desc)
1478                 desc -= rxq->nb_rx_desc;
1479
1480         rxdp = &rxq->rx_ring[desc];
1481         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1482 }
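
/*
 * Illustrative sketch (not part of the driver): how an application might
 * use the two functions above through their ethdev wrappers to gauge RX
 * ring occupancy before deciding how aggressively to poll.  "port_id" and
 * "queue_id" are assumed.
 *
 *   static int
 *   example_rx_ring_pressure(uint8_t port_id, uint16_t queue_id)
 *   {
 *           // Descriptors with the DD bit set, counted in steps of
 *           // IGB_RXQ_SCAN_INTERVAL by eth_igb_rx_queue_count().
 *           uint32_t used = rte_eth_rx_queue_count(port_id, queue_id);
 *
 *           // Non-zero if the 64th descriptor past the current tail has
 *           // already been written back by the hardware.
 *           int done = rte_eth_rx_descriptor_done(port_id, queue_id, 64);
 *
 *           return (used > 64) || done;
 *   }
 */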
1483
1484 void
1485 igb_dev_clear_queues(struct rte_eth_dev *dev)
1486 {
1487         uint16_t i;
1488         struct igb_tx_queue *txq;
1489         struct igb_rx_queue *rxq;
1490
1491         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1492                 txq = dev->data->tx_queues[i];
1493                 if (txq != NULL) {
1494                         igb_tx_queue_release_mbufs(txq);
1495                         igb_reset_tx_queue(txq, dev);
1496                 }
1497         }
1498
1499         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1500                 rxq = dev->data->rx_queues[i];
1501                 if (rxq != NULL) {
1502                         igb_rx_queue_release_mbufs(rxq);
1503                         igb_reset_rx_queue(rxq);
1504                 }
1505         }
1506 }
1507
1508 /**
1509  * Receive Side Scaling (RSS).
1510  * See section 7.1.1.7 in the following document:
1511  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1512  *
1513  * Principles:
1514  * The source and destination IP addresses of the IP header and the source and
1515  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1516  * against a configurable random key to compute a 32-bit RSS hash result.
1517  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1518  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1519  * RSS output index which is used as the RX queue index where to store the
1520  * received packets.
1521  * The following output is supplied in the RX write-back descriptor:
1522  *     - 32-bit result of the Microsoft RSS hash function,
1523  *     - 4-bit RSS type field.
1524  */
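
/*
 * Minimal sketch of the RETA lookup described above, assuming the 32-bit
 * RSS hash has already been taken from the write-back descriptor (for
 * example from mbuf->hash.rss when PKT_RX_RSS_HASH is set).  "reta" stands
 * for a software copy of the 128-entry redirection table.
 *
 *   static inline uint8_t
 *   example_rss_queue(uint32_t rss_hash, const uint8_t reta[128])
 *   {
 *           // The seven least-significant bits of the hash select one of
 *           // the 128 RETA entries; the entry holds the RX queue index.
 *           return reta[rss_hash & 0x7F];
 *   }
 */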
1525
1526 /*
1527  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1528  * Used as the default key.
1529  */
1530 static uint8_t rss_intel_key[40] = {
1531         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1532         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1533         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1534         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1535         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1536 };
1537
1538 static void
1539 igb_rss_disable(struct rte_eth_dev *dev)
1540 {
1541         struct e1000_hw *hw;
1542         uint32_t mrqc;
1543
1544         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1546         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1547         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1548 }
1549
1550 static void
1551 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1552 {
1553         uint8_t  *hash_key;
1554         uint32_t rss_key;
1555         uint32_t mrqc;
1556         uint64_t rss_hf;
1557         uint16_t i;
1558
1559         hash_key = rss_conf->rss_key;
1560         if (hash_key != NULL) {
1561                 /* Fill in RSS hash key */
1562                 for (i = 0; i < 10; i++) {
1563                         rss_key  = hash_key[(i * 4)];
1564                         rss_key |= hash_key[(i * 4) + 1] << 8;
1565                         rss_key |= hash_key[(i * 4) + 2] << 16;
1566                         rss_key |= hash_key[(i * 4) + 3] << 24;
1567                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1568                 }
1569         }
1570
1571         /* Set configured hashing protocols in MRQC register */
1572         rss_hf = rss_conf->rss_hf;
1573         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1574         if (rss_hf & ETH_RSS_IPV4)
1575                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1576         if (rss_hf & ETH_RSS_IPV4_TCP)
1577                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1578         if (rss_hf & ETH_RSS_IPV6)
1579                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1580         if (rss_hf & ETH_RSS_IPV6_EX)
1581                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1582         if (rss_hf & ETH_RSS_IPV6_TCP)
1583                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1584         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1585                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1586         if (rss_hf & ETH_RSS_IPV4_UDP)
1587                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1588         if (rss_hf & ETH_RSS_IPV6_UDP)
1589                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1590         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1591                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1592         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1593 }
1594
1595 int
1596 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1597                         struct rte_eth_rss_conf *rss_conf)
1598 {
1599         struct e1000_hw *hw;
1600         uint32_t mrqc;
1601         uint64_t rss_hf;
1602
1603         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604
1605         /*
1606          * Before changing anything, first check that the update RSS operation
1607          * does not attempt to disable RSS, if RSS was enabled at
1608          * initialization time, or does not attempt to enable RSS, if RSS was
1609          * disabled at initialization time.
1610          */
1611         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1612         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1613         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1614                 if (rss_hf != 0) /* Enable RSS */
1615                         return -(EINVAL);
1616                 return 0; /* Nothing to do */
1617         }
1618         /* RSS enabled */
1619         if (rss_hf == 0) /* Disable RSS */
1620                 return -(EINVAL);
1621         igb_hw_rss_hash_set(hw, rss_conf);
1622         return 0;
1623 }
1624
1625 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1626                               struct rte_eth_rss_conf *rss_conf)
1627 {
1628         struct e1000_hw *hw;
1629         uint8_t *hash_key;
1630         uint32_t rss_key;
1631         uint32_t mrqc;
1632         uint64_t rss_hf;
1633         uint16_t i;
1634
1635         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636         hash_key = rss_conf->rss_key;
1637         if (hash_key != NULL) {
1638                 /* Return RSS hash key */
1639                 for (i = 0; i < 10; i++) {
1640                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1641                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1642                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1643                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1644                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1645                 }
1646         }
1647
1648         /* Get RSS functions configured in MRQC register */
1649         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1650         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1651                 rss_conf->rss_hf = 0;
1652                 return 0;
1653         }
1654         rss_hf = 0;
1655         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1656                 rss_hf |= ETH_RSS_IPV4;
1657         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1658                 rss_hf |= ETH_RSS_IPV4_TCP;
1659         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1660                 rss_hf |= ETH_RSS_IPV6;
1661         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1662                 rss_hf |= ETH_RSS_IPV6_EX;
1663         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1664                 rss_hf |= ETH_RSS_IPV6_TCP;
1665         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1666                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1667         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1668                 rss_hf |= ETH_RSS_IPV4_UDP;
1669         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1670                 rss_hf |= ETH_RSS_IPV6_UDP;
1671         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1672                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1673         rss_conf->rss_hf = rss_hf;
1674         return 0;
1675 }
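
/*
 * Illustrative sketch (not part of the driver): updating and reading back
 * the RSS hash configuration at runtime through the ethdev wrappers of the
 * two functions above, assuming RSS was enabled when the port was
 * configured.  "port_id" is assumed; the 40-byte key buffer matches the
 * RSSRK register space written by igb_hw_rss_hash_set().
 *
 *   static int
 *   example_rss_rehash(uint8_t port_id)
 *   {
 *           uint8_t key[40];
 *           struct rte_eth_rss_conf conf = {
 *                   .rss_key = NULL,   // keep the currently programmed key
 *                   .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
 *           };
 *
 *           if (rte_eth_dev_rss_hash_update(port_id, &conf) != 0)
 *                   return -1;
 *
 *           conf.rss_key = key;        // retrieve the key on the way back
 *           return rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 *   }
 */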
1676
1677 static void
1678 igb_rss_configure(struct rte_eth_dev *dev)
1679 {
1680         struct rte_eth_rss_conf rss_conf;
1681         struct e1000_hw *hw;
1682         uint32_t shift;
1683         uint16_t i;
1684
1685         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1686
1687         /* Fill in redirection table. */
1688         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1689         for (i = 0; i < 128; i++) {
1690                 union e1000_reta {
1691                         uint32_t dword;
1692                         uint8_t  bytes[4];
1693                 } reta;
1694                 uint8_t q_idx;
1695
1696                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1697                                    i % dev->data->nb_rx_queues : 0);
1698                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1699                 if ((i & 3) == 3)
1700                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1701         }
1702
1703         /*
1704          * Configure the RSS key and the RSS protocols used to compute
1705          * the RSS hash of input packets.
1706          */
1707         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1708         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1709                 igb_rss_disable(dev);
1710                 return;
1711         }
1712         if (rss_conf.rss_key == NULL)
1713                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1714         igb_hw_rss_hash_set(hw, &rss_conf);
1715 }
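
/*
 * Illustrative sketch (not part of the driver): the port configuration an
 * application could supply so that igb_rss_configure() above enables RSS.
 * Leaving rss_key NULL selects the default rss_intel_key; rss_hf must
 * intersect IGB_RSS_OFFLOAD_ALL, otherwise RSS is disabled.
 * "example_port_conf" is a name chosen for this sketch.
 *
 *   static const struct rte_eth_conf example_port_conf = {
 *           .rxmode = {
 *                   .mq_mode = ETH_MQ_RX_RSS,
 *           },
 *           .rx_adv_conf = {
 *                   .rss_conf = {
 *                           .rss_key = NULL,   // fall back to rss_intel_key
 *                           .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP |
 *                                     ETH_RSS_IPV6 | ETH_RSS_IPV6_TCP,
 *                   },
 *           },
 *   };
 */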
1716
1717 /*
1718  * Check whether the MAC type supports VMDq.
1719  * Return 1 if it does, 0 otherwise.
1720  */
1721 static int
1722 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1723 {
1724         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1725
1726         switch (hw->mac.type) {
1727         case e1000_82576:
1728         case e1000_82580:
1729         case e1000_i350:
1730                 return 1;
1731         case e1000_82540:
1732         case e1000_82541:
1733         case e1000_82542:
1734         case e1000_82543:
1735         case e1000_82544:
1736         case e1000_82545:
1737         case e1000_82546:
1738         case e1000_82547:
1739         case e1000_82571:
1740         case e1000_82572:
1741         case e1000_82573:
1742         case e1000_82574:
1743         case e1000_82583:
1744         case e1000_i210:
1745         case e1000_i211:
1746         default:
1747                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1748                 return 0;
1749         }
1750 }
1751
1752 static int
1753 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1754 {
1755         struct rte_eth_vmdq_rx_conf *cfg;
1756         struct e1000_hw *hw;
1757         uint32_t mrqc, vt_ctl, vmolr, rctl;
1758         int i;
1759
1760         PMD_INIT_FUNC_TRACE();
1761
1762         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1763         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1764
1765         /* Check whether the MAC type supports VMDq; 0 means it does not */
1766         if (igb_is_vmdq_supported(dev) == 0)
1767                 return -1;
1768
1769         igb_rss_disable(dev);
1770
1771         /* RCTL: enable VLAN filter */
1772         rctl = E1000_READ_REG(hw, E1000_RCTL);
1773         rctl |= E1000_RCTL_VFE;
1774         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1775
1776         /* MRQC: enable vmdq */
1777         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1778         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1779         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1780
1781         /* VTCTL:  pool selection according to VLAN tag */
1782         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1783         if (cfg->enable_default_pool)
1784                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1785         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1786         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1787
1788         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1789                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1790                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1791                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1792                         E1000_VMOLR_MPME);
1793
1794                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1795                         vmolr |= E1000_VMOLR_AUPE;
1796                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1797                         vmolr |= E1000_VMOLR_ROMPE;
1798                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1799                         vmolr |= E1000_VMOLR_ROPE;
1800                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1801                         vmolr |= E1000_VMOLR_BAM;
1802                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1803                         vmolr |= E1000_VMOLR_MPME;
1804
1805                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1806         }
1807
1808         /*
1809          * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1810          * Both 82576 and 82580 support it.
1811          */
1812         if (hw->mac.type != e1000_i350) {
1813                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1814                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1815                         vmolr |= E1000_VMOLR_STRVLAN;
1816                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1817                 }
1818         }
1819
1820         /* VFTA - enable all vlan filters */
1821         for (i = 0; i < IGB_VFTA_SIZE; i++)
1822                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1823
1824         /* VFRE: enable 8 RX pools; both 82576 and i350 support it */
1825         if (hw->mac.type != e1000_82580)
1826                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1827
1828         /*
1829          * RAH/RAL - allow pools to read specific mac addresses
1830          * In this case, all pools should be able to read from mac addr 0
1831          */
1832         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1833         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1834
1835         /* VLVF: set up filters for vlan tags as configured */
1836         for (i = 0; i < cfg->nb_pool_maps; i++) {
1837                 /* set vlan id in VF register and set the valid bit */
1838                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1839                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1840                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1841                         E1000_VLVF_POOLSEL_MASK)));
1842         }
1843
1844         E1000_WRITE_FLUSH(hw);
1845
1846         return 0;
1847 }
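
/*
 * Illustrative sketch (not part of the driver): a VMDq RX configuration
 * that would exercise the pool-selection logic above, mapping VLAN 10 to
 * pool 0 and VLAN 20 to pool 1.  All names and values are example choices.
 *
 *   static const struct rte_eth_conf example_vmdq_conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
 *           .rx_adv_conf = {
 *                   .vmdq_rx_conf = {
 *                           .nb_queue_pools = ETH_8_POOLS,
 *                           .enable_default_pool = 0,
 *                           .nb_pool_maps = 2,
 *                           .pool_map = {
 *                                   { .vlan_id = 10, .pools = 1 << 0 },
 *                                   { .vlan_id = 20, .pools = 1 << 1 },
 *                           },
 *                           .rx_mode = ETH_VMDQ_ACCEPT_UNTAG |
 *                                      ETH_VMDQ_ACCEPT_BROADCAST,
 *                   },
 *           },
 *   };
 */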
1848
1849
1850 /*********************************************************************
1851  *
1852  *  Enable receive unit.
1853  *
1854  **********************************************************************/
1855
1856 static int
1857 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1858 {
1859         struct igb_rx_entry *rxe = rxq->sw_ring;
1860         uint64_t dma_addr;
1861         unsigned i;
1862
1863         /* Initialize software ring entries. */
1864         for (i = 0; i < rxq->nb_rx_desc; i++) {
1865                 volatile union e1000_adv_rx_desc *rxd;
1866                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1867
1868                 if (mbuf == NULL) {
1869                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1870                                      "queue_id=%hu", rxq->queue_id);
1871                         return (-ENOMEM);
1872                 }
1873                 dma_addr =
1874                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1875                 rxd = &rxq->rx_ring[i];
1876                 rxd->read.hdr_addr = dma_addr;
1877                 rxd->read.pkt_addr = dma_addr;
1878                 rxe[i].mbuf = mbuf;
1879         }
1880
1881         return 0;
1882 }
1883
1884 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1885 static int
1886 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1887 {
1888         struct e1000_hw *hw =
1889                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1890         uint32_t mrqc;
1891
1892         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1893                 /*
1894                  * SRIOV active scheme
1895                  * FIXME if support RSS together with VMDq & SRIOV
1896                  */
1897                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1898                 /* 011b: Def_Q is ignored; the default queue follows VT_CTL.DEF_PL */
1899                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1900                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1901         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1902                 /*
1903                  * SRIOV inactive scheme
1904                  */
1905                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1906                         case ETH_MQ_RX_RSS:
1907                                 igb_rss_configure(dev);
1908                                 break;
1909                         case ETH_MQ_RX_VMDQ_ONLY:
1910                                 /*Configure general VMDQ only RX parameters*/
1911                                 igb_vmdq_rx_hw_configure(dev);
1912                                 break;
1913                         case ETH_MQ_RX_NONE:
1914                                 /* If mq_mode is none, disable RSS. */
1915                         default:
1916                                 igb_rss_disable(dev);
1917                                 break;
1918                 }
1919         }
1920
1921         return 0;
1922 }
1923
1924 int
1925 eth_igb_rx_init(struct rte_eth_dev *dev)
1926 {
1927         struct e1000_hw     *hw;
1928         struct igb_rx_queue *rxq;
1929         struct rte_pktmbuf_pool_private *mbp_priv;
1930         uint32_t rctl;
1931         uint32_t rxcsum;
1932         uint32_t srrctl;
1933         uint16_t buf_size;
1934         uint16_t rctl_bsize;
1935         uint16_t i;
1936         int ret;
1937
1938         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1939         srrctl = 0;
1940
1941         /*
1942          * Make sure receives are disabled while setting
1943          * up the descriptor ring.
1944          */
1945         rctl = E1000_READ_REG(hw, E1000_RCTL);
1946         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1947
1948         /*
1949          * Configure support of jumbo frames, if any.
1950          */
1951         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1952                 rctl |= E1000_RCTL_LPE;
1953
1954                 /*
1955                  * Set maximum packet length by default, and might be updated
1956                  * together with enabling/disabling dual VLAN.
1957                  */
1958                 E1000_WRITE_REG(hw, E1000_RLPML,
1959                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1960                                                 VLAN_TAG_SIZE);
1961         } else
1962                 rctl &= ~E1000_RCTL_LPE;
1963
1964         /* Configure and enable each RX queue. */
1965         rctl_bsize = 0;
1966         dev->rx_pkt_burst = eth_igb_recv_pkts;
1967         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1968                 uint64_t bus_addr;
1969                 uint32_t rxdctl;
1970
1971                 rxq = dev->data->rx_queues[i];
1972
1973                 /* Allocate buffers for descriptor rings and set up queue */
1974                 ret = igb_alloc_rx_queue_mbufs(rxq);
1975                 if (ret)
1976                         return ret;
1977
1978                 /*
1979                  * Reset crc_len in case it was changed after queue setup by a
1980                  *  call to configure
1981                  */
1982                 rxq->crc_len =
1983                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1984                                                         0 : ETHER_CRC_LEN);
1985
1986                 bus_addr = rxq->rx_ring_phys_addr;
1987                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1988                                 rxq->nb_rx_desc *
1989                                 sizeof(union e1000_adv_rx_desc));
1990                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1991                                 (uint32_t)(bus_addr >> 32));
1992                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1993
1994                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1995
1996                 /*
1997                  * Configure RX buffer size.
1998                  */
1999                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2000                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2001                                        RTE_PKTMBUF_HEADROOM);
2002                 if (buf_size >= 1024) {
2003                         /*
2004                          * Configure the BSIZEPACKET field of the SRRCTL
2005                          * register of the queue.
2006                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2007                          * If this field is equal to 0b, then RCTL.BSIZE
2008                          * determines the RX packet buffer size.
2009                          */
2010                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2011                                    E1000_SRRCTL_BSIZEPKT_MASK);
2012                         buf_size = (uint16_t) ((srrctl &
2013                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2014                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2015
2016                         /* Add the dual VLAN tag length to support dual VLAN */
2017                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2018                                                 2 * VLAN_TAG_SIZE) > buf_size){
2019                                 if (!dev->data->scattered_rx)
2020                                         PMD_INIT_LOG(DEBUG,
2021                                                      "forcing scatter mode");
2022                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2023                                 dev->data->scattered_rx = 1;
2024                         }
2025                 } else {
2026                         /*
2027                          * Use BSIZE field of the device RCTL register.
2028                          */
2029                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2030                                 rctl_bsize = buf_size;
2031                         if (!dev->data->scattered_rx)
2032                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2033                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2034                         dev->data->scattered_rx = 1;
2035                 }
2036
2037                 /* Set if packets are dropped when no descriptors available */
2038                 if (rxq->drop_en)
2039                         srrctl |= E1000_SRRCTL_DROP_EN;
2040
2041                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2042
2043                 /* Enable this RX queue. */
2044                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2045                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2046                 rxdctl &= 0xFFF00000;
2047                 rxdctl |= (rxq->pthresh & 0x1F);
2048                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2049                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2050                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2051         }
2052
2053         if (dev->data->dev_conf.rxmode.enable_scatter) {
2054                 if (!dev->data->scattered_rx)
2055                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2056                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2057                 dev->data->scattered_rx = 1;
2058         }
2059
2060         /*
2061          * Setup BSIZE field of RCTL register, if needed.
2062          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2063          * register, since the code above configures the SRRCTL register of
2064          * the RX queue in such a case.
2065          * All configurable sizes are:
2066          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2067          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2068          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2069          *  2048: rctl |= E1000_RCTL_SZ_2048;
2070          *  1024: rctl |= E1000_RCTL_SZ_1024;
2071          *   512: rctl |= E1000_RCTL_SZ_512;
2072          *   256: rctl |= E1000_RCTL_SZ_256;
2073          */
2074         if (rctl_bsize > 0) {
2075                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2076                         rctl |= E1000_RCTL_SZ_512;
2077                 else /* 256 <= buf_size < 512 - use 256 */
2078                         rctl |= E1000_RCTL_SZ_256;
2079         }
2080
2081         /*
2082          * Configure RSS if device configured with multiple RX queues.
2083          */
2084         igb_dev_mq_rx_configure(dev);
2085
2086         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2087         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2088
2089         /*
2090          * Setup the Checksum Register.
2091          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2092          */
2093         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2094         rxcsum |= E1000_RXCSUM_PCSD;
2095
2096         /* Enable both L3/L4 rx checksum offload */
2097         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2098                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2099         else
2100                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2101         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2102
2103         /* Setup the Receive Control Register. */
2104         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2105                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2106
2107                 /* set STRCRC bit in all queues */
2108                 if (hw->mac.type == e1000_i350 ||
2109                     hw->mac.type == e1000_i210 ||
2110                     hw->mac.type == e1000_i211 ||
2111                     hw->mac.type == e1000_i354) {
2112                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2113                                 rxq = dev->data->rx_queues[i];
2114                                 uint32_t dvmolr = E1000_READ_REG(hw,
2115                                         E1000_DVMOLR(rxq->reg_idx));
2116                                 dvmolr |= E1000_DVMOLR_STRCRC;
2117                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2118                         }
2119                 }
2120         } else {
2121                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2122
2123                 /* clear STRCRC bit in all queues */
2124                 if (hw->mac.type == e1000_i350 ||
2125                     hw->mac.type == e1000_i210 ||
2126                     hw->mac.type == e1000_i211 ||
2127                     hw->mac.type == e1000_i354) {
2128                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2129                                 rxq = dev->data->rx_queues[i];
2130                                 uint32_t dvmolr = E1000_READ_REG(hw,
2131                                         E1000_DVMOLR(rxq->reg_idx));
2132                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2133                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2134                         }
2135                 }
2136         }
2137
2138         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2139         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2140                 E1000_RCTL_RDMTS_HALF |
2141                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2142
2143         /* Make sure VLAN Filters are off. */
2144         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2145                 rctl &= ~E1000_RCTL_VFE;
2146         /* Don't store bad packets. */
2147         rctl &= ~E1000_RCTL_SBP;
2148
2149         /* Enable Receives. */
2150         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2151
2152         /*
2153          * Setup the HW Rx Head and Tail Descriptor Pointers.
2154          * This needs to be done after enable.
2155          */
2156         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2157                 rxq = dev->data->rx_queues[i];
2158                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2159                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2160         }
2161
2162         return 0;
2163 }
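
/*
 * Minimal sketch (not part of the driver) approximating the buffer-size
 * check performed above, usable on the application side to predict whether
 * scattered RX will be forced.  The constant 4 stands in for the driver's
 * private VLAN_TAG_SIZE; "pool" and "max_rx_pkt_len" are assumed inputs,
 * and the 1 KB rounding applied via SRRCTL.BSIZEPACKET is ignored here.
 *
 *   static int
 *   example_needs_scatter(struct rte_mempool *pool, uint32_t max_rx_pkt_len)
 *   {
 *           struct rte_pktmbuf_pool_private *mbp_priv =
 *                   rte_mempool_get_priv(pool);
 *           uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
 *                                          RTE_PKTMBUF_HEADROOM);
 *
 *           // Mirrors the dual VLAN headroom added by eth_igb_rx_init().
 *           return (max_rx_pkt_len + 2 * 4) > buf_size;
 *   }
 */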
2164
2165 /*********************************************************************
2166  *
2167  *  Enable transmit unit.
2168  *
2169  **********************************************************************/
2170 void
2171 eth_igb_tx_init(struct rte_eth_dev *dev)
2172 {
2173         struct e1000_hw     *hw;
2174         struct igb_tx_queue *txq;
2175         uint32_t tctl;
2176         uint32_t txdctl;
2177         uint16_t i;
2178
2179         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2180
2181         /* Setup the Base and Length of the Tx Descriptor Rings. */
2182         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2183                 uint64_t bus_addr;
2184                 txq = dev->data->tx_queues[i];
2185                 bus_addr = txq->tx_ring_phys_addr;
2186
2187                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2188                                 txq->nb_tx_desc *
2189                                 sizeof(union e1000_adv_tx_desc));
2190                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2191                                 (uint32_t)(bus_addr >> 32));
2192                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2193
2194                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2195                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2196                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2197
2198                 /* Setup Transmit threshold registers. */
2199                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2200                 txdctl |= txq->pthresh & 0x1F;
2201                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2202                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2203                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2204                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2205         }
2206
2207         /* Program the Transmit Control Register. */
2208         tctl = E1000_READ_REG(hw, E1000_TCTL);
2209         tctl &= ~E1000_TCTL_CT;
2210         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2211                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2212
2213         e1000_config_collision_dist(hw);
2214
2215         /* This write will effectively turn on the transmit unit. */
2216         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2217 }
2218
2219 /*********************************************************************
2220  *
2221  *  Enable VF receive unit.
2222  *
2223  **********************************************************************/
2224 int
2225 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2226 {
2227         struct e1000_hw     *hw;
2228         struct igb_rx_queue *rxq;
2229         struct rte_pktmbuf_pool_private *mbp_priv;
2230         uint32_t srrctl;
2231         uint16_t buf_size;
2232         uint16_t rctl_bsize;
2233         uint16_t i;
2234         int ret;
2235
2236         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2237
2238         /* setup MTU */
2239         e1000_rlpml_set_vf(hw,
2240                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2241                 VLAN_TAG_SIZE));
2242
2243         /* Configure and enable each RX queue. */
2244         rctl_bsize = 0;
2245         dev->rx_pkt_burst = eth_igb_recv_pkts;
2246         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2247                 uint64_t bus_addr;
2248                 uint32_t rxdctl;
2249
2250                 rxq = dev->data->rx_queues[i];
2251
2252                 /* Allocate buffers for descriptor rings and set up queue */
2253                 ret = igb_alloc_rx_queue_mbufs(rxq);
2254                 if (ret)
2255                         return ret;
2256
2257                 bus_addr = rxq->rx_ring_phys_addr;
2258                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2259                                 rxq->nb_rx_desc *
2260                                 sizeof(union e1000_adv_rx_desc));
2261                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2262                                 (uint32_t)(bus_addr >> 32));
2263                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2264
2265                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2266
2267                 /*
2268                  * Configure RX buffer size.
2269                  */
2270                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2271                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2272                                        RTE_PKTMBUF_HEADROOM);
2273                 if (buf_size >= 1024) {
2274                         /*
2275                          * Configure the BSIZEPACKET field of the SRRCTL
2276                          * register of the queue.
2277                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2278                          * If this field is equal to 0b, then RCTL.BSIZE
2279                          * determines the RX packet buffer size.
2280                          */
2281                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2282                                    E1000_SRRCTL_BSIZEPKT_MASK);
2283                         buf_size = (uint16_t) ((srrctl &
2284                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2285                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2286
2287                         /* Add the dual VLAN tag length to support dual VLAN */
2288                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2289                                                 2 * VLAN_TAG_SIZE) > buf_size){
2290                                 if (!dev->data->scattered_rx)
2291                                         PMD_INIT_LOG(DEBUG,
2292                                                      "forcing scatter mode");
2293                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2294                                 dev->data->scattered_rx = 1;
2295                         }
2296                 } else {
2297                         /*
2298                          * Use BSIZE field of the device RCTL register.
2299                          */
2300                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2301                                 rctl_bsize = buf_size;
2302                         if (!dev->data->scattered_rx)
2303                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2304                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2305                         dev->data->scattered_rx = 1;
2306                 }
2307
2308                 /* Set if packets are dropped when no descriptors available */
2309                 if (rxq->drop_en)
2310                         srrctl |= E1000_SRRCTL_DROP_EN;
2311
2312                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2313
2314                 /* Enable this RX queue. */
2315                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2316                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2317                 rxdctl &= 0xFFF00000;
2318                 rxdctl |= (rxq->pthresh & 0x1F);
2319                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2320                 if (hw->mac.type == e1000_vfadapt) {
2321                         /*
2322                          * Workaround for the 82576 VF erratum:
2323                          * force WTHRESH to 1 to avoid write-back
2324                          * sometimes not being triggered
2325                          */
2326                         rxdctl |= 0x10000;
2327                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2328                 }
2329                 else
2330                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2331                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2332         }
2333
2334         if (dev->data->dev_conf.rxmode.enable_scatter) {
2335                 if (!dev->data->scattered_rx)
2336                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2337                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2338                 dev->data->scattered_rx = 1;
2339         }
2340
2341         /*
2342          * Setup the HW Rx Head and Tail Descriptor Pointers.
2343          * This needs to be done after enable.
2344          */
2345         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2346                 rxq = dev->data->rx_queues[i];
2347                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2348                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2349         }
2350
2351         return 0;
2352 }
2353
2354 /*********************************************************************
2355  *
2356  *  Enable VF transmit unit.
2357  *
2358  **********************************************************************/
2359 void
2360 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2361 {
2362         struct e1000_hw     *hw;
2363         struct igb_tx_queue *txq;
2364         uint32_t txdctl;
2365         uint16_t i;
2366
2367         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2368
2369         /* Setup the Base and Length of the Tx Descriptor Rings. */
2370         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2371                 uint64_t bus_addr;
2372
2373                 txq = dev->data->tx_queues[i];
2374                 bus_addr = txq->tx_ring_phys_addr;
2375                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2376                                 txq->nb_tx_desc *
2377                                 sizeof(union e1000_adv_tx_desc));
2378                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2379                                 (uint32_t)(bus_addr >> 32));
2380                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2381
2382                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2383                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2384                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2385
2386                 /* Setup Transmit threshold registers. */
2387                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2388                 txdctl |= txq->pthresh & 0x1F;
2389                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2390                 if (hw->mac.type == e1000_82576) {
2391                         /*
2392                          * Workaround for the 82576 VF erratum:
2393                          * force WTHRESH to 1 to avoid write-back
2394                          * sometimes not being triggered
2395                          */
2396                         txdctl |= 0x10000;
2397                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2398                 }
2399                 else
2400                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2401                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2402                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2403         }
2404
2405 }
2406