e1000: get queue info and descriptor limits
[dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit Mask to indicate what bits required for building TX context */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK |                 \
80                 PKT_TX_TCP_SEG)
81
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
84 {
85         struct rte_mbuf *m;
86
87         m = __rte_mbuf_raw_alloc(mp);
88         __rte_mbuf_sanity_check_raw(m, 0);
89         return (m);
90 }
91
92 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
93         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
94
95 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
96         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
97
98 /**
99  * Structure associated with each descriptor of the RX ring of a RX queue.
100  */
101 struct igb_rx_entry {
102         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
103 };
104
105 /**
106  * Structure associated with each descriptor of the TX ring of a TX queue.
107  */
108 struct igb_tx_entry {
109         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
110         uint16_t next_id; /**< Index of next descriptor in ring. */
111         uint16_t last_id; /**< Index of last scattered descriptor. */
112 };
113
114 /**
115  * Structure associated with each RX queue.
116  */
117 struct igb_rx_queue {
118         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
119         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
120         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
121         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
122         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
123         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
124         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
125         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
126         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
127         uint16_t            rx_tail;    /**< current value of RDT register. */
128         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
129         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
130         uint16_t            queue_id;   /**< RX queue index. */
131         uint16_t            reg_idx;    /**< RX queue register index. */
132         uint8_t             port_id;    /**< Device port identifier. */
133         uint8_t             pthresh;    /**< Prefetch threshold register. */
134         uint8_t             hthresh;    /**< Host threshold register. */
135         uint8_t             wthresh;    /**< Write-back threshold register. */
136         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
137         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
138 };
139
140 /**
141  * Hardware context number
142  */
143 enum igb_advctx_num {
144         IGB_CTX_0    = 0, /**< CTX0    */
145         IGB_CTX_1    = 1, /**< CTX1    */
146         IGB_CTX_NUM  = 2, /**< CTX_NUM */
147 };
148
149 /** Offload features */
150 union igb_tx_offload {
151         uint64_t data;
152         struct {
153                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
154                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
155                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
156                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
157                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
158
159                 /* uint64_t unused:8; */
160         };
161 };
162
163 /*
164  * Compare mask for igb_tx_offload.data,
165  * should be in sync with igb_tx_offload layout.
166  * */
167 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
168 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
169 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
170 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
171 /** Mac + IP + TCP + Mss mask. */
172 #define TX_TSO_CMP_MASK \
173         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
174
175 /**
176  * Strucutre to check if new context need be built
177  */
178 struct igb_advctx_info {
179         uint64_t flags;           /**< ol_flags related to context build. */
180         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
181         union igb_tx_offload tx_offload;
182         /** compare mask for tx offload. */
183         union igb_tx_offload tx_offload_mask;
184 };
185
186 /**
187  * Structure associated with each TX queue.
188  */
189 struct igb_tx_queue {
190         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
191         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
192         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
193         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
194         uint32_t               txd_type;      /**< Device-specific TXD type */
195         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
196         uint16_t               tx_tail; /**< Current value of TDT register. */
197         uint16_t               tx_head;
198         /**< Index of first used TX descriptor. */
199         uint16_t               queue_id; /**< TX queue index. */
200         uint16_t               reg_idx;  /**< TX queue register index. */
201         uint8_t                port_id;  /**< Device port identifier. */
202         uint8_t                pthresh;  /**< Prefetch threshold register. */
203         uint8_t                hthresh;  /**< Host threshold register. */
204         uint8_t                wthresh;  /**< Write-back threshold register. */
205         uint32_t               ctx_curr;
206         /**< Current used hardware descriptor. */
207         uint32_t               ctx_start;
208         /**< Start context position for transmit queue. */
209         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
210         /**< Hardware context history.*/
211 };
212
213 #if 1
214 #define RTE_PMD_USE_PREFETCH
215 #endif
216
217 #ifdef RTE_PMD_USE_PREFETCH
218 #define rte_igb_prefetch(p)     rte_prefetch0(p)
219 #else
220 #define rte_igb_prefetch(p)     do {} while(0)
221 #endif
222
223 #ifdef RTE_PMD_PACKET_PREFETCH
224 #define rte_packet_prefetch(p) rte_prefetch1(p)
225 #else
226 #define rte_packet_prefetch(p)  do {} while(0)
227 #endif
228
229 /*
230  * Macro for VMDq feature for 1 GbE NIC.
231  */
232 #define E1000_VMOLR_SIZE                        (8)
233 #define IGB_TSO_MAX_HDRLEN                      (512)
234 #define IGB_TSO_MAX_MSS                         (9216)
235
236 /*********************************************************************
237  *
238  *  TX function
239  *
240  **********************************************************************/
241
242 /*
243  *There're some limitations in hardware for TCP segmentation offload. We
244  *should check whether the parameters are valid.
245  */
246 static inline uint64_t
247 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
248 {
249         if (!(ol_req & PKT_TX_TCP_SEG))
250                 return ol_req;
251         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
252                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
253                 ol_req &= ~PKT_TX_TCP_SEG;
254                 ol_req |= PKT_TX_TCP_CKSUM;
255         }
256         return ol_req;
257 }
258
259 /*
260  * Advanced context descriptor are almost same between igb/ixgbe
261  * This is a separate function, looking for optimization opportunity here
262  * Rework required to go with the pre-defined values.
263  */
264
265 static inline void
266 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
267                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
268                 uint64_t ol_flags, union igb_tx_offload tx_offload)
269 {
270         uint32_t type_tucmd_mlhl;
271         uint32_t mss_l4len_idx;
272         uint32_t ctx_idx, ctx_curr;
273         uint32_t vlan_macip_lens;
274         union igb_tx_offload tx_offload_mask;
275
276         ctx_curr = txq->ctx_curr;
277         ctx_idx = ctx_curr + txq->ctx_start;
278
279         tx_offload_mask.data = 0;
280         type_tucmd_mlhl = 0;
281
282         /* Specify which HW CTX to upload. */
283         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
284
285         if (ol_flags & PKT_TX_VLAN_PKT)
286                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
287
288         /* check if TCP segmentation required for this packet */
289         if (ol_flags & PKT_TX_TCP_SEG) {
290                 /* implies IP cksum in IPv4 */
291                 if (ol_flags & PKT_TX_IP_CKSUM)
292                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
293                                 E1000_ADVTXD_TUCMD_L4T_TCP |
294                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
295                 else
296                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
297                                 E1000_ADVTXD_TUCMD_L4T_TCP |
298                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
299
300                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
301                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
302                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
303         } else { /* no TSO, check if hardware checksum is needed */
304                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
305                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
306
307                 if (ol_flags & PKT_TX_IP_CKSUM)
308                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
309
310                 switch (ol_flags & PKT_TX_L4_MASK) {
311                 case PKT_TX_UDP_CKSUM:
312                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
313                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
314                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
315                         break;
316                 case PKT_TX_TCP_CKSUM:
317                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
318                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
319                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
320                         break;
321                 case PKT_TX_SCTP_CKSUM:
322                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
323                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
324                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
325                         break;
326                 default:
327                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
328                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
329                         break;
330                 }
331         }
332
333         txq->ctx_cache[ctx_curr].flags = ol_flags;
334         txq->ctx_cache[ctx_idx].tx_offload.data =
335                 tx_offload_mask.data & tx_offload.data;
336         txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
337
338         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
339         vlan_macip_lens = (uint32_t)tx_offload.data;
340         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
341         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
342         ctx_txd->seqnum_seed = 0;
343 }
344
345 /*
346  * Check which hardware context can be used. Use the existing match
347  * or create a new context descriptor.
348  */
349 static inline uint32_t
350 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
351                 union igb_tx_offload tx_offload)
352 {
353         /* If match with the current context */
354         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
355                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
356                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
357                         return txq->ctx_curr;
358         }
359
360         /* If match with the second context */
361         txq->ctx_curr ^= 1;
362         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
363                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
364                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
365                         return txq->ctx_curr;
366         }
367
368         /* Mismatch, use the previous context */
369         return (IGB_CTX_NUM);
370 }
371
372 static inline uint32_t
373 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
374 {
375         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
376         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
377         uint32_t tmp;
378
379         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
380         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
381         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
382         return tmp;
383 }
384
385 static inline uint32_t
386 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
387 {
388         uint32_t cmdtype;
389         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
390         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
391         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
392         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
393         return cmdtype;
394 }
395
396 uint16_t
397 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
398                uint16_t nb_pkts)
399 {
400         struct igb_tx_queue *txq;
401         struct igb_tx_entry *sw_ring;
402         struct igb_tx_entry *txe, *txn;
403         volatile union e1000_adv_tx_desc *txr;
404         volatile union e1000_adv_tx_desc *txd;
405         struct rte_mbuf     *tx_pkt;
406         struct rte_mbuf     *m_seg;
407         uint64_t buf_dma_addr;
408         uint32_t olinfo_status;
409         uint32_t cmd_type_len;
410         uint32_t pkt_len;
411         uint16_t slen;
412         uint64_t ol_flags;
413         uint16_t tx_end;
414         uint16_t tx_id;
415         uint16_t tx_last;
416         uint16_t nb_tx;
417         uint64_t tx_ol_req;
418         uint32_t new_ctx = 0;
419         uint32_t ctx = 0;
420         union igb_tx_offload tx_offload = {0};
421
422         txq = tx_queue;
423         sw_ring = txq->sw_ring;
424         txr     = txq->tx_ring;
425         tx_id   = txq->tx_tail;
426         txe = &sw_ring[tx_id];
427
428         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
429                 tx_pkt = *tx_pkts++;
430                 pkt_len = tx_pkt->pkt_len;
431
432                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
433
434                 /*
435                  * The number of descriptors that must be allocated for a
436                  * packet is the number of segments of that packet, plus 1
437                  * Context Descriptor for the VLAN Tag Identifier, if any.
438                  * Determine the last TX descriptor to allocate in the TX ring
439                  * for the packet, starting from the current position (tx_id)
440                  * in the ring.
441                  */
442                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
443
444                 ol_flags = tx_pkt->ol_flags;
445                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
446
447                 /* If a Context Descriptor need be built . */
448                 if (tx_ol_req) {
449                         tx_offload.l2_len = tx_pkt->l2_len;
450                         tx_offload.l3_len = tx_pkt->l3_len;
451                         tx_offload.l4_len = tx_pkt->l4_len;
452                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
453                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
454                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
455
456                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
457                         /* Only allocate context descriptor if required*/
458                         new_ctx = (ctx == IGB_CTX_NUM);
459                         ctx = txq->ctx_curr;
460                         tx_last = (uint16_t) (tx_last + new_ctx);
461                 }
462                 if (tx_last >= txq->nb_tx_desc)
463                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
464
465                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
466                            " tx_first=%u tx_last=%u",
467                            (unsigned) txq->port_id,
468                            (unsigned) txq->queue_id,
469                            (unsigned) pkt_len,
470                            (unsigned) tx_id,
471                            (unsigned) tx_last);
472
473                 /*
474                  * Check if there are enough free descriptors in the TX ring
475                  * to transmit the next packet.
476                  * This operation is based on the two following rules:
477                  *
478                  *   1- Only check that the last needed TX descriptor can be
479                  *      allocated (by construction, if that descriptor is free,
480                  *      all intermediate ones are also free).
481                  *
482                  *      For this purpose, the index of the last TX descriptor
483                  *      used for a packet (the "last descriptor" of a packet)
484                  *      is recorded in the TX entries (the last one included)
485                  *      that are associated with all TX descriptors allocated
486                  *      for that packet.
487                  *
488                  *   2- Avoid to allocate the last free TX descriptor of the
489                  *      ring, in order to never set the TDT register with the
490                  *      same value stored in parallel by the NIC in the TDH
491                  *      register, which makes the TX engine of the NIC enter
492                  *      in a deadlock situation.
493                  *
494                  *      By extension, avoid to allocate a free descriptor that
495                  *      belongs to the last set of free descriptors allocated
496                  *      to the same packet previously transmitted.
497                  */
498
499                 /*
500                  * The "last descriptor" of the previously sent packet, if any,
501                  * which used the last descriptor to allocate.
502                  */
503                 tx_end = sw_ring[tx_last].last_id;
504
505                 /*
506                  * The next descriptor following that "last descriptor" in the
507                  * ring.
508                  */
509                 tx_end = sw_ring[tx_end].next_id;
510
511                 /*
512                  * The "last descriptor" associated with that next descriptor.
513                  */
514                 tx_end = sw_ring[tx_end].last_id;
515
516                 /*
517                  * Check that this descriptor is free.
518                  */
519                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
520                         if (nb_tx == 0)
521                                 return (0);
522                         goto end_of_tx;
523                 }
524
525                 /*
526                  * Set common flags of all TX Data Descriptors.
527                  *
528                  * The following bits must be set in all Data Descriptors:
529                  *   - E1000_ADVTXD_DTYP_DATA
530                  *   - E1000_ADVTXD_DCMD_DEXT
531                  *
532                  * The following bits must be set in the first Data Descriptor
533                  * and are ignored in the other ones:
534                  *   - E1000_ADVTXD_DCMD_IFCS
535                  *   - E1000_ADVTXD_MAC_1588
536                  *   - E1000_ADVTXD_DCMD_VLE
537                  *
538                  * The following bits must only be set in the last Data
539                  * Descriptor:
540                  *   - E1000_TXD_CMD_EOP
541                  *
542                  * The following bits can be set in any Data Descriptor, but
543                  * are only set in the last Data Descriptor:
544                  *   - E1000_TXD_CMD_RS
545                  */
546                 cmd_type_len = txq->txd_type |
547                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
548                 if (tx_ol_req & PKT_TX_TCP_SEG)
549                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
550                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
551 #if defined(RTE_LIBRTE_IEEE1588)
552                 if (ol_flags & PKT_TX_IEEE1588_TMST)
553                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
554 #endif
555                 if (tx_ol_req) {
556                         /* Setup TX Advanced context descriptor if required */
557                         if (new_ctx) {
558                                 volatile struct e1000_adv_tx_context_desc *
559                                     ctx_txd;
560
561                                 ctx_txd = (volatile struct
562                                     e1000_adv_tx_context_desc *)
563                                     &txr[tx_id];
564
565                                 txn = &sw_ring[txe->next_id];
566                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
567
568                                 if (txe->mbuf != NULL) {
569                                         rte_pktmbuf_free_seg(txe->mbuf);
570                                         txe->mbuf = NULL;
571                                 }
572
573                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
574
575                                 txe->last_id = tx_last;
576                                 tx_id = txe->next_id;
577                                 txe = txn;
578                         }
579
580                         /* Setup the TX Advanced Data Descriptor */
581                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
582                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
583                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
584                 }
585
586                 m_seg = tx_pkt;
587                 do {
588                         txn = &sw_ring[txe->next_id];
589                         txd = &txr[tx_id];
590
591                         if (txe->mbuf != NULL)
592                                 rte_pktmbuf_free_seg(txe->mbuf);
593                         txe->mbuf = m_seg;
594
595                         /*
596                          * Set up transmit descriptor.
597                          */
598                         slen = (uint16_t) m_seg->data_len;
599                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
600                         txd->read.buffer_addr =
601                                 rte_cpu_to_le_64(buf_dma_addr);
602                         txd->read.cmd_type_len =
603                                 rte_cpu_to_le_32(cmd_type_len | slen);
604                         txd->read.olinfo_status =
605                                 rte_cpu_to_le_32(olinfo_status);
606                         txe->last_id = tx_last;
607                         tx_id = txe->next_id;
608                         txe = txn;
609                         m_seg = m_seg->next;
610                 } while (m_seg != NULL);
611
612                 /*
613                  * The last packet data descriptor needs End Of Packet (EOP)
614                  * and Report Status (RS).
615                  */
616                 txd->read.cmd_type_len |=
617                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
618         }
619  end_of_tx:
620         rte_wmb();
621
622         /*
623          * Set the Transmit Descriptor Tail (TDT).
624          */
625         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
626         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
627                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
628                    (unsigned) tx_id, (unsigned) nb_tx);
629         txq->tx_tail = tx_id;
630
631         return (nb_tx);
632 }
633
634 /*********************************************************************
635  *
636  *  RX functions
637  *
638  **********************************************************************/
639 #define IGB_PACKET_TYPE_IPV4              0X01
640 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
641 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
642 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
643 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
644 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
645 #define IGB_PACKET_TYPE_IPV6              0X04
646 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
647 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
648 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
649 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
650 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
651 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
652 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
653 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
654 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
655 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
656 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
657 #define IGB_PACKET_TYPE_MAX               0X80
658 #define IGB_PACKET_TYPE_MASK              0X7F
659 #define IGB_PACKET_TYPE_SHIFT             0X04
660 static inline uint32_t
661 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
662 {
663         static const uint32_t
664                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
665                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
666                         RTE_PTYPE_L3_IPV4,
667                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
668                         RTE_PTYPE_L3_IPV4_EXT,
669                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
670                         RTE_PTYPE_L3_IPV6,
671                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
672                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673                         RTE_PTYPE_INNER_L3_IPV6,
674                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
675                         RTE_PTYPE_L3_IPV6_EXT,
676                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
677                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
678                         RTE_PTYPE_INNER_L3_IPV6_EXT,
679                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
680                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
681                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
682                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
683                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
684                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
686                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
687                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
688                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
689                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
690                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
691                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
692                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
693                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
694                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
695                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
696                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
698                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
700                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
703                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
705                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
706                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
707         };
708         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
709                 return RTE_PTYPE_UNKNOWN;
710
711         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
712
713         return ptype_table[pkt_info];
714 }
715
716 static inline uint64_t
717 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
718 {
719         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
720
721 #if defined(RTE_LIBRTE_IEEE1588)
722         static uint32_t ip_pkt_etqf_map[8] = {
723                 0, 0, 0, PKT_RX_IEEE1588_PTP,
724                 0, 0, 0, 0,
725         };
726
727         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
728         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
729
730         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
731         if (hw->mac.type == e1000_i210)
732                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
733         else
734                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
735 #else
736         RTE_SET_USED(rxq);
737 #endif
738
739         return pkt_flags;
740 }
741
742 static inline uint64_t
743 rx_desc_status_to_pkt_flags(uint32_t rx_status)
744 {
745         uint64_t pkt_flags;
746
747         /* Check if VLAN present */
748         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
749
750 #if defined(RTE_LIBRTE_IEEE1588)
751         if (rx_status & E1000_RXD_STAT_TMST)
752                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
753 #endif
754         return pkt_flags;
755 }
756
757 static inline uint64_t
758 rx_desc_error_to_pkt_flags(uint32_t rx_status)
759 {
760         /*
761          * Bit 30: IPE, IPv4 checksum error
762          * Bit 29: L4I, L4I integrity error
763          */
764
765         static uint64_t error_to_pkt_flags_map[4] = {
766                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
767                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
768         };
769         return error_to_pkt_flags_map[(rx_status >>
770                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
771 }
772
773 uint16_t
774 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
775                uint16_t nb_pkts)
776 {
777         struct igb_rx_queue *rxq;
778         volatile union e1000_adv_rx_desc *rx_ring;
779         volatile union e1000_adv_rx_desc *rxdp;
780         struct igb_rx_entry *sw_ring;
781         struct igb_rx_entry *rxe;
782         struct rte_mbuf *rxm;
783         struct rte_mbuf *nmb;
784         union e1000_adv_rx_desc rxd;
785         uint64_t dma_addr;
786         uint32_t staterr;
787         uint32_t hlen_type_rss;
788         uint16_t pkt_len;
789         uint16_t rx_id;
790         uint16_t nb_rx;
791         uint16_t nb_hold;
792         uint64_t pkt_flags;
793
794         nb_rx = 0;
795         nb_hold = 0;
796         rxq = rx_queue;
797         rx_id = rxq->rx_tail;
798         rx_ring = rxq->rx_ring;
799         sw_ring = rxq->sw_ring;
800         while (nb_rx < nb_pkts) {
801                 /*
802                  * The order of operations here is important as the DD status
803                  * bit must not be read after any other descriptor fields.
804                  * rx_ring and rxdp are pointing to volatile data so the order
805                  * of accesses cannot be reordered by the compiler. If they were
806                  * not volatile, they could be reordered which could lead to
807                  * using invalid descriptor fields when read from rxd.
808                  */
809                 rxdp = &rx_ring[rx_id];
810                 staterr = rxdp->wb.upper.status_error;
811                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
812                         break;
813                 rxd = *rxdp;
814
815                 /*
816                  * End of packet.
817                  *
818                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
819                  * likely to be invalid and to be dropped by the various
820                  * validation checks performed by the network stack.
821                  *
822                  * Allocate a new mbuf to replenish the RX ring descriptor.
823                  * If the allocation fails:
824                  *    - arrange for that RX descriptor to be the first one
825                  *      being parsed the next time the receive function is
826                  *      invoked [on the same queue].
827                  *
828                  *    - Stop parsing the RX ring and return immediately.
829                  *
830                  * This policy do not drop the packet received in the RX
831                  * descriptor for which the allocation of a new mbuf failed.
832                  * Thus, it allows that packet to be later retrieved if
833                  * mbuf have been freed in the mean time.
834                  * As a side effect, holding RX descriptors instead of
835                  * systematically giving them back to the NIC may lead to
836                  * RX ring exhaustion situations.
837                  * However, the NIC can gracefully prevent such situations
838                  * to happen by sending specific "back-pressure" flow control
839                  * frames to its peer(s).
840                  */
841                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
842                            "staterr=0x%x pkt_len=%u",
843                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
844                            (unsigned) rx_id, (unsigned) staterr,
845                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
846
847                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
848                 if (nmb == NULL) {
849                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
850                                    "queue_id=%u", (unsigned) rxq->port_id,
851                                    (unsigned) rxq->queue_id);
852                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
853                         break;
854                 }
855
856                 nb_hold++;
857                 rxe = &sw_ring[rx_id];
858                 rx_id++;
859                 if (rx_id == rxq->nb_rx_desc)
860                         rx_id = 0;
861
862                 /* Prefetch next mbuf while processing current one. */
863                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
864
865                 /*
866                  * When next RX descriptor is on a cache-line boundary,
867                  * prefetch the next 4 RX descriptors and the next 8 pointers
868                  * to mbufs.
869                  */
870                 if ((rx_id & 0x3) == 0) {
871                         rte_igb_prefetch(&rx_ring[rx_id]);
872                         rte_igb_prefetch(&sw_ring[rx_id]);
873                 }
874
875                 rxm = rxe->mbuf;
876                 rxe->mbuf = nmb;
877                 dma_addr =
878                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
879                 rxdp->read.hdr_addr = 0;
880                 rxdp->read.pkt_addr = dma_addr;
881
882                 /*
883                  * Initialize the returned mbuf.
884                  * 1) setup generic mbuf fields:
885                  *    - number of segments,
886                  *    - next segment,
887                  *    - packet length,
888                  *    - RX port identifier.
889                  * 2) integrate hardware offload data, if any:
890                  *    - RSS flag & hash,
891                  *    - IP checksum flag,
892                  *    - VLAN TCI, if any,
893                  *    - error flags.
894                  */
895                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
896                                       rxq->crc_len);
897                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
898                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
899                 rxm->nb_segs = 1;
900                 rxm->next = NULL;
901                 rxm->pkt_len = pkt_len;
902                 rxm->data_len = pkt_len;
903                 rxm->port = rxq->port_id;
904
905                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
906                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
907                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
908                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
909
910                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
911                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
912                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
913                 rxm->ol_flags = pkt_flags;
914                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
915                                                 lo_dword.hs_rss.pkt_info);
916
917                 /*
918                  * Store the mbuf address into the next entry of the array
919                  * of returned packets.
920                  */
921                 rx_pkts[nb_rx++] = rxm;
922         }
923         rxq->rx_tail = rx_id;
924
925         /*
926          * If the number of free RX descriptors is greater than the RX free
927          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
928          * register.
929          * Update the RDT with the value of the last processed RX descriptor
930          * minus 1, to guarantee that the RDT register is never equal to the
931          * RDH register, which creates a "full" ring situtation from the
932          * hardware point of view...
933          */
934         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
935         if (nb_hold > rxq->rx_free_thresh) {
936                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
937                            "nb_hold=%u nb_rx=%u",
938                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
939                            (unsigned) rx_id, (unsigned) nb_hold,
940                            (unsigned) nb_rx);
941                 rx_id = (uint16_t) ((rx_id == 0) ?
942                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
943                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
944                 nb_hold = 0;
945         }
946         rxq->nb_rx_hold = nb_hold;
947         return (nb_rx);
948 }
949
950 uint16_t
951 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
952                          uint16_t nb_pkts)
953 {
954         struct igb_rx_queue *rxq;
955         volatile union e1000_adv_rx_desc *rx_ring;
956         volatile union e1000_adv_rx_desc *rxdp;
957         struct igb_rx_entry *sw_ring;
958         struct igb_rx_entry *rxe;
959         struct rte_mbuf *first_seg;
960         struct rte_mbuf *last_seg;
961         struct rte_mbuf *rxm;
962         struct rte_mbuf *nmb;
963         union e1000_adv_rx_desc rxd;
964         uint64_t dma; /* Physical address of mbuf data buffer */
965         uint32_t staterr;
966         uint32_t hlen_type_rss;
967         uint16_t rx_id;
968         uint16_t nb_rx;
969         uint16_t nb_hold;
970         uint16_t data_len;
971         uint64_t pkt_flags;
972
973         nb_rx = 0;
974         nb_hold = 0;
975         rxq = rx_queue;
976         rx_id = rxq->rx_tail;
977         rx_ring = rxq->rx_ring;
978         sw_ring = rxq->sw_ring;
979
980         /*
981          * Retrieve RX context of current packet, if any.
982          */
983         first_seg = rxq->pkt_first_seg;
984         last_seg = rxq->pkt_last_seg;
985
986         while (nb_rx < nb_pkts) {
987         next_desc:
988                 /*
989                  * The order of operations here is important as the DD status
990                  * bit must not be read after any other descriptor fields.
991                  * rx_ring and rxdp are pointing to volatile data so the order
992                  * of accesses cannot be reordered by the compiler. If they were
993                  * not volatile, they could be reordered which could lead to
994                  * using invalid descriptor fields when read from rxd.
995                  */
996                 rxdp = &rx_ring[rx_id];
997                 staterr = rxdp->wb.upper.status_error;
998                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
999                         break;
1000                 rxd = *rxdp;
1001
1002                 /*
1003                  * Descriptor done.
1004                  *
1005                  * Allocate a new mbuf to replenish the RX ring descriptor.
1006                  * If the allocation fails:
1007                  *    - arrange for that RX descriptor to be the first one
1008                  *      being parsed the next time the receive function is
1009                  *      invoked [on the same queue].
1010                  *
1011                  *    - Stop parsing the RX ring and return immediately.
1012                  *
1013                  * This policy does not drop the packet received in the RX
1014                  * descriptor for which the allocation of a new mbuf failed.
1015                  * Thus, it allows that packet to be later retrieved if
1016                  * mbuf have been freed in the mean time.
1017                  * As a side effect, holding RX descriptors instead of
1018                  * systematically giving them back to the NIC may lead to
1019                  * RX ring exhaustion situations.
1020                  * However, the NIC can gracefully prevent such situations
1021                  * to happen by sending specific "back-pressure" flow control
1022                  * frames to its peer(s).
1023                  */
1024                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1025                            "staterr=0x%x data_len=%u",
1026                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1027                            (unsigned) rx_id, (unsigned) staterr,
1028                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1029
1030                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1031                 if (nmb == NULL) {
1032                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1033                                    "queue_id=%u", (unsigned) rxq->port_id,
1034                                    (unsigned) rxq->queue_id);
1035                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1036                         break;
1037                 }
1038
1039                 nb_hold++;
1040                 rxe = &sw_ring[rx_id];
1041                 rx_id++;
1042                 if (rx_id == rxq->nb_rx_desc)
1043                         rx_id = 0;
1044
1045                 /* Prefetch next mbuf while processing current one. */
1046                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1047
1048                 /*
1049                  * When next RX descriptor is on a cache-line boundary,
1050                  * prefetch the next 4 RX descriptors and the next 8 pointers
1051                  * to mbufs.
1052                  */
1053                 if ((rx_id & 0x3) == 0) {
1054                         rte_igb_prefetch(&rx_ring[rx_id]);
1055                         rte_igb_prefetch(&sw_ring[rx_id]);
1056                 }
1057
1058                 /*
1059                  * Update RX descriptor with the physical address of the new
1060                  * data buffer of the new allocated mbuf.
1061                  */
1062                 rxm = rxe->mbuf;
1063                 rxe->mbuf = nmb;
1064                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1065                 rxdp->read.pkt_addr = dma;
1066                 rxdp->read.hdr_addr = 0;
1067
1068                 /*
1069                  * Set data length & data buffer address of mbuf.
1070                  */
1071                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1072                 rxm->data_len = data_len;
1073                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1074
1075                 /*
1076                  * If this is the first buffer of the received packet,
1077                  * set the pointer to the first mbuf of the packet and
1078                  * initialize its context.
1079                  * Otherwise, update the total length and the number of segments
1080                  * of the current scattered packet, and update the pointer to
1081                  * the last mbuf of the current packet.
1082                  */
1083                 if (first_seg == NULL) {
1084                         first_seg = rxm;
1085                         first_seg->pkt_len = data_len;
1086                         first_seg->nb_segs = 1;
1087                 } else {
1088                         first_seg->pkt_len += data_len;
1089                         first_seg->nb_segs++;
1090                         last_seg->next = rxm;
1091                 }
1092
1093                 /*
1094                  * If this is not the last buffer of the received packet,
1095                  * update the pointer to the last mbuf of the current scattered
1096                  * packet and continue to parse the RX ring.
1097                  */
1098                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1099                         last_seg = rxm;
1100                         goto next_desc;
1101                 }
1102
1103                 /*
1104                  * This is the last buffer of the received packet.
1105                  * If the CRC is not stripped by the hardware:
1106                  *   - Subtract the CRC length from the total packet length.
1107                  *   - If the last buffer only contains the whole CRC or a part
1108                  *     of it, free the mbuf associated to the last buffer.
1109                  *     If part of the CRC is also contained in the previous
1110                  *     mbuf, subtract the length of that CRC part from the
1111                  *     data length of the previous mbuf.
1112                  */
1113                 rxm->next = NULL;
1114                 if (unlikely(rxq->crc_len > 0)) {
1115                         first_seg->pkt_len -= ETHER_CRC_LEN;
1116                         if (data_len <= ETHER_CRC_LEN) {
1117                                 rte_pktmbuf_free_seg(rxm);
1118                                 first_seg->nb_segs--;
1119                                 last_seg->data_len = (uint16_t)
1120                                         (last_seg->data_len -
1121                                          (ETHER_CRC_LEN - data_len));
1122                                 last_seg->next = NULL;
1123                         } else
1124                                 rxm->data_len =
1125                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1126                 }
1127
1128                 /*
1129                  * Initialize the first mbuf of the returned packet:
1130                  *    - RX port identifier,
1131                  *    - hardware offload data, if any:
1132                  *      - RSS flag & hash,
1133                  *      - IP checksum flag,
1134                  *      - VLAN TCI, if any,
1135                  *      - error flags.
1136                  */
1137                 first_seg->port = rxq->port_id;
1138                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1139
1140                 /*
1141                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1142                  * set in the pkt_flags field.
1143                  */
1144                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1145                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1146                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1147                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1148                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1149                 first_seg->ol_flags = pkt_flags;
1150                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1151                                         lower.lo_dword.hs_rss.pkt_info);
1152
1153                 /* Prefetch data of first segment, if configured to do so. */
1154                 rte_packet_prefetch((char *)first_seg->buf_addr +
1155                         first_seg->data_off);
1156
1157                 /*
1158                  * Store the mbuf address into the next entry of the array
1159                  * of returned packets.
1160                  */
1161                 rx_pkts[nb_rx++] = first_seg;
1162
1163                 /*
1164                  * Setup receipt context for a new packet.
1165                  */
1166                 first_seg = NULL;
1167         }
1168
1169         /*
1170          * Record index of the next RX descriptor to probe.
1171          */
1172         rxq->rx_tail = rx_id;
1173
1174         /*
1175          * Save receive context.
1176          */
1177         rxq->pkt_first_seg = first_seg;
1178         rxq->pkt_last_seg = last_seg;
1179
1180         /*
1181          * If the number of free RX descriptors is greater than the RX free
1182          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1183          * register.
1184          * Update the RDT with the value of the last processed RX descriptor
1185          * minus 1, to guarantee that the RDT register is never equal to the
1186          * RDH register, which creates a "full" ring situtation from the
1187          * hardware point of view...
1188          */
1189         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1190         if (nb_hold > rxq->rx_free_thresh) {
1191                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1192                            "nb_hold=%u nb_rx=%u",
1193                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1194                            (unsigned) rx_id, (unsigned) nb_hold,
1195                            (unsigned) nb_rx);
1196                 rx_id = (uint16_t) ((rx_id == 0) ?
1197                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1198                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1199                 nb_hold = 0;
1200         }
1201         rxq->nb_rx_hold = nb_hold;
1202         return (nb_rx);
1203 }
1204
1205 /*
1206  * Maximum number of Ring Descriptors.
1207  *
1208  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
1209  * desscriptors should meet the following condition:
1210  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1211  */
1212 static const struct rte_memzone *
1213 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1214                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1215 {
1216         char z_name[RTE_MEMZONE_NAMESIZE];
1217         const struct rte_memzone *mz;
1218
1219         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1220                         dev->driver->pci_drv.name, ring_name,
1221                                 dev->data->port_id, queue_id);
1222         mz = rte_memzone_lookup(z_name);
1223         if (mz)
1224                 return mz;
1225
1226 #ifdef RTE_LIBRTE_XEN_DOM0
1227         return rte_memzone_reserve_bounded(z_name, ring_size,
1228                         socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
1229 #else
1230         return rte_memzone_reserve_aligned(z_name, ring_size,
1231                         socket_id, 0, E1000_ALIGN);
1232 #endif
1233 }
1234
1235 static void
1236 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1237 {
1238         unsigned i;
1239
1240         if (txq->sw_ring != NULL) {
1241                 for (i = 0; i < txq->nb_tx_desc; i++) {
1242                         if (txq->sw_ring[i].mbuf != NULL) {
1243                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1244                                 txq->sw_ring[i].mbuf = NULL;
1245                         }
1246                 }
1247         }
1248 }
1249
1250 static void
1251 igb_tx_queue_release(struct igb_tx_queue *txq)
1252 {
1253         if (txq != NULL) {
1254                 igb_tx_queue_release_mbufs(txq);
1255                 rte_free(txq->sw_ring);
1256                 rte_free(txq);
1257         }
1258 }
1259
1260 void
1261 eth_igb_tx_queue_release(void *txq)
1262 {
1263         igb_tx_queue_release(txq);
1264 }
1265
1266 static void
1267 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1268 {
1269         txq->tx_head = 0;
1270         txq->tx_tail = 0;
1271         txq->ctx_curr = 0;
1272         memset((void*)&txq->ctx_cache, 0,
1273                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1274 }
1275
1276 static void
1277 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1278 {
1279         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1280         struct igb_tx_entry *txe = txq->sw_ring;
1281         uint16_t i, prev;
1282         struct e1000_hw *hw;
1283
1284         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1285         /* Zero out HW ring memory */
1286         for (i = 0; i < txq->nb_tx_desc; i++) {
1287                 txq->tx_ring[i] = zeroed_desc;
1288         }
1289
1290         /* Initialize ring entries */
1291         prev = (uint16_t)(txq->nb_tx_desc - 1);
1292         for (i = 0; i < txq->nb_tx_desc; i++) {
1293                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1294
1295                 txd->wb.status = E1000_TXD_STAT_DD;
1296                 txe[i].mbuf = NULL;
1297                 txe[i].last_id = i;
1298                 txe[prev].next_id = i;
1299                 prev = i;
1300         }
1301
1302         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1303         /* 82575 specific, each tx queue will use 2 hw contexts */
1304         if (hw->mac.type == e1000_82575)
1305                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1306
1307         igb_reset_tx_queue_stat(txq);
1308 }
1309
1310 int
1311 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1312                          uint16_t queue_idx,
1313                          uint16_t nb_desc,
1314                          unsigned int socket_id,
1315                          const struct rte_eth_txconf *tx_conf)
1316 {
1317         const struct rte_memzone *tz;
1318         struct igb_tx_queue *txq;
1319         struct e1000_hw     *hw;
1320         uint32_t size;
1321
1322         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1323
1324         /*
1325          * Validate number of transmit descriptors.
1326          * It must not exceed hardware maximum, and must be multiple
1327          * of E1000_ALIGN.
1328          */
1329         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1330                         (nb_desc > E1000_MAX_RING_DESC) ||
1331                         (nb_desc < E1000_MIN_RING_DESC)) {
1332                 return -EINVAL;
1333         }
1334
1335         /*
1336          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1337          * driver.
1338          */
1339         if (tx_conf->tx_free_thresh != 0)
1340                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1341                              "used for the 1G driver.");
1342         if (tx_conf->tx_rs_thresh != 0)
1343                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1344                              "used for the 1G driver.");
1345         if (tx_conf->tx_thresh.wthresh == 0)
1346                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1347                              "consider setting the TX WTHRESH value to 4, 8, "
1348                              "or 16.");
1349
1350         /* Free memory prior to re-allocation if needed */
1351         if (dev->data->tx_queues[queue_idx] != NULL) {
1352                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1353                 dev->data->tx_queues[queue_idx] = NULL;
1354         }
1355
1356         /* First allocate the tx queue data structure */
1357         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1358                                                         RTE_CACHE_LINE_SIZE);
1359         if (txq == NULL)
1360                 return (-ENOMEM);
1361
1362         /*
1363          * Allocate TX ring hardware descriptors. A memzone large enough to
1364          * handle the maximum ring size is allocated in order to allow for
1365          * resizing in later calls to the queue setup function.
1366          */
1367         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1368         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1369                                         size, socket_id);
1370         if (tz == NULL) {
1371                 igb_tx_queue_release(txq);
1372                 return (-ENOMEM);
1373         }
1374
1375         txq->nb_tx_desc = nb_desc;
1376         txq->pthresh = tx_conf->tx_thresh.pthresh;
1377         txq->hthresh = tx_conf->tx_thresh.hthresh;
1378         txq->wthresh = tx_conf->tx_thresh.wthresh;
1379         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1380                 txq->wthresh = 1;
1381         txq->queue_id = queue_idx;
1382         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1383                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1384         txq->port_id = dev->data->port_id;
1385
1386         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1387 #ifndef RTE_LIBRTE_XEN_DOM0
1388         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1389 #else
1390         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1391 #endif
1392          txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1393         /* Allocate software ring */
1394         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1395                                    sizeof(struct igb_tx_entry) * nb_desc,
1396                                    RTE_CACHE_LINE_SIZE);
1397         if (txq->sw_ring == NULL) {
1398                 igb_tx_queue_release(txq);
1399                 return (-ENOMEM);
1400         }
1401         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1402                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1403
1404         igb_reset_tx_queue(txq, dev);
1405         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1406         dev->data->tx_queues[queue_idx] = txq;
1407
1408         return (0);
1409 }
1410
1411 static void
1412 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1413 {
1414         unsigned i;
1415
1416         if (rxq->sw_ring != NULL) {
1417                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1418                         if (rxq->sw_ring[i].mbuf != NULL) {
1419                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1420                                 rxq->sw_ring[i].mbuf = NULL;
1421                         }
1422                 }
1423         }
1424 }
1425
1426 static void
1427 igb_rx_queue_release(struct igb_rx_queue *rxq)
1428 {
1429         if (rxq != NULL) {
1430                 igb_rx_queue_release_mbufs(rxq);
1431                 rte_free(rxq->sw_ring);
1432                 rte_free(rxq);
1433         }
1434 }
1435
1436 void
1437 eth_igb_rx_queue_release(void *rxq)
1438 {
1439         igb_rx_queue_release(rxq);
1440 }
1441
1442 static void
1443 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1444 {
1445         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1446         unsigned i;
1447
1448         /* Zero out HW ring memory */
1449         for (i = 0; i < rxq->nb_rx_desc; i++) {
1450                 rxq->rx_ring[i] = zeroed_desc;
1451         }
1452
1453         rxq->rx_tail = 0;
1454         rxq->pkt_first_seg = NULL;
1455         rxq->pkt_last_seg = NULL;
1456 }
1457
1458 int
1459 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1460                          uint16_t queue_idx,
1461                          uint16_t nb_desc,
1462                          unsigned int socket_id,
1463                          const struct rte_eth_rxconf *rx_conf,
1464                          struct rte_mempool *mp)
1465 {
1466         const struct rte_memzone *rz;
1467         struct igb_rx_queue *rxq;
1468         struct e1000_hw     *hw;
1469         unsigned int size;
1470
1471         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1472
1473         /*
1474          * Validate number of receive descriptors.
1475          * It must not exceed hardware maximum, and must be multiple
1476          * of E1000_ALIGN.
1477          */
1478         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1479                         (nb_desc > E1000_MAX_RING_DESC) ||
1480                         (nb_desc < E1000_MIN_RING_DESC)) {
1481                 return (-EINVAL);
1482         }
1483
1484         /* Free memory prior to re-allocation if needed */
1485         if (dev->data->rx_queues[queue_idx] != NULL) {
1486                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1487                 dev->data->rx_queues[queue_idx] = NULL;
1488         }
1489
1490         /* First allocate the RX queue data structure. */
1491         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1492                           RTE_CACHE_LINE_SIZE);
1493         if (rxq == NULL)
1494                 return (-ENOMEM);
1495         rxq->mb_pool = mp;
1496         rxq->nb_rx_desc = nb_desc;
1497         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1498         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1499         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1500         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1501                 rxq->wthresh = 1;
1502         rxq->drop_en = rx_conf->rx_drop_en;
1503         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1504         rxq->queue_id = queue_idx;
1505         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1506                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1507         rxq->port_id = dev->data->port_id;
1508         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1509                                   ETHER_CRC_LEN);
1510
1511         /*
1512          *  Allocate RX ring hardware descriptors. A memzone large enough to
1513          *  handle the maximum ring size is allocated in order to allow for
1514          *  resizing in later calls to the queue setup function.
1515          */
1516         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1517         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1518         if (rz == NULL) {
1519                 igb_rx_queue_release(rxq);
1520                 return (-ENOMEM);
1521         }
1522         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1523         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1524 #ifndef RTE_LIBRTE_XEN_DOM0
1525         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1526 #else
1527         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1528 #endif
1529         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1530
1531         /* Allocate software ring. */
1532         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1533                                    sizeof(struct igb_rx_entry) * nb_desc,
1534                                    RTE_CACHE_LINE_SIZE);
1535         if (rxq->sw_ring == NULL) {
1536                 igb_rx_queue_release(rxq);
1537                 return (-ENOMEM);
1538         }
1539         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1540                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1541
1542         dev->data->rx_queues[queue_idx] = rxq;
1543         igb_reset_rx_queue(rxq);
1544
1545         return 0;
1546 }
1547
1548 uint32_t
1549 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1550 {
1551 #define IGB_RXQ_SCAN_INTERVAL 4
1552         volatile union e1000_adv_rx_desc *rxdp;
1553         struct igb_rx_queue *rxq;
1554         uint32_t desc = 0;
1555
1556         if (rx_queue_id >= dev->data->nb_rx_queues) {
1557                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1558                 return 0;
1559         }
1560
1561         rxq = dev->data->rx_queues[rx_queue_id];
1562         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1563
1564         while ((desc < rxq->nb_rx_desc) &&
1565                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1566                 desc += IGB_RXQ_SCAN_INTERVAL;
1567                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1568                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1569                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1570                                 desc - rxq->nb_rx_desc]);
1571         }
1572
1573         return 0;
1574 }
1575
1576 int
1577 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1578 {
1579         volatile union e1000_adv_rx_desc *rxdp;
1580         struct igb_rx_queue *rxq = rx_queue;
1581         uint32_t desc;
1582
1583         if (unlikely(offset >= rxq->nb_rx_desc))
1584                 return 0;
1585         desc = rxq->rx_tail + offset;
1586         if (desc >= rxq->nb_rx_desc)
1587                 desc -= rxq->nb_rx_desc;
1588
1589         rxdp = &rxq->rx_ring[desc];
1590         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1591 }
1592
1593 void
1594 igb_dev_clear_queues(struct rte_eth_dev *dev)
1595 {
1596         uint16_t i;
1597         struct igb_tx_queue *txq;
1598         struct igb_rx_queue *rxq;
1599
1600         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1601                 txq = dev->data->tx_queues[i];
1602                 if (txq != NULL) {
1603                         igb_tx_queue_release_mbufs(txq);
1604                         igb_reset_tx_queue(txq, dev);
1605                 }
1606         }
1607
1608         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1609                 rxq = dev->data->rx_queues[i];
1610                 if (rxq != NULL) {
1611                         igb_rx_queue_release_mbufs(rxq);
1612                         igb_reset_rx_queue(rxq);
1613                 }
1614         }
1615 }
1616
1617 void
1618 igb_dev_free_queues(struct rte_eth_dev *dev)
1619 {
1620         uint16_t i;
1621
1622         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1623                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1624                 dev->data->rx_queues[i] = NULL;
1625         }
1626         dev->data->nb_rx_queues = 0;
1627
1628         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1629                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1630                 dev->data->tx_queues[i] = NULL;
1631         }
1632         dev->data->nb_tx_queues = 0;
1633 }
1634
1635 /**
1636  * Receive Side Scaling (RSS).
1637  * See section 7.1.1.7 in the following document:
1638  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1639  *
1640  * Principles:
1641  * The source and destination IP addresses of the IP header and the source and
1642  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1643  * against a configurable random key to compute a 32-bit RSS hash result.
1644  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1645  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1646  * RSS output index which is used as the RX queue index where to store the
1647  * received packets.
1648  * The following output is supplied in the RX write-back descriptor:
1649  *     - 32-bit result of the Microsoft RSS hash function,
1650  *     - 4-bit RSS type field.
1651  */
1652
1653 /*
1654  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1655  * Used as the default key.
1656  */
1657 static uint8_t rss_intel_key[40] = {
1658         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1659         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1660         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1661         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1662         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1663 };
1664
1665 static void
1666 igb_rss_disable(struct rte_eth_dev *dev)
1667 {
1668         struct e1000_hw *hw;
1669         uint32_t mrqc;
1670
1671         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1672         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1673         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1674         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1675 }
1676
1677 static void
1678 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1679 {
1680         uint8_t  *hash_key;
1681         uint32_t rss_key;
1682         uint32_t mrqc;
1683         uint64_t rss_hf;
1684         uint16_t i;
1685
1686         hash_key = rss_conf->rss_key;
1687         if (hash_key != NULL) {
1688                 /* Fill in RSS hash key */
1689                 for (i = 0; i < 10; i++) {
1690                         rss_key  = hash_key[(i * 4)];
1691                         rss_key |= hash_key[(i * 4) + 1] << 8;
1692                         rss_key |= hash_key[(i * 4) + 2] << 16;
1693                         rss_key |= hash_key[(i * 4) + 3] << 24;
1694                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1695                 }
1696         }
1697
1698         /* Set configured hashing protocols in MRQC register */
1699         rss_hf = rss_conf->rss_hf;
1700         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1701         if (rss_hf & ETH_RSS_IPV4)
1702                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1703         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1704                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1705         if (rss_hf & ETH_RSS_IPV6)
1706                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1707         if (rss_hf & ETH_RSS_IPV6_EX)
1708                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1709         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1710                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1711         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1712                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1713         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1714                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1715         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1716                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1717         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1718                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1719         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1720 }
1721
1722 int
1723 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1724                         struct rte_eth_rss_conf *rss_conf)
1725 {
1726         struct e1000_hw *hw;
1727         uint32_t mrqc;
1728         uint64_t rss_hf;
1729
1730         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1731
1732         /*
1733          * Before changing anything, first check that the update RSS operation
1734          * does not attempt to disable RSS, if RSS was enabled at
1735          * initialization time, or does not attempt to enable RSS, if RSS was
1736          * disabled at initialization time.
1737          */
1738         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1739         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1740         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1741                 if (rss_hf != 0) /* Enable RSS */
1742                         return -(EINVAL);
1743                 return 0; /* Nothing to do */
1744         }
1745         /* RSS enabled */
1746         if (rss_hf == 0) /* Disable RSS */
1747                 return -(EINVAL);
1748         igb_hw_rss_hash_set(hw, rss_conf);
1749         return 0;
1750 }
1751
1752 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1753                               struct rte_eth_rss_conf *rss_conf)
1754 {
1755         struct e1000_hw *hw;
1756         uint8_t *hash_key;
1757         uint32_t rss_key;
1758         uint32_t mrqc;
1759         uint64_t rss_hf;
1760         uint16_t i;
1761
1762         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1763         hash_key = rss_conf->rss_key;
1764         if (hash_key != NULL) {
1765                 /* Return RSS hash key */
1766                 for (i = 0; i < 10; i++) {
1767                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1768                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1769                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1770                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1771                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1772                 }
1773         }
1774
1775         /* Get RSS functions configured in MRQC register */
1776         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1777         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1778                 rss_conf->rss_hf = 0;
1779                 return 0;
1780         }
1781         rss_hf = 0;
1782         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1783                 rss_hf |= ETH_RSS_IPV4;
1784         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1785                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1786         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1787                 rss_hf |= ETH_RSS_IPV6;
1788         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1789                 rss_hf |= ETH_RSS_IPV6_EX;
1790         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1791                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1792         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1793                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1794         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1795                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1796         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1797                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1798         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1799                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1800         rss_conf->rss_hf = rss_hf;
1801         return 0;
1802 }
1803
1804 static void
1805 igb_rss_configure(struct rte_eth_dev *dev)
1806 {
1807         struct rte_eth_rss_conf rss_conf;
1808         struct e1000_hw *hw;
1809         uint32_t shift;
1810         uint16_t i;
1811
1812         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1813
1814         /* Fill in redirection table. */
1815         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1816         for (i = 0; i < 128; i++) {
1817                 union e1000_reta {
1818                         uint32_t dword;
1819                         uint8_t  bytes[4];
1820                 } reta;
1821                 uint8_t q_idx;
1822
1823                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1824                                    i % dev->data->nb_rx_queues : 0);
1825                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1826                 if ((i & 3) == 3)
1827                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1828         }
1829
1830         /*
1831          * Configure the RSS key and the RSS protocols used to compute
1832          * the RSS hash of input packets.
1833          */
1834         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1835         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1836                 igb_rss_disable(dev);
1837                 return;
1838         }
1839         if (rss_conf.rss_key == NULL)
1840                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1841         igb_hw_rss_hash_set(hw, &rss_conf);
1842 }
1843
1844 /*
1845  * Check if the mac type support VMDq or not.
1846  * Return 1 if it supports, otherwise, return 0.
1847  */
1848 static int
1849 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1850 {
1851         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1852
1853         switch (hw->mac.type) {
1854         case e1000_82576:
1855         case e1000_82580:
1856         case e1000_i350:
1857                 return 1;
1858         case e1000_82540:
1859         case e1000_82541:
1860         case e1000_82542:
1861         case e1000_82543:
1862         case e1000_82544:
1863         case e1000_82545:
1864         case e1000_82546:
1865         case e1000_82547:
1866         case e1000_82571:
1867         case e1000_82572:
1868         case e1000_82573:
1869         case e1000_82574:
1870         case e1000_82583:
1871         case e1000_i210:
1872         case e1000_i211:
1873         default:
1874                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1875                 return 0;
1876         }
1877 }
1878
1879 static int
1880 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1881 {
1882         struct rte_eth_vmdq_rx_conf *cfg;
1883         struct e1000_hw *hw;
1884         uint32_t mrqc, vt_ctl, vmolr, rctl;
1885         int i;
1886
1887         PMD_INIT_FUNC_TRACE();
1888
1889         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1890         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1891
1892         /* Check if mac type can support VMDq, return value of 0 means NOT support */
1893         if (igb_is_vmdq_supported(dev) == 0)
1894                 return -1;
1895
1896         igb_rss_disable(dev);
1897
1898         /* RCTL: eanble VLAN filter */
1899         rctl = E1000_READ_REG(hw, E1000_RCTL);
1900         rctl |= E1000_RCTL_VFE;
1901         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1902
1903         /* MRQC: enable vmdq */
1904         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1905         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1906         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1907
1908         /* VTCTL:  pool selection according to VLAN tag */
1909         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1910         if (cfg->enable_default_pool)
1911                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1912         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1913         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1914
1915         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1916                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1917                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1918                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1919                         E1000_VMOLR_MPME);
1920
1921                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1922                         vmolr |= E1000_VMOLR_AUPE;
1923                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1924                         vmolr |= E1000_VMOLR_ROMPE;
1925                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1926                         vmolr |= E1000_VMOLR_ROPE;
1927                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1928                         vmolr |= E1000_VMOLR_BAM;
1929                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1930                         vmolr |= E1000_VMOLR_MPME;
1931
1932                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1933         }
1934
1935         /*
1936          * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
1937          * Both 82576 and 82580 support it
1938          */
1939         if (hw->mac.type != e1000_i350) {
1940                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1941                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1942                         vmolr |= E1000_VMOLR_STRVLAN;
1943                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1944                 }
1945         }
1946
1947         /* VFTA - enable all vlan filters */
1948         for (i = 0; i < IGB_VFTA_SIZE; i++)
1949                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1950
1951         /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
1952         if (hw->mac.type != e1000_82580)
1953                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1954
1955         /*
1956          * RAH/RAL - allow pools to read specific mac addresses
1957          * In this case, all pools should be able to read from mac addr 0
1958          */
1959         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1960         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1961
1962         /* VLVF: set up filters for vlan tags as configured */
1963         for (i = 0; i < cfg->nb_pool_maps; i++) {
1964                 /* set vlan id in VF register and set the valid bit */
1965                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1966                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1967                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1968                         E1000_VLVF_POOLSEL_MASK)));
1969         }
1970
1971         E1000_WRITE_FLUSH(hw);
1972
1973         return 0;
1974 }
1975
1976
1977 /*********************************************************************
1978  *
1979  *  Enable receive unit.
1980  *
1981  **********************************************************************/
1982
1983 static int
1984 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1985 {
1986         struct igb_rx_entry *rxe = rxq->sw_ring;
1987         uint64_t dma_addr;
1988         unsigned i;
1989
1990         /* Initialize software ring entries. */
1991         for (i = 0; i < rxq->nb_rx_desc; i++) {
1992                 volatile union e1000_adv_rx_desc *rxd;
1993                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1994
1995                 if (mbuf == NULL) {
1996                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1997                                      "queue_id=%hu", rxq->queue_id);
1998                         return (-ENOMEM);
1999                 }
2000                 dma_addr =
2001                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
2002                 rxd = &rxq->rx_ring[i];
2003                 rxd->read.hdr_addr = 0;
2004                 rxd->read.pkt_addr = dma_addr;
2005                 rxe[i].mbuf = mbuf;
2006         }
2007
2008         return 0;
2009 }
2010
2011 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2012 static int
2013 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2014 {
2015         struct e1000_hw *hw =
2016                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017         uint32_t mrqc;
2018
2019         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2020                 /*
2021                  * SRIOV active scheme
2022                  * FIXME if support RSS together with VMDq & SRIOV
2023                  */
2024                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2025                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2026                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2027                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2028         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2029                 /*
2030                  * SRIOV inactive scheme
2031                  */
2032                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2033                         case ETH_MQ_RX_RSS:
2034                                 igb_rss_configure(dev);
2035                                 break;
2036                         case ETH_MQ_RX_VMDQ_ONLY:
2037                                 /*Configure general VMDQ only RX parameters*/
2038                                 igb_vmdq_rx_hw_configure(dev);
2039                                 break;
2040                         case ETH_MQ_RX_NONE:
2041                                 /* if mq_mode is none, disable rss mode.*/
2042                         default:
2043                                 igb_rss_disable(dev);
2044                                 break;
2045                 }
2046         }
2047
2048         return 0;
2049 }
2050
2051 int
2052 eth_igb_rx_init(struct rte_eth_dev *dev)
2053 {
2054         struct e1000_hw     *hw;
2055         struct igb_rx_queue *rxq;
2056         uint32_t rctl;
2057         uint32_t rxcsum;
2058         uint32_t srrctl;
2059         uint16_t buf_size;
2060         uint16_t rctl_bsize;
2061         uint16_t i;
2062         int ret;
2063
2064         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065         srrctl = 0;
2066
2067         /*
2068          * Make sure receives are disabled while setting
2069          * up the descriptor ring.
2070          */
2071         rctl = E1000_READ_REG(hw, E1000_RCTL);
2072         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2073
2074         /*
2075          * Configure support of jumbo frames, if any.
2076          */
2077         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2078                 rctl |= E1000_RCTL_LPE;
2079
2080                 /*
2081                  * Set maximum packet length by default, and might be updated
2082                  * together with enabling/disabling dual VLAN.
2083                  */
2084                 E1000_WRITE_REG(hw, E1000_RLPML,
2085                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2086                                                 VLAN_TAG_SIZE);
2087         } else
2088                 rctl &= ~E1000_RCTL_LPE;
2089
2090         /* Configure and enable each RX queue. */
2091         rctl_bsize = 0;
2092         dev->rx_pkt_burst = eth_igb_recv_pkts;
2093         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2094                 uint64_t bus_addr;
2095                 uint32_t rxdctl;
2096
2097                 rxq = dev->data->rx_queues[i];
2098
2099                 /* Allocate buffers for descriptor rings and set up queue */
2100                 ret = igb_alloc_rx_queue_mbufs(rxq);
2101                 if (ret)
2102                         return ret;
2103
2104                 /*
2105                  * Reset crc_len in case it was changed after queue setup by a
2106                  *  call to configure
2107                  */
2108                 rxq->crc_len =
2109                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2110                                                         0 : ETHER_CRC_LEN);
2111
2112                 bus_addr = rxq->rx_ring_phys_addr;
2113                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2114                                 rxq->nb_rx_desc *
2115                                 sizeof(union e1000_adv_rx_desc));
2116                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2117                                 (uint32_t)(bus_addr >> 32));
2118                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2119
2120                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2121
2122                 /*
2123                  * Configure RX buffer size.
2124                  */
2125                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2126                         RTE_PKTMBUF_HEADROOM);
2127                 if (buf_size >= 1024) {
2128                         /*
2129                          * Configure the BSIZEPACKET field of the SRRCTL
2130                          * register of the queue.
2131                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2132                          * If this field is equal to 0b, then RCTL.BSIZE
2133                          * determines the RX packet buffer size.
2134                          */
2135                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2136                                    E1000_SRRCTL_BSIZEPKT_MASK);
2137                         buf_size = (uint16_t) ((srrctl &
2138                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2139                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2140
2141                         /* It adds dual VLAN length for supporting dual VLAN */
2142                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2143                                                 2 * VLAN_TAG_SIZE) > buf_size){
2144                                 if (!dev->data->scattered_rx)
2145                                         PMD_INIT_LOG(DEBUG,
2146                                                      "forcing scatter mode");
2147                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2148                                 dev->data->scattered_rx = 1;
2149                         }
2150                 } else {
2151                         /*
2152                          * Use BSIZE field of the device RCTL register.
2153                          */
2154                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2155                                 rctl_bsize = buf_size;
2156                         if (!dev->data->scattered_rx)
2157                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2158                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2159                         dev->data->scattered_rx = 1;
2160                 }
2161
2162                 /* Set if packets are dropped when no descriptors available */
2163                 if (rxq->drop_en)
2164                         srrctl |= E1000_SRRCTL_DROP_EN;
2165
2166                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2167
2168                 /* Enable this RX queue. */
2169                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2170                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2171                 rxdctl &= 0xFFF00000;
2172                 rxdctl |= (rxq->pthresh & 0x1F);
2173                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2174                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2175                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2176         }
2177
2178         if (dev->data->dev_conf.rxmode.enable_scatter) {
2179                 if (!dev->data->scattered_rx)
2180                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2181                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2182                 dev->data->scattered_rx = 1;
2183         }
2184
2185         /*
2186          * Setup BSIZE field of RCTL register, if needed.
2187          * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2188          * register, since the code above configures the SRRCTL register of
2189          * the RX queue in such a case.
2190          * All configurable sizes are:
2191          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2192          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2193          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2194          *  2048: rctl |= E1000_RCTL_SZ_2048;
2195          *  1024: rctl |= E1000_RCTL_SZ_1024;
2196          *   512: rctl |= E1000_RCTL_SZ_512;
2197          *   256: rctl |= E1000_RCTL_SZ_256;
2198          */
2199         if (rctl_bsize > 0) {
2200                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2201                         rctl |= E1000_RCTL_SZ_512;
2202                 else /* 256 <= buf_size < 512 - use 256 */
2203                         rctl |= E1000_RCTL_SZ_256;
2204         }
2205
2206         /*
2207          * Configure RSS if device configured with multiple RX queues.
2208          */
2209         igb_dev_mq_rx_configure(dev);
2210
2211         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2212         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2213
2214         /*
2215          * Setup the Checksum Register.
2216          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2217          */
2218         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2219         rxcsum |= E1000_RXCSUM_PCSD;
2220
2221         /* Enable both L3/L4 rx checksum offload */
2222         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2223                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2224         else
2225                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2226         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2227
2228         /* Setup the Receive Control Register. */
2229         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2230                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2231
2232                 /* set STRCRC bit in all queues */
2233                 if (hw->mac.type == e1000_i350 ||
2234                     hw->mac.type == e1000_i210 ||
2235                     hw->mac.type == e1000_i211 ||
2236                     hw->mac.type == e1000_i354) {
2237                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2238                                 rxq = dev->data->rx_queues[i];
2239                                 uint32_t dvmolr = E1000_READ_REG(hw,
2240                                         E1000_DVMOLR(rxq->reg_idx));
2241                                 dvmolr |= E1000_DVMOLR_STRCRC;
2242                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2243                         }
2244                 }
2245         } else {
2246                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2247
2248                 /* clear STRCRC bit in all queues */
2249                 if (hw->mac.type == e1000_i350 ||
2250                     hw->mac.type == e1000_i210 ||
2251                     hw->mac.type == e1000_i211 ||
2252                     hw->mac.type == e1000_i354) {
2253                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2254                                 rxq = dev->data->rx_queues[i];
2255                                 uint32_t dvmolr = E1000_READ_REG(hw,
2256                                         E1000_DVMOLR(rxq->reg_idx));
2257                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2258                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2259                         }
2260                 }
2261         }
2262
2263         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2264         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2265                 E1000_RCTL_RDMTS_HALF |
2266                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2267
2268         /* Make sure VLAN Filters are off. */
2269         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2270                 rctl &= ~E1000_RCTL_VFE;
2271         /* Don't store bad packets. */
2272         rctl &= ~E1000_RCTL_SBP;
2273
2274         /* Enable Receives. */
2275         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2276
2277         /*
2278          * Setup the HW Rx Head and Tail Descriptor Pointers.
2279          * This needs to be done after enable.
2280          */
2281         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2282                 rxq = dev->data->rx_queues[i];
2283                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2284                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2285         }
2286
2287         return 0;
2288 }
2289
2290 /*********************************************************************
2291  *
2292  *  Enable transmit unit.
2293  *
2294  **********************************************************************/
2295 void
2296 eth_igb_tx_init(struct rte_eth_dev *dev)
2297 {
2298         struct e1000_hw     *hw;
2299         struct igb_tx_queue *txq;
2300         uint32_t tctl;
2301         uint32_t txdctl;
2302         uint16_t i;
2303
2304         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305
2306         /* Setup the Base and Length of the Tx Descriptor Rings. */
2307         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2308                 uint64_t bus_addr;
2309                 txq = dev->data->tx_queues[i];
2310                 bus_addr = txq->tx_ring_phys_addr;
2311
2312                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2313                                 txq->nb_tx_desc *
2314                                 sizeof(union e1000_adv_tx_desc));
2315                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2316                                 (uint32_t)(bus_addr >> 32));
2317                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2318
2319                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2320                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2321                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2322
2323                 /* Setup Transmit threshold registers. */
2324                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2325                 txdctl |= txq->pthresh & 0x1F;
2326                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2327                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2328                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2329                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2330         }
2331
2332         /* Program the Transmit Control Register. */
2333         tctl = E1000_READ_REG(hw, E1000_TCTL);
2334         tctl &= ~E1000_TCTL_CT;
2335         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2336                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2337
2338         e1000_config_collision_dist(hw);
2339
2340         /* This write will effectively turn on the transmit unit. */
2341         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2342 }
2343
2344 /*********************************************************************
2345  *
2346  *  Enable VF receive unit.
2347  *
2348  **********************************************************************/
2349 int
2350 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2351 {
2352         struct e1000_hw     *hw;
2353         struct igb_rx_queue *rxq;
2354         uint32_t srrctl;
2355         uint16_t buf_size;
2356         uint16_t rctl_bsize;
2357         uint16_t i;
2358         int ret;
2359
2360         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2361
2362         /* setup MTU */
2363         e1000_rlpml_set_vf(hw,
2364                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2365                 VLAN_TAG_SIZE));
2366
2367         /* Configure and enable each RX queue. */
2368         rctl_bsize = 0;
2369         dev->rx_pkt_burst = eth_igb_recv_pkts;
2370         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2371                 uint64_t bus_addr;
2372                 uint32_t rxdctl;
2373
2374                 rxq = dev->data->rx_queues[i];
2375
2376                 /* Allocate buffers for descriptor rings and set up queue */
2377                 ret = igb_alloc_rx_queue_mbufs(rxq);
2378                 if (ret)
2379                         return ret;
2380
2381                 bus_addr = rxq->rx_ring_phys_addr;
2382                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2383                                 rxq->nb_rx_desc *
2384                                 sizeof(union e1000_adv_rx_desc));
2385                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2386                                 (uint32_t)(bus_addr >> 32));
2387                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2388
2389                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2390
2391                 /*
2392                  * Configure RX buffer size.
2393                  */
2394                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2395                         RTE_PKTMBUF_HEADROOM);
2396                 if (buf_size >= 1024) {
2397                         /*
2398                          * Configure the BSIZEPACKET field of the SRRCTL
2399                          * register of the queue.
2400                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2401                          * If this field is equal to 0b, then RCTL.BSIZE
2402                          * determines the RX packet buffer size.
2403                          */
2404                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2405                                    E1000_SRRCTL_BSIZEPKT_MASK);
2406                         buf_size = (uint16_t) ((srrctl &
2407                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2408                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2409
2410                         /* It adds dual VLAN length for supporting dual VLAN */
2411                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2412                                                 2 * VLAN_TAG_SIZE) > buf_size){
2413                                 if (!dev->data->scattered_rx)
2414                                         PMD_INIT_LOG(DEBUG,
2415                                                      "forcing scatter mode");
2416                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2417                                 dev->data->scattered_rx = 1;
2418                         }
2419                 } else {
2420                         /*
2421                          * Use BSIZE field of the device RCTL register.
2422                          */
2423                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2424                                 rctl_bsize = buf_size;
2425                         if (!dev->data->scattered_rx)
2426                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2427                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2428                         dev->data->scattered_rx = 1;
2429                 }
2430
2431                 /* Set if packets are dropped when no descriptors available */
2432                 if (rxq->drop_en)
2433                         srrctl |= E1000_SRRCTL_DROP_EN;
2434
2435                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2436
2437                 /* Enable this RX queue. */
2438                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2439                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2440                 rxdctl &= 0xFFF00000;
2441                 rxdctl |= (rxq->pthresh & 0x1F);
2442                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2443                 if (hw->mac.type == e1000_vfadapt) {
2444                         /*
2445                          * Workaround of 82576 VF Erratum
2446                          * force set WTHRESH to 1
2447                          * to avoid Write-Back not triggered sometimes
2448                          */
2449                         rxdctl |= 0x10000;
2450                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2451                 }
2452                 else
2453                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2454                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2455         }
2456
2457         if (dev->data->dev_conf.rxmode.enable_scatter) {
2458                 if (!dev->data->scattered_rx)
2459                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2460                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2461                 dev->data->scattered_rx = 1;
2462         }
2463
2464         /*
2465          * Setup the HW Rx Head and Tail Descriptor Pointers.
2466          * This needs to be done after enable.
2467          */
2468         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2469                 rxq = dev->data->rx_queues[i];
2470                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2471                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2472         }
2473
2474         return 0;
2475 }
2476
2477 /*********************************************************************
2478  *
2479  *  Enable VF transmit unit.
2480  *
2481  **********************************************************************/
2482 void
2483 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2484 {
2485         struct e1000_hw     *hw;
2486         struct igb_tx_queue *txq;
2487         uint32_t txdctl;
2488         uint16_t i;
2489
2490         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2491
2492         /* Setup the Base and Length of the Tx Descriptor Rings. */
2493         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2494                 uint64_t bus_addr;
2495
2496                 txq = dev->data->tx_queues[i];
2497                 bus_addr = txq->tx_ring_phys_addr;
2498                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2499                                 txq->nb_tx_desc *
2500                                 sizeof(union e1000_adv_tx_desc));
2501                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2502                                 (uint32_t)(bus_addr >> 32));
2503                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2504
2505                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2506                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2507                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2508
2509                 /* Setup Transmit threshold registers. */
2510                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2511                 txdctl |= txq->pthresh & 0x1F;
2512                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2513                 if (hw->mac.type == e1000_82576) {
2514                         /*
2515                          * Workaround of 82576 VF Erratum
2516                          * force set WTHRESH to 1
2517                          * to avoid Write-Back not triggered sometimes
2518                          */
2519                         txdctl |= 0x10000;
2520                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2521                 }
2522                 else
2523                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2524                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2525                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2526         }
2527
2528 }
2529
2530 void
2531 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2532         struct rte_eth_rxq_info *qinfo)
2533 {
2534         struct igb_rx_queue *rxq;
2535
2536         rxq = dev->data->rx_queues[queue_id];
2537
2538         qinfo->mp = rxq->mb_pool;
2539         qinfo->scattered_rx = dev->data->scattered_rx;
2540         qinfo->nb_desc = rxq->nb_rx_desc;
2541
2542         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2543         qinfo->conf.rx_drop_en = rxq->drop_en;
2544 }
2545
2546 void
2547 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2548         struct rte_eth_txq_info *qinfo)
2549 {
2550         struct igb_tx_queue *txq;
2551
2552         txq = dev->data->tx_queues[queue_id];
2553
2554         qinfo->nb_desc = txq->nb_tx_desc;
2555
2556         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2557         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2558         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2559 }