igb: fix ieee1588 frame identification in i210
[dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit mask of the offload flags that require building a TX context descriptor. */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK |                 \
80                 PKT_TX_TCP_SEG)
81
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
84 {
85         struct rte_mbuf *m;
86
87         m = __rte_mbuf_raw_alloc(mp);
88         __rte_mbuf_sanity_check_raw(m, 0);
89         return (m);
90 }
91
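/*
 * Helper macros (descriptive note): compute the physical address the NIC
 * should DMA to or from, either at the mbuf's current data offset or at the
 * default offset just past the headroom, as used when replenishing RX
 * descriptors below.
 */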
92 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
93         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
94
95 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
96         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
97
98 /**
99  * Structure associated with each descriptor of the RX ring of an RX queue.
100  */
101 struct igb_rx_entry {
102         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
103 };
104
105 /**
106  * Structure associated with each descriptor of the TX ring of a TX queue.
107  */
108 struct igb_tx_entry {
109         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
110         uint16_t next_id; /**< Index of next descriptor in ring. */
111         uint16_t last_id; /**< Index of last scattered descriptor. */
112 };
113
114 /**
115  * Structure associated with each RX queue.
116  */
117 struct igb_rx_queue {
118         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
119         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
120         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
121         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
122         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
123         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
124         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
125         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
126         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
127         uint16_t            rx_tail;    /**< current value of RDT register. */
128         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
129         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
130         uint16_t            queue_id;   /**< RX queue index. */
131         uint16_t            reg_idx;    /**< RX queue register index. */
132         uint8_t             port_id;    /**< Device port identifier. */
133         uint8_t             pthresh;    /**< Prefetch threshold register. */
134         uint8_t             hthresh;    /**< Host threshold register. */
135         uint8_t             wthresh;    /**< Write-back threshold register. */
136         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
137         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
138 };
139
140 /**
141  * Hardware context number
142  */
143 enum igb_advctx_num {
144         IGB_CTX_0    = 0, /**< CTX0    */
145         IGB_CTX_1    = 1, /**< CTX1    */
146         IGB_CTX_NUM  = 2, /**< CTX_NUM */
147 };
148
149 /** Offload features */
150 union igb_tx_offload {
151         uint64_t data;
152         struct {
153                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
154                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
155                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier (CPU order). */
156                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
157                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
158
159                 /* uint64_t unused:8; */
160         };
161 };
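/*
 * Illustrative example (not from the original source): for an untagged
 * IPv4/TCP packet with a 14-byte Ethernet header and a 20-byte IP header,
 * l2_len = 14 and l3_len = 20; the low 32 bits of .data are later written
 * verbatim into the context descriptor's vlan_macip_lens field by
 * igbe_set_xmit_ctx().
 */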
162
163 /*
164  * Compare masks for igb_tx_offload.data; they must be kept in sync
165  * with the igb_tx_offload layout.
166  */
167 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
168 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
169 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
170 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
171 /** Mac + IP + TCP + Mss mask. */
172 #define TX_TSO_CMP_MASK \
173         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
174
175 /**
176  * Structure to check whether a new context descriptor needs to be built.
177  */
178 struct igb_advctx_info {
179         uint64_t flags;           /**< ol_flags related to context build. */
180         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
181         union igb_tx_offload tx_offload;
182         /** compare mask for tx offload. */
183         union igb_tx_offload tx_offload_mask;
184 };
185
186 /**
187  * Structure associated with each TX queue.
188  */
189 struct igb_tx_queue {
190         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
191         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
192         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
193         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
194         uint32_t               txd_type;      /**< Device-specific TXD type */
195         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
196         uint16_t               tx_tail; /**< Current value of TDT register. */
197         uint16_t               tx_head;
198         /**< Index of first used TX descriptor. */
199         uint16_t               queue_id; /**< TX queue index. */
200         uint16_t               reg_idx;  /**< TX queue register index. */
201         uint8_t                port_id;  /**< Device port identifier. */
202         uint8_t                pthresh;  /**< Prefetch threshold register. */
203         uint8_t                hthresh;  /**< Host threshold register. */
204         uint8_t                wthresh;  /**< Write-back threshold register. */
205         uint32_t               ctx_curr;
206         /**< Currently used hardware context. */
207         uint32_t               ctx_start;
208         /**< Start context position for transmit queue. */
209         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
210         /**< Hardware context history.*/
211 };
212
213 #if 1
214 #define RTE_PMD_USE_PREFETCH
215 #endif
216
217 #ifdef RTE_PMD_USE_PREFETCH
218 #define rte_igb_prefetch(p)     rte_prefetch0(p)
219 #else
220 #define rte_igb_prefetch(p)     do {} while(0)
221 #endif
222
223 #ifdef RTE_PMD_PACKET_PREFETCH
224 #define rte_packet_prefetch(p) rte_prefetch1(p)
225 #else
226 #define rte_packet_prefetch(p)  do {} while(0)
227 #endif
228
229 /*
230  * Macro for VMDq feature for 1 GbE NIC.
231  */
232 #define E1000_VMOLR_SIZE                        (8)
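/* Limits enforced by check_tso_para(): maximum total (L2+L3+L4) header
 * length and maximum MSS accepted for TCP segmentation offload. */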
233 #define IGB_TSO_MAX_HDRLEN                      (512)
234 #define IGB_TSO_MAX_MSS                         (9216)
235
236 /*********************************************************************
237  *
238  *  TX function
239  *
240  **********************************************************************/
241
242 /*
243  * There are some hardware limitations for TCP segmentation offload, so we
244  * should check whether the requested parameters are valid.
245  */
246 static inline uint64_t
247 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
248 {
249         if (!(ol_req & PKT_TX_TCP_SEG))
250                 return ol_req;
251         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
252                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
253                 ol_req &= ~PKT_TX_TCP_SEG;
254                 ol_req |= PKT_TX_TCP_CKSUM;
255         }
256         return ol_req;
257 }
258
259 /*
260  * Advanced context descriptors are almost the same between igb and ixgbe.
261  * This is kept as a separate function, as there may be optimization
262  * opportunities here; rework is required to go with the pre-defined values.
263  */
264
265 static inline void
266 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
267                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
268                 uint64_t ol_flags, union igb_tx_offload tx_offload)
269 {
270         uint32_t type_tucmd_mlhl;
271         uint32_t mss_l4len_idx;
272         uint32_t ctx_idx, ctx_curr;
273         uint32_t vlan_macip_lens;
274         union igb_tx_offload tx_offload_mask;
275
276         ctx_curr = txq->ctx_curr;
277         ctx_idx = ctx_curr + txq->ctx_start;
278
279         tx_offload_mask.data = 0;
280         type_tucmd_mlhl = 0;
281
282         /* Specify which HW CTX to upload. */
283         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
284
285         if (ol_flags & PKT_TX_VLAN_PKT)
286                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
287
288         /* Check if TCP segmentation is required for this packet. */
289         if (ol_flags & PKT_TX_TCP_SEG) {
290                 /* implies IP cksum in IPv4 */
291                 if (ol_flags & PKT_TX_IP_CKSUM)
292                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
293                                 E1000_ADVTXD_TUCMD_L4T_TCP |
294                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
295                 else
296                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
297                                 E1000_ADVTXD_TUCMD_L4T_TCP |
298                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
299
300                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
301                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
302                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
303         } else { /* no TSO, check if hardware checksum is needed */
304                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
305                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
306
307                 if (ol_flags & PKT_TX_IP_CKSUM)
308                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
309
310                 switch (ol_flags & PKT_TX_L4_MASK) {
311                 case PKT_TX_UDP_CKSUM:
312                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
313                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
314                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
315                         break;
316                 case PKT_TX_TCP_CKSUM:
317                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
318                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
319                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
320                         break;
321                 case PKT_TX_SCTP_CKSUM:
322                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
323                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
324                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
325                         break;
326                 default:
327                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
328                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
329                         break;
330                 }
331         }
332
333         txq->ctx_cache[ctx_curr].flags = ol_flags;
334         txq->ctx_cache[ctx_curr].tx_offload.data =
335                 tx_offload_mask.data & tx_offload.data;
336         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
337
338         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
339         vlan_macip_lens = (uint32_t)tx_offload.data;
340         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
341         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
342         ctx_txd->seqnum_seed = 0;
343 }
344
345 /*
346  * Check which hardware context can be used. Use the existing match
347  * or create a new context descriptor.
348  */
349 static inline uint32_t
350 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
351                 union igb_tx_offload tx_offload)
352 {
353         /* If it matches the current context */
354         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
355                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
356                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
357                         return txq->ctx_curr;
358         }
359
360         /* If it matches the second context */
361         txq->ctx_curr ^= 1;
362         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
363                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
364                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
365                         return txq->ctx_curr;
366         }
367
368         /* Mismatch: a new context descriptor must be built. */
369         return (IGB_CTX_NUM);
370 }
371
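/* Translate mbuf checksum/TSO request flags into the POPTS bits of the
 * data descriptor's olinfo_status field. */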
372 static inline uint32_t
373 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
374 {
375         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
376         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
377         uint32_t tmp;
378
379         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
380         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
381         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
382         return tmp;
383 }
384
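/* Translate VLAN insertion and TSO request flags into the DCMD bits
 * (VLE/TSE) of the data descriptor's cmd_type_len field. */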
385 static inline uint32_t
386 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
387 {
388         uint32_t cmdtype;
389         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
390         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
391         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
392         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
393         return cmdtype;
394 }
395
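/*
 * PMD transmit burst function: place up to nb_pkts packets from tx_pkts on
 * the TX ring of tx_queue, building a context descriptor first whenever the
 * requested offloads do not match one of the two cached hardware contexts.
 * Returns the number of packets actually queued for transmission.
 */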
396 uint16_t
397 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
398                uint16_t nb_pkts)
399 {
400         struct igb_tx_queue *txq;
401         struct igb_tx_entry *sw_ring;
402         struct igb_tx_entry *txe, *txn;
403         volatile union e1000_adv_tx_desc *txr;
404         volatile union e1000_adv_tx_desc *txd;
405         struct rte_mbuf     *tx_pkt;
406         struct rte_mbuf     *m_seg;
407         uint64_t buf_dma_addr;
408         uint32_t olinfo_status;
409         uint32_t cmd_type_len;
410         uint32_t pkt_len;
411         uint16_t slen;
412         uint64_t ol_flags;
413         uint16_t tx_end;
414         uint16_t tx_id;
415         uint16_t tx_last;
416         uint16_t nb_tx;
417         uint64_t tx_ol_req;
418         uint32_t new_ctx = 0;
419         uint32_t ctx = 0;
420         union igb_tx_offload tx_offload = {0};
421
422         txq = tx_queue;
423         sw_ring = txq->sw_ring;
424         txr     = txq->tx_ring;
425         tx_id   = txq->tx_tail;
426         txe = &sw_ring[tx_id];
427
428         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
429                 tx_pkt = *tx_pkts++;
430                 pkt_len = tx_pkt->pkt_len;
431
432                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
433
434                 /*
435                  * The number of descriptors that must be allocated for a
436                  * packet is the number of segments of that packet, plus one
437                  * Context Descriptor if any TX offload (VLAN, checksum, TSO) is used.
438                  * Determine the last TX descriptor to allocate in the TX ring
439                  * for the packet, starting from the current position (tx_id)
440                  * in the ring.
441                  */
442                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
443
444                 ol_flags = tx_pkt->ol_flags;
445                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
446
447                 /* Check whether a Context Descriptor needs to be built. */
448                 if (tx_ol_req) {
449                         tx_offload.l2_len = tx_pkt->l2_len;
450                         tx_offload.l3_len = tx_pkt->l3_len;
451                         tx_offload.l4_len = tx_pkt->l4_len;
452                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
453                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
454                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
455
456                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
457                         /* Only allocate a context descriptor if required. */
458                         new_ctx = (ctx == IGB_CTX_NUM);
459                         ctx = txq->ctx_curr;
460                         tx_last = (uint16_t) (tx_last + new_ctx);
461                 }
462                 if (tx_last >= txq->nb_tx_desc)
463                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
464
465                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
466                            " tx_first=%u tx_last=%u",
467                            (unsigned) txq->port_id,
468                            (unsigned) txq->queue_id,
469                            (unsigned) pkt_len,
470                            (unsigned) tx_id,
471                            (unsigned) tx_last);
472
473                 /*
474                  * Check if there are enough free descriptors in the TX ring
475                  * to transmit the next packet.
476                  * This operation is based on the two following rules:
477                  *
478                  *   1- Only check that the last needed TX descriptor can be
479                  *      allocated (by construction, if that descriptor is free,
480                  *      all intermediate ones are also free).
481                  *
482                  *      For this purpose, the index of the last TX descriptor
483                  *      used for a packet (the "last descriptor" of a packet)
484                  *      is recorded in the TX entries (the last one included)
485                  *      that are associated with all TX descriptors allocated
486                  *      for that packet.
487                  *
488                  *   2- Avoid allocating the last free TX descriptor of the
489                  *      ring, so that the TDT register is never set to the
490                  *      same value stored in parallel by the NIC in the TDH
491                  *      register, which would make the TX engine of the NIC
492                  *      enter a deadlock situation.
493                  *
494                  *      By extension, avoid allocating a free descriptor that
495                  *      belongs to the last set of free descriptors allocated
496                  *      for the same previously transmitted packet.
497                  */
498
499                 /*
500                  * The "last descriptor" of the packet, if any, that previously
501                  * used the last descriptor we now want to allocate.
502                  */
503                 tx_end = sw_ring[tx_last].last_id;
504
505                 /*
506                  * The next descriptor following that "last descriptor" in the
507                  * ring.
508                  */
509                 tx_end = sw_ring[tx_end].next_id;
510
511                 /*
512                  * The "last descriptor" associated with that next descriptor.
513                  */
514                 tx_end = sw_ring[tx_end].last_id;
515
516                 /*
517                  * Check that this descriptor is free.
518                  */
519                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
520                         if (nb_tx == 0)
521                                 return (0);
522                         goto end_of_tx;
523                 }
524
525                 /*
526                  * Set common flags of all TX Data Descriptors.
527                  *
528                  * The following bits must be set in all Data Descriptors:
529                  *   - E1000_ADVTXD_DTYP_DATA
530                  *   - E1000_ADVTXD_DCMD_DEXT
531                  *
532                  * The following bits must be set in the first Data Descriptor
533                  * and are ignored in the other ones:
534                  *   - E1000_ADVTXD_DCMD_IFCS
535                  *   - E1000_ADVTXD_MAC_1588
536                  *   - E1000_ADVTXD_DCMD_VLE
537                  *
538                  * The following bits must only be set in the last Data
539                  * Descriptor:
540                  *   - E1000_TXD_CMD_EOP
541                  *
542                  * The following bits can be set in any Data Descriptor, but
543                  * are only set in the last Data Descriptor:
544                  *   - E1000_TXD_CMD_RS
545                  */
546                 cmd_type_len = txq->txd_type |
547                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
548                 if (tx_ol_req & PKT_TX_TCP_SEG)
549                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
550                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
551 #if defined(RTE_LIBRTE_IEEE1588)
552                 if (ol_flags & PKT_TX_IEEE1588_TMST)
553                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
554 #endif
555                 if (tx_ol_req) {
556                         /* Setup TX Advanced context descriptor if required */
557                         if (new_ctx) {
558                                 volatile struct e1000_adv_tx_context_desc *
559                                     ctx_txd;
560
561                                 ctx_txd = (volatile struct
562                                     e1000_adv_tx_context_desc *)
563                                     &txr[tx_id];
564
565                                 txn = &sw_ring[txe->next_id];
566                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
567
568                                 if (txe->mbuf != NULL) {
569                                         rte_pktmbuf_free_seg(txe->mbuf);
570                                         txe->mbuf = NULL;
571                                 }
572
573                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
574
575                                 txe->last_id = tx_last;
576                                 tx_id = txe->next_id;
577                                 txe = txn;
578                         }
579
580                         /* Setup the TX Advanced Data Descriptor */
581                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
582                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
583                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
584                 }
585
586                 m_seg = tx_pkt;
587                 do {
588                         txn = &sw_ring[txe->next_id];
589                         txd = &txr[tx_id];
590
591                         if (txe->mbuf != NULL)
592                                 rte_pktmbuf_free_seg(txe->mbuf);
593                         txe->mbuf = m_seg;
594
595                         /*
596                          * Set up transmit descriptor.
597                          */
598                         slen = (uint16_t) m_seg->data_len;
599                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
600                         txd->read.buffer_addr =
601                                 rte_cpu_to_le_64(buf_dma_addr);
602                         txd->read.cmd_type_len =
603                                 rte_cpu_to_le_32(cmd_type_len | slen);
604                         txd->read.olinfo_status =
605                                 rte_cpu_to_le_32(olinfo_status);
606                         txe->last_id = tx_last;
607                         tx_id = txe->next_id;
608                         txe = txn;
609                         m_seg = m_seg->next;
610                 } while (m_seg != NULL);
611
612                 /*
613                  * The last packet data descriptor needs End Of Packet (EOP)
614                  * and Report Status (RS).
615                  */
616                 txd->read.cmd_type_len |=
617                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
618         }
619  end_of_tx:
620         rte_wmb();
621
622         /*
623          * Set the Transmit Descriptor Tail (TDT).
624          */
625         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
626         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
627                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
628                    (unsigned) tx_id, (unsigned) nb_tx);
629         txq->tx_tail = tx_id;
630
631         return (nb_tx);
632 }
633
634 /*********************************************************************
635  *
636  *  RX functions
637  *
638  **********************************************************************/
639 #define IGB_PACKET_TYPE_IPV4              0X01
640 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
641 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
642 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
643 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
644 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
645 #define IGB_PACKET_TYPE_IPV6              0X04
646 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
647 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
648 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
649 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
650 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
651 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
652 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
653 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
654 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
655 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
656 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
657 #define IGB_PACKET_TYPE_MAX               0X80
658 #define IGB_PACKET_TYPE_MASK              0X7F
659 #define IGB_PACKET_TYPE_SHIFT             0X04
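/*
 * Translate the packet type reported in the RX descriptor (the
 * IGB_PACKET_TYPE_* encodings above) into an RTE_PTYPE_* value. Packets that
 * matched an EtherType (ETQF) filter carry no usable packet type and are
 * reported as unknown.
 */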
660 static inline uint32_t
661 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
662 {
663         static const uint32_t
664                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
665                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
666                         RTE_PTYPE_L3_IPV4,
667                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
668                         RTE_PTYPE_L3_IPV4_EXT,
669                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
670                         RTE_PTYPE_L3_IPV6,
671                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
672                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673                         RTE_PTYPE_INNER_L3_IPV6,
674                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
675                         RTE_PTYPE_L3_IPV6_EXT,
676                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
677                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
678                         RTE_PTYPE_INNER_L3_IPV6_EXT,
679                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
680                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
681                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
682                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
683                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
684                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
686                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
687                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
688                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
689                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
690                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
691                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
692                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
693                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
694                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
695                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
696                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
698                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
700                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
703                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
705                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
706                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
707         };
708         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
709                 return RTE_PTYPE_UNKNOWN;
710
711         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
712
713         return ptype_table[pkt_info];
714 }
715
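/*
 * Derive RX offload flags from the descriptor's hlen_type_rss word: the RSS
 * hash-type field (a hash was computed if it is non-zero) and, when IEEE 1588
 * support is compiled in, the EtherType-filter match used to flag PTP frames.
 */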
716 static inline uint64_t
717 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
718 {
719         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
720
721 #if defined(RTE_LIBRTE_IEEE1588)
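        /*
         * Map from the 3-bit EtherType-filter (ETQF) index reported in the
         * descriptor to RX offload flags. Only index 3 is of interest here,
         * assuming the driver programs that ETQF register for the IEEE 1588
         * (PTP) EtherType when timesync is enabled.
         */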
722         static uint32_t ip_pkt_etqf_map[8] = {
723                 0, 0, 0, PKT_RX_IEEE1588_PTP,
724                 0, 0, 0, 0,
725         };
726
727         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
728         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
729
730         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
731         if (hw->mac.type == e1000_i210)
732                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
733         else
734                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
735 #endif
736
737         return pkt_flags;
738 }
739
740 static inline uint64_t
741 rx_desc_status_to_pkt_flags(uint32_t rx_status)
742 {
743         uint64_t pkt_flags;
744
745         /* Check if VLAN present */
746         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
747
748 #if defined(RTE_LIBRTE_IEEE1588)
749         if (rx_status & E1000_RXD_STAT_TMST)
750                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
751 #endif
752         return pkt_flags;
753 }
754
755 static inline uint64_t
756 rx_desc_error_to_pkt_flags(uint32_t rx_status)
757 {
758         /*
759          * Bit 30: IPE, IPv4 checksum error
760          * Bit 29: L4I, L4 integrity error
761          */
762
763         static uint64_t error_to_pkt_flags_map[4] = {
764                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
765                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
766         };
767         return error_to_pkt_flags_map[(rx_status >>
768                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
769 }
770
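/*
 * PMD receive burst function (non-scattered): retrieve up to nb_pkts packets
 * from rx_queue, replenish each consumed descriptor with a freshly allocated
 * mbuf, and update the RDT register once enough descriptors have been held.
 */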
771 uint16_t
772 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
773                uint16_t nb_pkts)
774 {
775         struct igb_rx_queue *rxq;
776         volatile union e1000_adv_rx_desc *rx_ring;
777         volatile union e1000_adv_rx_desc *rxdp;
778         struct igb_rx_entry *sw_ring;
779         struct igb_rx_entry *rxe;
780         struct rte_mbuf *rxm;
781         struct rte_mbuf *nmb;
782         union e1000_adv_rx_desc rxd;
783         uint64_t dma_addr;
784         uint32_t staterr;
785         uint32_t hlen_type_rss;
786         uint16_t pkt_len;
787         uint16_t rx_id;
788         uint16_t nb_rx;
789         uint16_t nb_hold;
790         uint64_t pkt_flags;
791
792         nb_rx = 0;
793         nb_hold = 0;
794         rxq = rx_queue;
795         rx_id = rxq->rx_tail;
796         rx_ring = rxq->rx_ring;
797         sw_ring = rxq->sw_ring;
798         while (nb_rx < nb_pkts) {
799                 /*
800                  * The order of operations here is important as the DD status
801                  * bit must not be read after any other descriptor fields.
802                  * rx_ring and rxdp are pointing to volatile data so the order
803                  * of accesses cannot be reordered by the compiler. If they were
804                  * not volatile, they could be reordered which could lead to
805                  * using invalid descriptor fields when read from rxd.
806                  */
807                 rxdp = &rx_ring[rx_id];
808                 staterr = rxdp->wb.upper.status_error;
809                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
810                         break;
811                 rxd = *rxdp;
812
813                 /*
814                  * End of packet.
815                  *
816                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
817                  * likely to be invalid and to be dropped by the various
818                  * validation checks performed by the network stack.
819                  *
820                  * Allocate a new mbuf to replenish the RX ring descriptor.
821                  * If the allocation fails:
822                  *    - arrange for that RX descriptor to be the first one
823                  *      being parsed the next time the receive function is
824                  *      invoked [on the same queue].
825                  *
826                  *    - Stop parsing the RX ring and return immediately.
827                  *
828                  * This policy does not drop the packet received in the RX
829                  * descriptor for which the allocation of a new mbuf failed.
830                  * Thus, it allows that packet to be retrieved later if
831                  * mbufs have been freed in the meantime.
832                  * As a side effect, holding RX descriptors instead of
833                  * systematically giving them back to the NIC may lead to
834                  * RX ring exhaustion situations.
835                  * However, the NIC can gracefully prevent such situations
836                  * from happening by sending specific "back-pressure" flow
837                  * control frames to its peer(s).
838                  */
839                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
840                            "staterr=0x%x pkt_len=%u",
841                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
842                            (unsigned) rx_id, (unsigned) staterr,
843                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
844
845                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
846                 if (nmb == NULL) {
847                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
848                                    "queue_id=%u", (unsigned) rxq->port_id,
849                                    (unsigned) rxq->queue_id);
850                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
851                         break;
852                 }
853
854                 nb_hold++;
855                 rxe = &sw_ring[rx_id];
856                 rx_id++;
857                 if (rx_id == rxq->nb_rx_desc)
858                         rx_id = 0;
859
860                 /* Prefetch next mbuf while processing current one. */
861                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
862
863                 /*
864                  * When next RX descriptor is on a cache-line boundary,
865                  * prefetch the next 4 RX descriptors and the next 8 pointers
866                  * to mbufs.
867                  */
868                 if ((rx_id & 0x3) == 0) {
869                         rte_igb_prefetch(&rx_ring[rx_id]);
870                         rte_igb_prefetch(&sw_ring[rx_id]);
871                 }
872
873                 rxm = rxe->mbuf;
874                 rxe->mbuf = nmb;
875                 dma_addr =
876                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
877                 rxdp->read.hdr_addr = 0;
878                 rxdp->read.pkt_addr = dma_addr;
879
880                 /*
881                  * Initialize the returned mbuf.
882                  * 1) setup generic mbuf fields:
883                  *    - number of segments,
884                  *    - next segment,
885                  *    - packet length,
886                  *    - RX port identifier.
887                  * 2) integrate hardware offload data, if any:
888                  *    - RSS flag & hash,
889                  *    - IP checksum flag,
890                  *    - VLAN TCI, if any,
891                  *    - error flags.
892                  */
893                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
894                                       rxq->crc_len);
895                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
896                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
897                 rxm->nb_segs = 1;
898                 rxm->next = NULL;
899                 rxm->pkt_len = pkt_len;
900                 rxm->data_len = pkt_len;
901                 rxm->port = rxq->port_id;
902
903                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
904                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
905                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
906                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
907
908                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
909                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
910                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
911                 rxm->ol_flags = pkt_flags;
912                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
913                                                 lo_dword.hs_rss.pkt_info);
914
915                 /*
916                  * Store the mbuf address into the next entry of the array
917                  * of returned packets.
918                  */
919                 rx_pkts[nb_rx++] = rxm;
920         }
921         rxq->rx_tail = rx_id;
922
923         /*
924          * If the number of free RX descriptors is greater than the RX free
925          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
926          * register.
927          * Update the RDT with the value of the last processed RX descriptor
928          * minus 1, to guarantee that the RDT register is never equal to the
929          * RDH register, which creates a "full" ring situtation from the
930          * RDH register, which creates a "full" ring situation from the
931          */
932         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
933         if (nb_hold > rxq->rx_free_thresh) {
934                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
935                            "nb_hold=%u nb_rx=%u",
936                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
937                            (unsigned) rx_id, (unsigned) nb_hold,
938                            (unsigned) nb_rx);
939                 rx_id = (uint16_t) ((rx_id == 0) ?
940                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
941                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
942                 nb_hold = 0;
943         }
944         rxq->nb_rx_hold = nb_hold;
945         return (nb_rx);
946 }
947
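/*
 * PMD receive burst function for scattered packets: like eth_igb_recv_pkts(),
 * but chains the mbufs of multi-descriptor packets until the EOP bit is seen,
 * and strips the CRC from the tail segment when CRC stripping is not done by
 * the hardware.
 */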
948 uint16_t
949 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
950                          uint16_t nb_pkts)
951 {
952         struct igb_rx_queue *rxq;
953         volatile union e1000_adv_rx_desc *rx_ring;
954         volatile union e1000_adv_rx_desc *rxdp;
955         struct igb_rx_entry *sw_ring;
956         struct igb_rx_entry *rxe;
957         struct rte_mbuf *first_seg;
958         struct rte_mbuf *last_seg;
959         struct rte_mbuf *rxm;
960         struct rte_mbuf *nmb;
961         union e1000_adv_rx_desc rxd;
962         uint64_t dma; /* Physical address of mbuf data buffer */
963         uint32_t staterr;
964         uint32_t hlen_type_rss;
965         uint16_t rx_id;
966         uint16_t nb_rx;
967         uint16_t nb_hold;
968         uint16_t data_len;
969         uint64_t pkt_flags;
970
971         nb_rx = 0;
972         nb_hold = 0;
973         rxq = rx_queue;
974         rx_id = rxq->rx_tail;
975         rx_ring = rxq->rx_ring;
976         sw_ring = rxq->sw_ring;
977
978         /*
979          * Retrieve RX context of current packet, if any.
980          */
981         first_seg = rxq->pkt_first_seg;
982         last_seg = rxq->pkt_last_seg;
983
984         while (nb_rx < nb_pkts) {
985         next_desc:
986                 /*
987                  * The order of operations here is important as the DD status
988                  * bit must not be read after any other descriptor fields.
989                  * rx_ring and rxdp are pointing to volatile data so the order
990                  * of accesses cannot be reordered by the compiler. If they were
991                  * not volatile, they could be reordered which could lead to
992                  * using invalid descriptor fields when read from rxd.
993                  */
994                 rxdp = &rx_ring[rx_id];
995                 staterr = rxdp->wb.upper.status_error;
996                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
997                         break;
998                 rxd = *rxdp;
999
1000                 /*
1001                  * Descriptor done.
1002                  *
1003                  * Allocate a new mbuf to replenish the RX ring descriptor.
1004                  * If the allocation fails:
1005                  *    - arrange for that RX descriptor to be the first one
1006                  *      being parsed the next time the receive function is
1007                  *      invoked [on the same queue].
1008                  *
1009                  *    - Stop parsing the RX ring and return immediately.
1010                  *
1011                  * This policy does not drop the packet received in the RX
1012                  * descriptor for which the allocation of a new mbuf failed.
1013                  * Thus, it allows that packet to be retrieved later if
1014                  * mbufs have been freed in the meantime.
1015                  * As a side effect, holding RX descriptors instead of
1016                  * systematically giving them back to the NIC may lead to
1017                  * RX ring exhaustion situations.
1018                  * However, the NIC can gracefully prevent such situations
1019                  * from happening by sending specific "back-pressure" flow
1020                  * control frames to its peer(s).
1021                  */
1022                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1023                            "staterr=0x%x data_len=%u",
1024                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1025                            (unsigned) rx_id, (unsigned) staterr,
1026                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1027
1028                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1029                 if (nmb == NULL) {
1030                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1031                                    "queue_id=%u", (unsigned) rxq->port_id,
1032                                    (unsigned) rxq->queue_id);
1033                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1034                         break;
1035                 }
1036
1037                 nb_hold++;
1038                 rxe = &sw_ring[rx_id];
1039                 rx_id++;
1040                 if (rx_id == rxq->nb_rx_desc)
1041                         rx_id = 0;
1042
1043                 /* Prefetch next mbuf while processing current one. */
1044                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1045
1046                 /*
1047                  * When next RX descriptor is on a cache-line boundary,
1048                  * prefetch the next 4 RX descriptors and the next 8 pointers
1049                  * to mbufs.
1050                  */
1051                 if ((rx_id & 0x3) == 0) {
1052                         rte_igb_prefetch(&rx_ring[rx_id]);
1053                         rte_igb_prefetch(&sw_ring[rx_id]);
1054                 }
1055
1056                 /*
1057                  * Update RX descriptor with the physical address of the new
1058                  * data buffer of the new allocated mbuf.
1059                  */
1060                 rxm = rxe->mbuf;
1061                 rxe->mbuf = nmb;
1062                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1063                 rxdp->read.pkt_addr = dma;
1064                 rxdp->read.hdr_addr = 0;
1065
1066                 /*
1067                  * Set data length & data buffer address of mbuf.
1068                  */
1069                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1070                 rxm->data_len = data_len;
1071                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1072
1073                 /*
1074                  * If this is the first buffer of the received packet,
1075                  * set the pointer to the first mbuf of the packet and
1076                  * initialize its context.
1077                  * Otherwise, update the total length and the number of segments
1078                  * of the current scattered packet, and update the pointer to
1079                  * the last mbuf of the current packet.
1080                  */
1081                 if (first_seg == NULL) {
1082                         first_seg = rxm;
1083                         first_seg->pkt_len = data_len;
1084                         first_seg->nb_segs = 1;
1085                 } else {
1086                         first_seg->pkt_len += data_len;
1087                         first_seg->nb_segs++;
1088                         last_seg->next = rxm;
1089                 }
1090
1091                 /*
1092                  * If this is not the last buffer of the received packet,
1093                  * update the pointer to the last mbuf of the current scattered
1094                  * packet and continue to parse the RX ring.
1095                  */
1096                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1097                         last_seg = rxm;
1098                         goto next_desc;
1099                 }
1100
1101                 /*
1102                  * This is the last buffer of the received packet.
1103                  * If the CRC is not stripped by the hardware:
1104                  *   - Subtract the CRC length from the total packet length.
1105                  *   - If the last buffer only contains the whole CRC or a part
1106                  *     of it, free the mbuf associated to the last buffer.
1107                  *     If part of the CRC is also contained in the previous
1108                  *     mbuf, subtract the length of that CRC part from the
1109                  *     data length of the previous mbuf.
1110                  */
1111                 rxm->next = NULL;
1112                 if (unlikely(rxq->crc_len > 0)) {
1113                         first_seg->pkt_len -= ETHER_CRC_LEN;
1114                         if (data_len <= ETHER_CRC_LEN) {
1115                                 rte_pktmbuf_free_seg(rxm);
1116                                 first_seg->nb_segs--;
1117                                 last_seg->data_len = (uint16_t)
1118                                         (last_seg->data_len -
1119                                          (ETHER_CRC_LEN - data_len));
1120                                 last_seg->next = NULL;
1121                         } else
1122                                 rxm->data_len =
1123                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1124                 }
1125
1126                 /*
1127                  * Initialize the first mbuf of the returned packet:
1128                  *    - RX port identifier,
1129                  *    - hardware offload data, if any:
1130                  *      - RSS flag & hash,
1131                  *      - IP checksum flag,
1132                  *      - VLAN TCI, if any,
1133                  *      - error flags.
1134                  */
1135                 first_seg->port = rxq->port_id;
1136                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1137
1138                 /*
1139                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1140                  * set in the pkt_flags field.
1141                  */
1142                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1143                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1144                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1145                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1146                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1147                 first_seg->ol_flags = pkt_flags;
1148                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1149                                         lower.lo_dword.hs_rss.pkt_info);
1150
1151                 /* Prefetch data of first segment, if configured to do so. */
1152                 rte_packet_prefetch((char *)first_seg->buf_addr +
1153                         first_seg->data_off);
1154
1155                 /*
1156                  * Store the mbuf address into the next entry of the array
1157                  * of returned packets.
1158                  */
1159                 rx_pkts[nb_rx++] = first_seg;
1160
1161                 /*
1162                  * Setup receipt context for a new packet.
1163                  */
1164                 first_seg = NULL;
1165         }
1166
1167         /*
1168          * Record index of the next RX descriptor to probe.
1169          */
1170         rxq->rx_tail = rx_id;
1171
1172         /*
1173          * Save receive context.
1174          */
1175         rxq->pkt_first_seg = first_seg;
1176         rxq->pkt_last_seg = last_seg;
1177
1178         /*
1179          * If the number of free RX descriptors is greater than the RX free
1180          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1181          * register.
1182          * Update the RDT with the value of the last processed RX descriptor
1183          * minus 1, to guarantee that the RDT register is never equal to the
1184          * RDH register, which creates a "full" ring situation from the
1185          * hardware point of view...
1186          */
1187         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1188         if (nb_hold > rxq->rx_free_thresh) {
1189                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1190                            "nb_hold=%u nb_rx=%u",
1191                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1192                            (unsigned) rx_id, (unsigned) nb_hold,
1193                            (unsigned) nb_rx);
1194                 rx_id = (uint16_t) ((rx_id == 0) ?
1195                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1196                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1197                 nb_hold = 0;
1198         }
1199         rxq->nb_rx_hold = nb_hold;
1200         return (nb_rx);
1201 }
1202
1203 /*
1204  * Rings setup and release.
1205  *
1206  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1207  * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary instead.
1208  * This also optimizes for the cache line size effect; the hardware supports
1209  * cache line sizes of up to 128 bytes.
1210  */
1211 #define IGB_ALIGN 128
1212
1213 /*
1214  * Maximum number of Ring Descriptors.
1215  *
1216  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1217  * descriptors must meet the following condition:
1218  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1219  */
1220 #define IGB_MIN_RING_DESC 32
1221 #define IGB_MAX_RING_DESC 4096
1222
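/*
 * Reserve a DMA-able memzone for a HW descriptor ring, or return the existing
 * one if it has already been created (e.g. after a port restart).
 */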
1223 static const struct rte_memzone *
1224 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1225                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1226 {
1227         char z_name[RTE_MEMZONE_NAMESIZE];
1228         const struct rte_memzone *mz;
1229
1230         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1231                         dev->driver->pci_drv.name, ring_name,
1232                                 dev->data->port_id, queue_id);
1233         mz = rte_memzone_lookup(z_name);
1234         if (mz)
1235                 return mz;
1236
1237 #ifdef RTE_LIBRTE_XEN_DOM0
1238         return rte_memzone_reserve_bounded(z_name, ring_size,
1239                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1240 #else
1241         return rte_memzone_reserve_aligned(z_name, ring_size,
1242                         socket_id, 0, IGB_ALIGN);
1243 #endif
1244 }
1245
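/* Free any mbufs still attached to the software ring of a TX queue. */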
1246 static void
1247 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1248 {
1249         unsigned i;
1250
1251         if (txq->sw_ring != NULL) {
1252                 for (i = 0; i < txq->nb_tx_desc; i++) {
1253                         if (txq->sw_ring[i].mbuf != NULL) {
1254                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1255                                 txq->sw_ring[i].mbuf = NULL;
1256                         }
1257                 }
1258         }
1259 }
1260
1261 static void
1262 igb_tx_queue_release(struct igb_tx_queue *txq)
1263 {
1264         if (txq != NULL) {
1265                 igb_tx_queue_release_mbufs(txq);
1266                 rte_free(txq->sw_ring);
1267                 rte_free(txq);
1268         }
1269 }
1270
1271 void
1272 eth_igb_tx_queue_release(void *txq)
1273 {
1274         igb_tx_queue_release(txq);
1275 }
1276
1277 static void
1278 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1279 {
1280         txq->tx_head = 0;
1281         txq->tx_tail = 0;
1282         txq->ctx_curr = 0;
1283         memset((void*)&txq->ctx_cache, 0,
1284                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1285 }
1286
1287 static void
1288 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1289 {
1290         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1291         struct igb_tx_entry *txe = txq->sw_ring;
1292         uint16_t i, prev;
1293         struct e1000_hw *hw;
1294
1295         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1296         /* Zero out HW ring memory */
1297         for (i = 0; i < txq->nb_tx_desc; i++) {
1298                 txq->tx_ring[i] = zeroed_desc;
1299         }
1300
1301         /* Initialize ring entries */
1302         prev = (uint16_t)(txq->nb_tx_desc - 1);
1303         for (i = 0; i < txq->nb_tx_desc; i++) {
1304                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1305
1306                 txd->wb.status = E1000_TXD_STAT_DD;
1307                 txe[i].mbuf = NULL;
1308                 txe[i].last_id = i;
1309                 txe[prev].next_id = i;
1310                 prev = i;
1311         }
1312
1313         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1314         /* 82575 specific, each tx queue will use 2 hw contexts */
1315         if (hw->mac.type == e1000_82575)
1316                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1317
1318         igb_reset_tx_queue_stat(txq);
1319 }
1320
1321 int
1322 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1323                          uint16_t queue_idx,
1324                          uint16_t nb_desc,
1325                          unsigned int socket_id,
1326                          const struct rte_eth_txconf *tx_conf)
1327 {
1328         const struct rte_memzone *tz;
1329         struct igb_tx_queue *txq;
1330         struct e1000_hw     *hw;
1331         uint32_t size;
1332
1333         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1334
1335         /*
1336          * Validate number of transmit descriptors.
1337          * It must not exceed hardware maximum, and must be multiple
1338          * of IGB_ALIGN.
1339          */
1340         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1341             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1342                 return -EINVAL;
1343         }
1344
1345         /*
1346          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1347          * driver.
1348          */
1349         if (tx_conf->tx_free_thresh != 0)
1350                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1351                              "used for the 1G driver.");
1352         if (tx_conf->tx_rs_thresh != 0)
1353                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1354                              "used for the 1G driver.");
1355         if (tx_conf->tx_thresh.wthresh == 0)
1356                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1357                              "consider setting the TX WTHRESH value to 4, 8, "
1358                              "or 16.");
1359
1360         /* Free memory prior to re-allocation if needed */
1361         if (dev->data->tx_queues[queue_idx] != NULL) {
1362                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1363                 dev->data->tx_queues[queue_idx] = NULL;
1364         }
1365
1366         /* First allocate the tx queue data structure */
1367         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1368                                                         RTE_CACHE_LINE_SIZE);
1369         if (txq == NULL)
1370                 return (-ENOMEM);
1371
1372         /*
1373          * Allocate TX ring hardware descriptors. A memzone large enough to
1374          * handle the maximum ring size is allocated in order to allow for
1375          * resizing in later calls to the queue setup function.
1376          */
1377         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1378         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1379                                         size, socket_id);
1380         if (tz == NULL) {
1381                 igb_tx_queue_release(txq);
1382                 return (-ENOMEM);
1383         }
1384
1385         txq->nb_tx_desc = nb_desc;
1386         txq->pthresh = tx_conf->tx_thresh.pthresh;
1387         txq->hthresh = tx_conf->tx_thresh.hthresh;
1388         txq->wthresh = tx_conf->tx_thresh.wthresh;
1389         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1390                 txq->wthresh = 1;
1391         txq->queue_id = queue_idx;
1392         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1393                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1394         txq->port_id = dev->data->port_id;
1395
1396         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1397 #ifndef RTE_LIBRTE_XEN_DOM0
1398         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1399 #else
1400         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1401 #endif
1402         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1403         /* Allocate software ring */
1404         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1405                                    sizeof(struct igb_tx_entry) * nb_desc,
1406                                    RTE_CACHE_LINE_SIZE);
1407         if (txq->sw_ring == NULL) {
1408                 igb_tx_queue_release(txq);
1409                 return (-ENOMEM);
1410         }
1411         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1412                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1413
1414         igb_reset_tx_queue(txq, dev);
1415         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1416         dev->data->tx_queues[queue_idx] = txq;
1417
1418         return (0);
1419 }
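/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * normally reach eth_igb_tx_queue_setup() through the generic ethdev API.
 * The port id, queue id, descriptor count and threshold values below are
 * assumptions chosen for the example.
 *
 *     struct rte_eth_txconf txconf = {
 *             .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
 *     };
 *
 *     // queue 0 of port 0, 512 descriptors, NUMA socket 0
 *     if (rte_eth_tx_queue_setup(0, 0, 512, 0, &txconf) != 0)
 *             rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */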
1420
1421 static void
1422 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1423 {
1424         unsigned i;
1425
1426         if (rxq->sw_ring != NULL) {
1427                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1428                         if (rxq->sw_ring[i].mbuf != NULL) {
1429                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1430                                 rxq->sw_ring[i].mbuf = NULL;
1431                         }
1432                 }
1433         }
1434 }
1435
1436 static void
1437 igb_rx_queue_release(struct igb_rx_queue *rxq)
1438 {
1439         if (rxq != NULL) {
1440                 igb_rx_queue_release_mbufs(rxq);
1441                 rte_free(rxq->sw_ring);
1442                 rte_free(rxq);
1443         }
1444 }
1445
1446 void
1447 eth_igb_rx_queue_release(void *rxq)
1448 {
1449         igb_rx_queue_release(rxq);
1450 }
1451
1452 static void
1453 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1454 {
1455         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1456         unsigned i;
1457
1458         /* Zero out HW ring memory */
1459         for (i = 0; i < rxq->nb_rx_desc; i++) {
1460                 rxq->rx_ring[i] = zeroed_desc;
1461         }
1462
1463         rxq->rx_tail = 0;
1464         rxq->pkt_first_seg = NULL;
1465         rxq->pkt_last_seg = NULL;
1466 }
1467
1468 int
1469 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1470                          uint16_t queue_idx,
1471                          uint16_t nb_desc,
1472                          unsigned int socket_id,
1473                          const struct rte_eth_rxconf *rx_conf,
1474                          struct rte_mempool *mp)
1475 {
1476         const struct rte_memzone *rz;
1477         struct igb_rx_queue *rxq;
1478         struct e1000_hw     *hw;
1479         unsigned int size;
1480
1481         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1482
1483         /*
1484          * Validate number of receive descriptors.
1485          * It must not exceed hardware maximum, and must be multiple
1486          * of IGB_ALIGN.
1487          */
1488         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1489             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1490                 return (-EINVAL);
1491         }
1492
1493         /* Free memory prior to re-allocation if needed */
1494         if (dev->data->rx_queues[queue_idx] != NULL) {
1495                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1496                 dev->data->rx_queues[queue_idx] = NULL;
1497         }
1498
1499         /* First allocate the RX queue data structure. */
1500         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1501                           RTE_CACHE_LINE_SIZE);
1502         if (rxq == NULL)
1503                 return (-ENOMEM);
1504         rxq->mb_pool = mp;
1505         rxq->nb_rx_desc = nb_desc;
1506         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1507         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1508         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1509         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1510                 rxq->wthresh = 1;
1511         rxq->drop_en = rx_conf->rx_drop_en;
1512         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1513         rxq->queue_id = queue_idx;
1514         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1515                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1516         rxq->port_id = dev->data->port_id;
1517         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1518                                   ETHER_CRC_LEN);
1519
1520         /*
1521          *  Allocate RX ring hardware descriptors. A memzone large enough to
1522          *  handle the maximum ring size is allocated in order to allow for
1523          *  resizing in later calls to the queue setup function.
1524          */
1525         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1526         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1527         if (rz == NULL) {
1528                 igb_rx_queue_release(rxq);
1529                 return (-ENOMEM);
1530         }
1531         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1532         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1533 #ifndef RTE_LIBRTE_XEN_DOM0
1534         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1535 #else
1536         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1537 #endif
1538         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1539
1540         /* Allocate software ring. */
1541         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1542                                    sizeof(struct igb_rx_entry) * nb_desc,
1543                                    RTE_CACHE_LINE_SIZE);
1544         if (rxq->sw_ring == NULL) {
1545                 igb_rx_queue_release(rxq);
1546                 return (-ENOMEM);
1547         }
1548         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1549                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1550
1551         dev->data->rx_queues[queue_idx] = rxq;
1552         igb_reset_rx_queue(rxq);
1553
1554         return 0;
1555 }
1556
1557 uint32_t
1558 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1559 {
1560 #define IGB_RXQ_SCAN_INTERVAL 4
1561         volatile union e1000_adv_rx_desc *rxdp;
1562         struct igb_rx_queue *rxq;
1563         uint32_t desc = 0;
1564
1565         if (rx_queue_id >= dev->data->nb_rx_queues) {
1566                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1567                 return 0;
1568         }
1569
1570         rxq = dev->data->rx_queues[rx_queue_id];
1571         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1572
1573         while ((desc < rxq->nb_rx_desc) &&
1574                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1575                 desc += IGB_RXQ_SCAN_INTERVAL;
1576                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1577                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1578                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1579                                 desc - rxq->nb_rx_desc]);
1580         }
1581
1582         return desc;
1583 }
1584
1585 int
1586 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1587 {
1588         volatile union e1000_adv_rx_desc *rxdp;
1589         struct igb_rx_queue *rxq = rx_queue;
1590         uint32_t desc;
1591
1592         if (unlikely(offset >= rxq->nb_rx_desc))
1593                 return 0;
1594         desc = rxq->rx_tail + offset;
1595         if (desc >= rxq->nb_rx_desc)
1596                 desc -= rxq->nb_rx_desc;
1597
1598         rxdp = &rxq->rx_ring[desc];
1599         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1600 }
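/*
 * Usage sketch (illustrative only): the two helpers above back the generic
 * ethdev descriptor status calls.  The port/queue/offset values are
 * assumptions.  Note that eth_igb_rx_queue_count() scans the ring in steps
 * of IGB_RXQ_SCAN_INTERVAL, so the count it returns is approximate.
 *
 *     // approximate number of completed descriptors on port 0, queue 0
 *     uint32_t done = rte_eth_rx_queue_count(0, 0);
 *
 *     // has the descriptor 32 entries past the current tail been written back?
 *     int ready = rte_eth_rx_descriptor_done(0, 0, 32);
 */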
1601
1602 void
1603 igb_dev_clear_queues(struct rte_eth_dev *dev)
1604 {
1605         uint16_t i;
1606         struct igb_tx_queue *txq;
1607         struct igb_rx_queue *rxq;
1608
1609         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1610                 txq = dev->data->tx_queues[i];
1611                 if (txq != NULL) {
1612                         igb_tx_queue_release_mbufs(txq);
1613                         igb_reset_tx_queue(txq, dev);
1614                 }
1615         }
1616
1617         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1618                 rxq = dev->data->rx_queues[i];
1619                 if (rxq != NULL) {
1620                         igb_rx_queue_release_mbufs(rxq);
1621                         igb_reset_rx_queue(rxq);
1622                 }
1623         }
1624 }
1625
1626 void
1627 igb_dev_free_queues(struct rte_eth_dev *dev)
1628 {
1629         uint16_t i;
1630
1631         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1632                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1633                 dev->data->rx_queues[i] = NULL;
1634         }
1635         dev->data->nb_rx_queues = 0;
1636
1637         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1638                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1639                 dev->data->tx_queues[i] = NULL;
1640         }
1641         dev->data->nb_tx_queues = 0;
1642 }
1643
1644 /**
1645  * Receive Side Scaling (RSS).
1646  * See section 7.1.1.7 in the following document:
1647  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1648  *
1649  * Principles:
1650  * The source and destination IP addresses of the IP header and the source and
1651  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1652  * against a configurable random key to compute a 32-bit RSS hash result.
1653  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1654  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1655  * RSS output index, which is used as the RX queue index in which to store
1656  * the received packets.
1657  * The following output is supplied in the RX write-back descriptor:
1658  *     - 32-bit result of the Microsoft RSS hash function,
1659  *     - 4-bit RSS type field.
1660  */
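/*
 * Conceptually, the queue selection described above works as sketched below.
 * This is a description of the hardware behaviour, not code executed by the
 * driver; rss_hash() and reta[] are hypothetical names.
 *
 *     uint32_t hash  = rss_hash(pkt, rss_key);  // Microsoft RSS hash (32 bit)
 *     uint8_t  index = hash & 0x7F;             // 7 LSBs select a RETA entry
 *     uint8_t  queue = reta[index];             // 3-bit RSS output index
 *
 * igb_rss_configure() below fills the 128 RETA entries round-robin over the
 * configured RX queues.
 */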
1661
1662 /*
1663  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1664  * Used as the default key.
1665  */
1666 static uint8_t rss_intel_key[40] = {
1667         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1668         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1669         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1670         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1671         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1672 };
1673
1674 static void
1675 igb_rss_disable(struct rte_eth_dev *dev)
1676 {
1677         struct e1000_hw *hw;
1678         uint32_t mrqc;
1679
1680         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1681         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1682         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1683         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1684 }
1685
1686 static void
1687 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1688 {
1689         uint8_t  *hash_key;
1690         uint32_t rss_key;
1691         uint32_t mrqc;
1692         uint64_t rss_hf;
1693         uint16_t i;
1694
1695         hash_key = rss_conf->rss_key;
1696         if (hash_key != NULL) {
1697                 /* Fill in RSS hash key */
1698                 for (i = 0; i < 10; i++) {
1699                         rss_key  = hash_key[(i * 4)];
1700                         rss_key |= hash_key[(i * 4) + 1] << 8;
1701                         rss_key |= hash_key[(i * 4) + 2] << 16;
1702                         rss_key |= hash_key[(i * 4) + 3] << 24;
1703                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1704                 }
1705         }
1706
1707         /* Set configured hashing protocols in MRQC register */
1708         rss_hf = rss_conf->rss_hf;
1709         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1710         if (rss_hf & ETH_RSS_IPV4)
1711                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1712         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1713                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1714         if (rss_hf & ETH_RSS_IPV6)
1715                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1716         if (rss_hf & ETH_RSS_IPV6_EX)
1717                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1718         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1719                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1720         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1721                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1722         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1723                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1724         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1725                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1726         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1727                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1728         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1729 }
1730
1731 int
1732 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1733                         struct rte_eth_rss_conf *rss_conf)
1734 {
1735         struct e1000_hw *hw;
1736         uint32_t mrqc;
1737         uint64_t rss_hf;
1738
1739         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1740
1741         /*
1742          * Before changing anything, first check that the update RSS operation
1743          * does not attempt to disable RSS, if RSS was enabled at
1744          * initialization time, or does not attempt to enable RSS, if RSS was
1745          * disabled at initialization time.
1746          */
1747         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1748         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1749         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1750                 if (rss_hf != 0) /* Enable RSS */
1751                         return -(EINVAL);
1752                 return 0; /* Nothing to do */
1753         }
1754         /* RSS enabled */
1755         if (rss_hf == 0) /* Disable RSS */
1756                 return -(EINVAL);
1757         igb_hw_rss_hash_set(hw, rss_conf);
1758         return 0;
1759 }
1760
1761 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1762                               struct rte_eth_rss_conf *rss_conf)
1763 {
1764         struct e1000_hw *hw;
1765         uint8_t *hash_key;
1766         uint32_t rss_key;
1767         uint32_t mrqc;
1768         uint64_t rss_hf;
1769         uint16_t i;
1770
1771         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772         hash_key = rss_conf->rss_key;
1773         if (hash_key != NULL) {
1774                 /* Return RSS hash key */
1775                 for (i = 0; i < 10; i++) {
1776                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1777                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1778                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1779                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1780                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1781                 }
1782         }
1783
1784         /* Get RSS functions configured in MRQC register */
1785         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1786         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1787                 rss_conf->rss_hf = 0;
1788                 return 0;
1789         }
1790         rss_hf = 0;
1791         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1792                 rss_hf |= ETH_RSS_IPV4;
1793         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1794                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1795         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1796                 rss_hf |= ETH_RSS_IPV6;
1797         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1798                 rss_hf |= ETH_RSS_IPV6_EX;
1799         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1800                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1801         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1802                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1803         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1804                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1805         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1806                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1807         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1808                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1809         rss_conf->rss_hf = rss_hf;
1810         return 0;
1811 }
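/*
 * Usage sketch (illustrative only): the two functions above back the generic
 * ethdev RSS hash calls.  The port id and the updated hash flags are
 * assumptions.
 *
 *     uint8_t key[40];
 *     struct rte_eth_rss_conf conf = { .rss_key = key };
 *
 *     rte_eth_dev_rss_hash_conf_get(0, &conf);      // read key and rss_hf
 *     conf.rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
 *     rte_eth_dev_rss_hash_update(0, &conf);        // reprogram RSSRK/MRQC
 */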
1812
1813 static void
1814 igb_rss_configure(struct rte_eth_dev *dev)
1815 {
1816         struct rte_eth_rss_conf rss_conf;
1817         struct e1000_hw *hw;
1818         uint32_t shift;
1819         uint16_t i;
1820
1821         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822
1823         /* Fill in redirection table. */
1824         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1825         for (i = 0; i < 128; i++) {
1826                 union e1000_reta {
1827                         uint32_t dword;
1828                         uint8_t  bytes[4];
1829                 } reta;
1830                 uint8_t q_idx;
1831
1832                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1833                                    i % dev->data->nb_rx_queues : 0);
1834                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1835                 if ((i & 3) == 3)
1836                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1837         }
1838
1839         /*
1840          * Configure the RSS key and the RSS protocols used to compute
1841          * the RSS hash of input packets.
1842          */
1843         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1844         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1845                 igb_rss_disable(dev);
1846                 return;
1847         }
1848         if (rss_conf.rss_key == NULL)
1849                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1850         igb_hw_rss_hash_set(hw, &rss_conf);
1851 }
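/*
 * Configuration sketch (illustrative only, not part of the driver): the
 * rss_conf consumed above is supplied by the application through
 * rte_eth_dev_configure().  The hash flags below are assumptions.
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *             .rx_adv_conf = {
 *                     .rss_conf = {
 *                             .rss_key = NULL,  // fall back to rss_intel_key
 *                             .rss_hf = ETH_RSS_IPV4 |
 *                                       ETH_RSS_NONFRAG_IPV4_TCP,
 *                     },
 *             },
 *     };
 *
 *     // e.g. 4 RX and 4 TX queues on port 0
 *     rte_eth_dev_configure(0, 4, 4, &port_conf);
 */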
1852
1853 /*
1854  * Check whether the MAC type supports VMDq.
1855  * Return 1 if it is supported, otherwise return 0.
1856  */
1857 static int
1858 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1859 {
1860         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1861
1862         switch (hw->mac.type) {
1863         case e1000_82576:
1864         case e1000_82580:
1865         case e1000_i350:
1866                 return 1;
1867         case e1000_82540:
1868         case e1000_82541:
1869         case e1000_82542:
1870         case e1000_82543:
1871         case e1000_82544:
1872         case e1000_82545:
1873         case e1000_82546:
1874         case e1000_82547:
1875         case e1000_82571:
1876         case e1000_82572:
1877         case e1000_82573:
1878         case e1000_82574:
1879         case e1000_82583:
1880         case e1000_i210:
1881         case e1000_i211:
1882         default:
1883                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1884                 return 0;
1885         }
1886 }
1887
1888 static int
1889 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1890 {
1891         struct rte_eth_vmdq_rx_conf *cfg;
1892         struct e1000_hw *hw;
1893         uint32_t mrqc, vt_ctl, vmolr, rctl;
1894         int i;
1895
1896         PMD_INIT_FUNC_TRACE();
1897
1898         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1900
1901         /* Check if the MAC type supports VMDq; a return value of 0 means it does not */
1902         if (igb_is_vmdq_supported(dev) == 0)
1903                 return -1;
1904
1905         igb_rss_disable(dev);
1906
1907         /* RCTL: enable VLAN filter */
1908         rctl = E1000_READ_REG(hw, E1000_RCTL);
1909         rctl |= E1000_RCTL_VFE;
1910         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1911
1912         /* MRQC: enable vmdq */
1913         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1914         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1915         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1916
1917         /* VTCTL:  pool selection according to VLAN tag */
1918         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1919         if (cfg->enable_default_pool)
1920                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1921         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1922         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1923
1924         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1925                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1926                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1927                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1928                         E1000_VMOLR_MPME);
1929
1930                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1931                         vmolr |= E1000_VMOLR_AUPE;
1932                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1933                         vmolr |= E1000_VMOLR_ROMPE;
1934                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1935                         vmolr |= E1000_VMOLR_ROPE;
1936                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1937                         vmolr |= E1000_VMOLR_BAM;
1938                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1939                         vmolr |= E1000_VMOLR_MPME;
1940
1941                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1942         }
1943
1944         /*
1945          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1946          * Both 82576 and 82580 support it.
1947          */
1948         if (hw->mac.type != e1000_i350) {
1949                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1950                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1951                         vmolr |= E1000_VMOLR_STRVLAN;
1952                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1953                 }
1954         }
1955
1956         /* VFTA - enable all vlan filters */
1957         for (i = 0; i < IGB_VFTA_SIZE; i++)
1958                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1959
1960         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1961         if (hw->mac.type != e1000_82580)
1962                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1963
1964         /*
1965          * RAH/RAL - allow pools to read specific mac addresses
1966          * In this case, all pools should be able to read from mac addr 0
1967          */
1968         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1969         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1970
1971         /* VLVF: set up filters for vlan tags as configured */
1972         for (i = 0; i < cfg->nb_pool_maps; i++) {
1973                 /* set vlan id in VF register and set the valid bit */
1974                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1975                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1976                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1977                         E1000_VLVF_POOLSEL_MASK)));
1978         }
1979
1980         E1000_WRITE_FLUSH(hw);
1981
1982         return 0;
1983 }
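/*
 * Configuration sketch (illustrative only): the vmdq_rx_conf consumed above
 * is supplied by the application, e.g. mapping VLAN 10 to pool 0 and VLAN 20
 * to pool 1.  The concrete VLAN ids, pool masks and accept flags below are
 * assumptions.
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
 *             .rx_adv_conf = {
 *                     .vmdq_rx_conf = {
 *                             .nb_queue_pools = ETH_8_POOLS,
 *                             .rx_mode = ETH_VMDQ_ACCEPT_UNTAG |
 *                                        ETH_VMDQ_ACCEPT_BROADCAST,
 *                             .nb_pool_maps = 2,
 *                             .pool_map = {
 *                                     { .vlan_id = 10, .pools = 1 << 0 },
 *                                     { .vlan_id = 20, .pools = 1 << 1 },
 *                             },
 *                     },
 *             },
 *     };
 */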
1984
1985
1986 /*********************************************************************
1987  *
1988  *  Enable receive unit.
1989  *
1990  **********************************************************************/
1991
1992 static int
1993 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1994 {
1995         struct igb_rx_entry *rxe = rxq->sw_ring;
1996         uint64_t dma_addr;
1997         unsigned i;
1998
1999         /* Initialize software ring entries. */
2000         for (i = 0; i < rxq->nb_rx_desc; i++) {
2001                 volatile union e1000_adv_rx_desc *rxd;
2002                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
2003
2004                 if (mbuf == NULL) {
2005                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2006                                      "queue_id=%hu", rxq->queue_id);
2007                         return (-ENOMEM);
2008                 }
2009                 dma_addr =
2010                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
2011                 rxd = &rxq->rx_ring[i];
2012                 rxd->read.hdr_addr = 0;
2013                 rxd->read.pkt_addr = dma_addr;
2014                 rxe[i].mbuf = mbuf;
2015         }
2016
2017         return 0;
2018 }
2019
2020 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2021 static int
2022 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2023 {
2024         struct e1000_hw *hw =
2025                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2026         uint32_t mrqc;
2027
2028         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2029                 /*
2030                  * SRIOV active scheme
2031                  * FIXME if support RSS together with VMDq & SRIOV
2032                  */
2033                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2034                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2035                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2036                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2037         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2038                 /*
2039                  * SRIOV inactive scheme
2040                  */
2041                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2042                         case ETH_MQ_RX_RSS:
2043                                 igb_rss_configure(dev);
2044                                 break;
2045                         case ETH_MQ_RX_VMDQ_ONLY:
2046                                 /*Configure general VMDQ only RX parameters*/
2047                                 igb_vmdq_rx_hw_configure(dev);
2048                                 break;
2049                         case ETH_MQ_RX_NONE:
2050                         /* if mq_mode is none, disable RSS mode. */
2051                         default:
2052                                 igb_rss_disable(dev);
2053                                 break;
2054                 }
2055         }
2056
2057         return 0;
2058 }
2059
2060 int
2061 eth_igb_rx_init(struct rte_eth_dev *dev)
2062 {
2063         struct e1000_hw     *hw;
2064         struct igb_rx_queue *rxq;
2065         uint32_t rctl;
2066         uint32_t rxcsum;
2067         uint32_t srrctl;
2068         uint16_t buf_size;
2069         uint16_t rctl_bsize;
2070         uint16_t i;
2071         int ret;
2072
2073         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2074         srrctl = 0;
2075
2076         /*
2077          * Make sure receives are disabled while setting
2078          * up the descriptor ring.
2079          */
2080         rctl = E1000_READ_REG(hw, E1000_RCTL);
2081         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2082
2083         /*
2084          * Configure support of jumbo frames, if any.
2085          */
2086         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2087                 rctl |= E1000_RCTL_LPE;
2088
2089                 /*
2090                  * Set maximum packet length by default, and might be updated
2091                  * together with enabling/disabling dual VLAN.
2092                  */
2093                 E1000_WRITE_REG(hw, E1000_RLPML,
2094                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2095                                                 VLAN_TAG_SIZE);
2096         } else
2097                 rctl &= ~E1000_RCTL_LPE;
2098
2099         /* Configure and enable each RX queue. */
2100         rctl_bsize = 0;
2101         dev->rx_pkt_burst = eth_igb_recv_pkts;
2102         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2103                 uint64_t bus_addr;
2104                 uint32_t rxdctl;
2105
2106                 rxq = dev->data->rx_queues[i];
2107
2108                 /* Allocate buffers for descriptor rings and set up queue */
2109                 ret = igb_alloc_rx_queue_mbufs(rxq);
2110                 if (ret)
2111                         return ret;
2112
2113                 /*
2114                  * Reset crc_len in case it was changed after queue setup by a
2115                  *  call to configure
2116                  */
2117                 rxq->crc_len =
2118                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2119                                                         0 : ETHER_CRC_LEN);
2120
2121                 bus_addr = rxq->rx_ring_phys_addr;
2122                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2123                                 rxq->nb_rx_desc *
2124                                 sizeof(union e1000_adv_rx_desc));
2125                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2126                                 (uint32_t)(bus_addr >> 32));
2127                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2128
2129                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2130
2131                 /*
2132                  * Configure RX buffer size.
2133                  */
2134                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2135                         RTE_PKTMBUF_HEADROOM);
2136                 if (buf_size >= 1024) {
2137                         /*
2138                          * Configure the BSIZEPACKET field of the SRRCTL
2139                          * register of the queue.
2140                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2141                          * If this field is equal to 0b, then RCTL.BSIZE
2142                          * determines the RX packet buffer size.
2143                          */
2144                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2145                                    E1000_SRRCTL_BSIZEPKT_MASK);
2146                         buf_size = (uint16_t) ((srrctl &
2147                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2148                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2149
2150                         /* Add dual VLAN tag length to support dual VLAN */
2151                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2152                                                 2 * VLAN_TAG_SIZE) > buf_size){
2153                                 if (!dev->data->scattered_rx)
2154                                         PMD_INIT_LOG(DEBUG,
2155                                                      "forcing scatter mode");
2156                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2157                                 dev->data->scattered_rx = 1;
2158                         }
2159                 } else {
2160                         /*
2161                          * Use BSIZE field of the device RCTL register.
2162                          */
2163                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2164                                 rctl_bsize = buf_size;
2165                         if (!dev->data->scattered_rx)
2166                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2167                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2168                         dev->data->scattered_rx = 1;
2169                 }
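                /*
                 * Worked example (illustrative, assuming a mempool whose data
                 * room is 2048 bytes with the default 128-byte headroom):
                 * buf_size starts at 1920, BSIZEPACKET is programmed to 1
                 * (1 KB granularity) and buf_size is rounded down to 1024, so
                 * any max_rx_pkt_len above 1024 - 2 * VLAN_TAG_SIZE forces
                 * scattered RX.
                 */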
2170
2171                 /* Set if packets are dropped when no descriptors available */
2172                 if (rxq->drop_en)
2173                         srrctl |= E1000_SRRCTL_DROP_EN;
2174
2175                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2176
2177                 /* Enable this RX queue. */
2178                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2179                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2180                 rxdctl &= 0xFFF00000;
2181                 rxdctl |= (rxq->pthresh & 0x1F);
2182                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2183                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2184                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2185         }
2186
2187         if (dev->data->dev_conf.rxmode.enable_scatter) {
2188                 if (!dev->data->scattered_rx)
2189                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2190                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2191                 dev->data->scattered_rx = 1;
2192         }
2193
2194         /*
2195          * Setup BSIZE field of RCTL register, if needed.
2196          * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2197          * register, since the code above configures the SRRCTL register of
2198          * the RX queue in such a case.
2199          * All configurable sizes are:
2200          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2201          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2202          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2203          *  2048: rctl |= E1000_RCTL_SZ_2048;
2204          *  1024: rctl |= E1000_RCTL_SZ_1024;
2205          *   512: rctl |= E1000_RCTL_SZ_512;
2206          *   256: rctl |= E1000_RCTL_SZ_256;
2207          */
2208         if (rctl_bsize > 0) {
2209                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2210                         rctl |= E1000_RCTL_SZ_512;
2211                 else /* 256 <= buf_size < 512 - use 256 */
2212                         rctl |= E1000_RCTL_SZ_256;
2213         }
2214
2215         /*
2216          * Configure RSS if device configured with multiple RX queues.
2217          */
2218         igb_dev_mq_rx_configure(dev);
2219
2220         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2221         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2222
2223         /*
2224          * Setup the Checksum Register.
2225          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2226          */
2227         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2228         rxcsum |= E1000_RXCSUM_PCSD;
2229
2230         /* Enable both L3/L4 rx checksum offload */
2231         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2232                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2233         else
2234                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2235         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2236
2237         /* Setup the Receive Control Register. */
2238         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2239                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2240
2241                 /* set STRCRC bit in all queues */
2242                 if (hw->mac.type == e1000_i350 ||
2243                     hw->mac.type == e1000_i210 ||
2244                     hw->mac.type == e1000_i211 ||
2245                     hw->mac.type == e1000_i354) {
2246                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2247                                 rxq = dev->data->rx_queues[i];
2248                                 uint32_t dvmolr = E1000_READ_REG(hw,
2249                                         E1000_DVMOLR(rxq->reg_idx));
2250                                 dvmolr |= E1000_DVMOLR_STRCRC;
2251                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2252                         }
2253                 }
2254         } else {
2255                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2256
2257                 /* clear STRCRC bit in all queues */
2258                 if (hw->mac.type == e1000_i350 ||
2259                     hw->mac.type == e1000_i210 ||
2260                     hw->mac.type == e1000_i211 ||
2261                     hw->mac.type == e1000_i354) {
2262                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2263                                 rxq = dev->data->rx_queues[i];
2264                                 uint32_t dvmolr = E1000_READ_REG(hw,
2265                                         E1000_DVMOLR(rxq->reg_idx));
2266                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2267                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2268                         }
2269                 }
2270         }
2271
2272         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2273         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2274                 E1000_RCTL_RDMTS_HALF |
2275                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2276
2277         /* Make sure VLAN Filters are off. */
2278         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2279                 rctl &= ~E1000_RCTL_VFE;
2280         /* Don't store bad packets. */
2281         rctl &= ~E1000_RCTL_SBP;
2282
2283         /* Enable Receives. */
2284         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2285
2286         /*
2287          * Setup the HW Rx Head and Tail Descriptor Pointers.
2288          * This needs to be done after enable.
2289          */
2290         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2291                 rxq = dev->data->rx_queues[i];
2292                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2293                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2294         }
2295
2296         return 0;
2297 }
2298
2299 /*********************************************************************
2300  *
2301  *  Enable transmit unit.
2302  *
2303  **********************************************************************/
2304 void
2305 eth_igb_tx_init(struct rte_eth_dev *dev)
2306 {
2307         struct e1000_hw     *hw;
2308         struct igb_tx_queue *txq;
2309         uint32_t tctl;
2310         uint32_t txdctl;
2311         uint16_t i;
2312
2313         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2314
2315         /* Setup the Base and Length of the Tx Descriptor Rings. */
2316         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2317                 uint64_t bus_addr;
2318                 txq = dev->data->tx_queues[i];
2319                 bus_addr = txq->tx_ring_phys_addr;
2320
2321                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2322                                 txq->nb_tx_desc *
2323                                 sizeof(union e1000_adv_tx_desc));
2324                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2325                                 (uint32_t)(bus_addr >> 32));
2326                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2327
2328                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2329                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2330                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2331
2332                 /* Setup Transmit threshold registers. */
2333                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2334                 txdctl |= txq->pthresh & 0x1F;
2335                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2336                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2337                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2338                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2339         }
2340
2341         /* Program the Transmit Control Register. */
2342         tctl = E1000_READ_REG(hw, E1000_TCTL);
2343         tctl &= ~E1000_TCTL_CT;
2344         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2345                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2346
2347         e1000_config_collision_dist(hw);
2348
2349         /* This write will effectively turn on the transmit unit. */
2350         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2351 }
2352
2353 /*********************************************************************
2354  *
2355  *  Enable VF receive unit.
2356  *
2357  **********************************************************************/
2358 int
2359 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2360 {
2361         struct e1000_hw     *hw;
2362         struct igb_rx_queue *rxq;
2363         uint32_t srrctl;
2364         uint16_t buf_size;
2365         uint16_t rctl_bsize;
2366         uint16_t i;
2367         int ret;
2368
2369         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2370
2371         /* setup MTU */
2372         e1000_rlpml_set_vf(hw,
2373                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2374                 VLAN_TAG_SIZE));
2375
2376         /* Configure and enable each RX queue. */
2377         rctl_bsize = 0;
2378         dev->rx_pkt_burst = eth_igb_recv_pkts;
2379         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2380                 uint64_t bus_addr;
2381                 uint32_t rxdctl;
2382
2383                 rxq = dev->data->rx_queues[i];
2384
2385                 /* Allocate buffers for descriptor rings and set up queue */
2386                 ret = igb_alloc_rx_queue_mbufs(rxq);
2387                 if (ret)
2388                         return ret;
2389
2390                 bus_addr = rxq->rx_ring_phys_addr;
2391                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2392                                 rxq->nb_rx_desc *
2393                                 sizeof(union e1000_adv_rx_desc));
2394                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2395                                 (uint32_t)(bus_addr >> 32));
2396                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2397
2398                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2399
2400                 /*
2401                  * Configure RX buffer size.
2402                  */
2403                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2404                         RTE_PKTMBUF_HEADROOM);
2405                 if (buf_size >= 1024) {
2406                         /*
2407                          * Configure the BSIZEPACKET field of the SRRCTL
2408                          * register of the queue.
2409                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2410                          * If this field is equal to 0b, then RCTL.BSIZE
2411                          * determines the RX packet buffer size.
2412                          */
2413                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2414                                    E1000_SRRCTL_BSIZEPKT_MASK);
2415                         buf_size = (uint16_t) ((srrctl &
2416                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2417                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2418
2419                         /* Add dual VLAN tag length to support dual VLAN */
2420                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2421                                                 2 * VLAN_TAG_SIZE) > buf_size){
2422                                 if (!dev->data->scattered_rx)
2423                                         PMD_INIT_LOG(DEBUG,
2424                                                      "forcing scatter mode");
2425                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2426                                 dev->data->scattered_rx = 1;
2427                         }
2428                 } else {
2429                         /*
2430                          * Use BSIZE field of the device RCTL register.
2431                          */
2432                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2433                                 rctl_bsize = buf_size;
2434                         if (!dev->data->scattered_rx)
2435                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2436                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2437                         dev->data->scattered_rx = 1;
2438                 }
2439
2440                 /* Set if packets are dropped when no descriptors available */
2441                 if (rxq->drop_en)
2442                         srrctl |= E1000_SRRCTL_DROP_EN;
2443
2444                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2445
2446                 /* Enable this RX queue. */
2447                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2448                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2449                 rxdctl &= 0xFFF00000;
2450                 rxdctl |= (rxq->pthresh & 0x1F);
2451                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2452                 if (hw->mac.type == e1000_vfadapt) {
2453                         /*
2454                          * Workaround for the 82576 VF erratum:
2455                          * force WTHRESH to 1 to avoid write-back
2456                          * sometimes not being triggered.
2457                          */
2458                         rxdctl |= 0x10000;
2459                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2460                 }
2461                 else
2462                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2463                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2464         }
2465
2466         if (dev->data->dev_conf.rxmode.enable_scatter) {
2467                 if (!dev->data->scattered_rx)
2468                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2469                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2470                 dev->data->scattered_rx = 1;
2471         }
2472
2473         /*
2474          * Setup the HW Rx Head and Tail Descriptor Pointers.
2475          * This needs to be done after enable.
2476          */
2477         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2478                 rxq = dev->data->rx_queues[i];
2479                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2480                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2481         }
2482
2483         return 0;
2484 }
2485
2486 /*********************************************************************
2487  *
2488  *  Enable VF transmit unit.
2489  *
2490  **********************************************************************/
2491 void
2492 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2493 {
2494         struct e1000_hw     *hw;
2495         struct igb_tx_queue *txq;
2496         uint32_t txdctl;
2497         uint16_t i;
2498
2499         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2500
2501         /* Setup the Base and Length of the Tx Descriptor Rings. */
2502         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2503                 uint64_t bus_addr;
2504
2505                 txq = dev->data->tx_queues[i];
2506                 bus_addr = txq->tx_ring_phys_addr;
2507                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2508                                 txq->nb_tx_desc *
2509                                 sizeof(union e1000_adv_tx_desc));
2510                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2511                                 (uint32_t)(bus_addr >> 32));
2512                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2513
2514                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2515                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2516                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2517
2518                 /* Setup Transmit threshold registers. */
2519                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2520                 txdctl |= txq->pthresh & 0x1F;
2521                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2522                 if (hw->mac.type == e1000_82576) {
2523                         /*
2524                          * Workaround for the 82576 VF erratum:
2525                          * force WTHRESH to 1 to avoid write-back
2526                          * sometimes not being triggered.
2527                          */
2528                         txdctl |= 0x10000;
2529                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2530                 }
2531                 else
2532                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2533                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2534                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2535         }
2536
2537 }