net/e1000: add Tx preparation
[dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
61 #include <rte_mbuf.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
65 #include <rte_udp.h>
66 #include <rte_tcp.h>
67 #include <rte_sctp.h>
68 #include <rte_net.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit Mask to indicate what bits required for building TX context */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK |                 \
80                 PKT_TX_TCP_SEG)
81
82 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
83                 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
84
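/*
 * Illustrative sketch (not part of the driver): the Tx prepare stage added
 * below rejects any mbuf that carries an offload flag outside
 * IGB_TX_OFFLOAD_MASK. An application could run the same check before
 * queueing a packet; handle_unsupported() is a hypothetical helper:
 *
 *     if (mbuf->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK)
 *         handle_unsupported(mbuf);
 */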
85 /**
86  * Structure associated with each descriptor of the RX ring of a RX queue.
87  */
88 struct igb_rx_entry {
89         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
90 };
91
92 /**
93  * Structure associated with each descriptor of the TX ring of a TX queue.
94  */
95 struct igb_tx_entry {
96         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
97         uint16_t next_id; /**< Index of next descriptor in ring. */
98         uint16_t last_id; /**< Index of last scattered descriptor. */
99 };
100
101 /**
102  * Structure associated with each RX queue.
103  */
104 struct igb_rx_queue {
105         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
106         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
107         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
108         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
109         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
110         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
111         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
112         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
113         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
114         uint16_t            rx_tail;    /**< current value of RDT register. */
115         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
116         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
117         uint16_t            queue_id;   /**< RX queue index. */
118         uint16_t            reg_idx;    /**< RX queue register index. */
119         uint8_t             port_id;    /**< Device port identifier. */
120         uint8_t             pthresh;    /**< Prefetch threshold register. */
121         uint8_t             hthresh;    /**< Host threshold register. */
122         uint8_t             wthresh;    /**< Write-back threshold register. */
123         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
124         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
125 };
126
127 /**
128  * Hardware context number
129  */
130 enum igb_advctx_num {
131         IGB_CTX_0    = 0, /**< CTX0    */
132         IGB_CTX_1    = 1, /**< CTX1    */
133         IGB_CTX_NUM  = 2, /**< CTX_NUM */
134 };
135
136 /** Offload features */
137 union igb_tx_offload {
138         uint64_t data;
139         struct {
140                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
141                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
142                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
143                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
144                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
145
146                 /* uint64_t unused:8; */
147         };
148 };
149
150 /*
151  * Compare masks for igb_tx_offload.data;
152  * they must be kept in sync with the igb_tx_offload layout above.
153  */
154 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
155 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
156 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
157 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
158 /** Mac + IP + TCP + Mss mask. */
159 #define TX_TSO_CMP_MASK \
160         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
161
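/*
 * Layout reminder (illustrative, derived from the bit-fields and masks
 * above): l3_len/l2_len occupy bits 0-15 (TX_MACIP_LEN_CMP_MASK), vlan_tci
 * bits 16-31 (TX_VLAN_CMP_MASK), l4_len bits 32-39 (TX_TCP_LEN_CMP_MASK)
 * and tso_segsz bits 40-55 (TX_TSO_MSS_CMP_MASK). For example:
 *
 *     union igb_tx_offload off = { .data = 0 };
 *     off.l2_len = 14;     // Ethernet header
 *     off.l3_len = 20;     // IPv4 header without options
 *     // (off.data & TX_MACIP_LEN_CMP_MASK) now holds both lengths
 */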
162 /**
163  * Structure used to check whether a new context descriptor needs to be built.
164  */
165 struct igb_advctx_info {
166         uint64_t flags;           /**< ol_flags related to context build. */
167         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
168         union igb_tx_offload tx_offload;
169         /** compare mask for tx offload. */
170         union igb_tx_offload tx_offload_mask;
171 };
172
173 /**
174  * Structure associated with each TX queue.
175  */
176 struct igb_tx_queue {
177         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
178         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
179         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
180         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
181         uint32_t               txd_type;      /**< Device-specific TXD type */
182         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
183         uint16_t               tx_tail; /**< Current value of TDT register. */
184         uint16_t               tx_head;
185         /**< Index of first used TX descriptor. */
186         uint16_t               queue_id; /**< TX queue index. */
187         uint16_t               reg_idx;  /**< TX queue register index. */
188         uint8_t                port_id;  /**< Device port identifier. */
189         uint8_t                pthresh;  /**< Prefetch threshold register. */
190         uint8_t                hthresh;  /**< Host threshold register. */
191         uint8_t                wthresh;  /**< Write-back threshold register. */
192         uint32_t               ctx_curr;
193         /**< Currently used hardware context (0 or 1). */
194         uint32_t               ctx_start;
195         /**< Start context position for transmit queue. */
196         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
197         /**< Hardware context history.*/
198 };
199
200 #if 1
201 #define RTE_PMD_USE_PREFETCH
202 #endif
203
204 #ifdef RTE_PMD_USE_PREFETCH
205 #define rte_igb_prefetch(p)     rte_prefetch0(p)
206 #else
207 #define rte_igb_prefetch(p)     do {} while(0)
208 #endif
209
210 #ifdef RTE_PMD_PACKET_PREFETCH
211 #define rte_packet_prefetch(p) rte_prefetch1(p)
212 #else
213 #define rte_packet_prefetch(p)  do {} while(0)
214 #endif
215
216 /*
217  * Macros for the VMDq feature and the TSO limits of 1 GbE NICs.
218  */
219 #define E1000_VMOLR_SIZE                        (8)
220 #define IGB_TSO_MAX_HDRLEN                      (512)
221 #define IGB_TSO_MAX_MSS                         (9216)
222
223 /*********************************************************************
224  *
225  *  TX function
226  *
227  **********************************************************************/
228
229 /*
230  * There are some hardware limitations for TCP segmentation offload.
231  * Check whether the requested parameters are valid.
232  */
233 static inline uint64_t
234 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
235 {
236         if (!(ol_req & PKT_TX_TCP_SEG))
237                 return ol_req;
238         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
239                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
240                 ol_req &= ~PKT_TX_TCP_SEG;
241                 ol_req |= PKT_TX_TCP_CKSUM;
242         }
243         return ol_req;
244 }
245
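/*
 * Example (illustrative, values assumed): a TSO request with an oversized
 * MSS is downgraded to a plain TCP checksum request rather than rejected:
 *
 *     union igb_tx_offload off = { .data = 0 };
 *     off.l2_len = 14; off.l3_len = 20; off.l4_len = 20;
 *     off.tso_segsz = 16384;               // exceeds IGB_TSO_MAX_MSS
 *     uint64_t req = check_tso_para(PKT_TX_TCP_SEG, off);
 *     // req == PKT_TX_TCP_CKSUM; PKT_TX_TCP_SEG has been cleared
 */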
246 /*
247  * Advanced context descriptors are almost the same between igb and ixgbe.
248  * This is kept as a separate function; there may be room for optimization
249  * here, and rework is required to use the pre-defined values.
250  */
251
252 static inline void
253 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
254                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
255                 uint64_t ol_flags, union igb_tx_offload tx_offload)
256 {
257         uint32_t type_tucmd_mlhl;
258         uint32_t mss_l4len_idx;
259         uint32_t ctx_idx, ctx_curr;
260         uint32_t vlan_macip_lens;
261         union igb_tx_offload tx_offload_mask;
262
263         ctx_curr = txq->ctx_curr;
264         ctx_idx = ctx_curr + txq->ctx_start;
265
266         tx_offload_mask.data = 0;
267         type_tucmd_mlhl = 0;
268
269         /* Specify which HW CTX to upload. */
270         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
271
272         if (ol_flags & PKT_TX_VLAN_PKT)
273                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
274
275         /* check if TCP segmentation required for this packet */
276         if (ol_flags & PKT_TX_TCP_SEG) {
277                 /* implies IP cksum in IPv4 */
278                 if (ol_flags & PKT_TX_IP_CKSUM)
279                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
280                                 E1000_ADVTXD_TUCMD_L4T_TCP |
281                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
282                 else
283                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
284                                 E1000_ADVTXD_TUCMD_L4T_TCP |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286
287                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
288                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
289                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
290         } else { /* no TSO, check if hardware checksum is needed */
291                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
292                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
293
294                 if (ol_flags & PKT_TX_IP_CKSUM)
295                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
296
297                 switch (ol_flags & PKT_TX_L4_MASK) {
298                 case PKT_TX_UDP_CKSUM:
299                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
300                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
301                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
302                         break;
303                 case PKT_TX_TCP_CKSUM:
304                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
305                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
306                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
307                         break;
308                 case PKT_TX_SCTP_CKSUM:
309                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
310                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
311                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
312                         break;
313                 default:
314                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
315                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
316                         break;
317                 }
318         }
319
320         txq->ctx_cache[ctx_curr].flags = ol_flags;
321         txq->ctx_cache[ctx_curr].tx_offload.data =
322                 tx_offload_mask.data & tx_offload.data;
323         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
324
325         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
326         vlan_macip_lens = (uint32_t)tx_offload.data;
327         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
328         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
329         ctx_txd->seqnum_seed = 0;
330 }
331
332 /*
333  * Check which hardware context can be used. Use the existing match
334  * or create a new context descriptor.
335  */
336 static inline uint32_t
337 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
338                 union igb_tx_offload tx_offload)
339 {
340         /* Check whether the current context matches. */
341         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
342                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
343                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
344                         return txq->ctx_curr;
345         }
346
347         /* Check whether the other (second) context matches. */
348         txq->ctx_curr ^= 1;
349         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
350                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
351                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
352                         return txq->ctx_curr;
353         }
354
355         /* No match: a new context descriptor must be built. */
356         return IGB_CTX_NUM;
357 }
358
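/*
 * Usage sketch (mirrors the transmit path below): a return value of
 * IGB_CTX_NUM means that neither cached context matches and a new context
 * descriptor must be built before the data descriptors:
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *     new_ctx = (ctx == IGB_CTX_NUM);
 */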
359 static inline uint32_t
360 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
361 {
362         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
363         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
364         uint32_t tmp;
365
366         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
367         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
368         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
369         return tmp;
370 }
371
372 static inline uint32_t
373 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
374 {
375         uint32_t cmdtype;
376         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
377         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
378         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
379         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
380         return cmdtype;
381 }
382
383 uint16_t
384 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
385                uint16_t nb_pkts)
386 {
387         struct igb_tx_queue *txq;
388         struct igb_tx_entry *sw_ring;
389         struct igb_tx_entry *txe, *txn;
390         volatile union e1000_adv_tx_desc *txr;
391         volatile union e1000_adv_tx_desc *txd;
392         struct rte_mbuf     *tx_pkt;
393         struct rte_mbuf     *m_seg;
394         uint64_t buf_dma_addr;
395         uint32_t olinfo_status;
396         uint32_t cmd_type_len;
397         uint32_t pkt_len;
398         uint16_t slen;
399         uint64_t ol_flags;
400         uint16_t tx_end;
401         uint16_t tx_id;
402         uint16_t tx_last;
403         uint16_t nb_tx;
404         uint64_t tx_ol_req;
405         uint32_t new_ctx = 0;
406         uint32_t ctx = 0;
407         union igb_tx_offload tx_offload = {0};
408
409         txq = tx_queue;
410         sw_ring = txq->sw_ring;
411         txr     = txq->tx_ring;
412         tx_id   = txq->tx_tail;
413         txe = &sw_ring[tx_id];
414
415         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
416                 tx_pkt = *tx_pkts++;
417                 pkt_len = tx_pkt->pkt_len;
418
419                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
420
421                 /*
422                  * The number of descriptors that must be allocated for a
423                  * packet is the number of segments of that packet, plus 1
424                  * Context Descriptor for the VLAN Tag Identifier, if any.
425                  * Determine the last TX descriptor to allocate in the TX ring
426                  * for the packet, starting from the current position (tx_id)
427                  * in the ring.
428                  */
429                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
430
431                 ol_flags = tx_pkt->ol_flags;
432                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
433
434                 /* Check whether a context descriptor needs to be built. */
435                 if (tx_ol_req) {
436                         tx_offload.l2_len = tx_pkt->l2_len;
437                         tx_offload.l3_len = tx_pkt->l3_len;
438                         tx_offload.l4_len = tx_pkt->l4_len;
439                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
440                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
441                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
442
443                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
444                         /* Only allocate a context descriptor if required. */
445                         new_ctx = (ctx == IGB_CTX_NUM);
446                         ctx = txq->ctx_curr + txq->ctx_start;
447                         tx_last = (uint16_t) (tx_last + new_ctx);
448                 }
449                 if (tx_last >= txq->nb_tx_desc)
450                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
451
452                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
453                            " tx_first=%u tx_last=%u",
454                            (unsigned) txq->port_id,
455                            (unsigned) txq->queue_id,
456                            (unsigned) pkt_len,
457                            (unsigned) tx_id,
458                            (unsigned) tx_last);
459
460                 /*
461                  * Check if there are enough free descriptors in the TX ring
462                  * to transmit the next packet.
463                  * This operation is based on the two following rules:
464                  *
465                  *   1- Only check that the last needed TX descriptor can be
466                  *      allocated (by construction, if that descriptor is free,
467                  *      all intermediate ones are also free).
468                  *
469                  *      For this purpose, the index of the last TX descriptor
470                  *      used for a packet (the "last descriptor" of a packet)
471                  *      is recorded in the TX entries (the last one included)
472                  *      that are associated with all TX descriptors allocated
473                  *      for that packet.
474                  *
475                  *   2- Avoid allocating the last free TX descriptor of the
476                  *      ring, in order to never set the TDT register with the
477                  *      same value stored in parallel by the NIC in the TDH
478                  *      register, which would make the TX engine of the NIC
479                  *      enter a deadlock situation.
480                  *
481                  *      By extension, avoid allocating a free descriptor that
482                  *      belongs to the last set of free descriptors allocated
483                  *      to the same packet previously transmitted.
484                  */
485
486                 /*
487                  * The "last descriptor" of the previously sent packet, if any,
488                  * which used the last descriptor to allocate.
489                  */
490                 tx_end = sw_ring[tx_last].last_id;
491
492                 /*
493                  * The next descriptor following that "last descriptor" in the
494                  * ring.
495                  */
496                 tx_end = sw_ring[tx_end].next_id;
497
498                 /*
499                  * The "last descriptor" associated with that next descriptor.
500                  */
501                 tx_end = sw_ring[tx_end].last_id;
502
503                 /*
504                  * Check that this descriptor is free.
505                  */
506                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
507                         if (nb_tx == 0)
508                                 return 0;
509                         goto end_of_tx;
510                 }
511
512                 /*
513                  * Set common flags of all TX Data Descriptors.
514                  *
515                  * The following bits must be set in all Data Descriptors:
516                  *   - E1000_ADVTXD_DTYP_DATA
517                  *   - E1000_ADVTXD_DCMD_DEXT
518                  *
519                  * The following bits must be set in the first Data Descriptor
520                  * and are ignored in the other ones:
521                  *   - E1000_ADVTXD_DCMD_IFCS
522                  *   - E1000_ADVTXD_MAC_1588
523                  *   - E1000_ADVTXD_DCMD_VLE
524                  *
525                  * The following bits must only be set in the last Data
526                  * Descriptor:
527                  *   - E1000_TXD_CMD_EOP
528                  *
529                  * The following bits can be set in any Data Descriptor, but
530                  * are only set in the last Data Descriptor:
531                  *   - E1000_TXD_CMD_RS
532                  */
533                 cmd_type_len = txq->txd_type |
534                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
535                 if (tx_ol_req & PKT_TX_TCP_SEG)
536                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
537                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
538 #if defined(RTE_LIBRTE_IEEE1588)
539                 if (ol_flags & PKT_TX_IEEE1588_TMST)
540                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
541 #endif
542                 if (tx_ol_req) {
543                         /* Setup TX Advanced context descriptor if required */
544                         if (new_ctx) {
545                                 volatile struct e1000_adv_tx_context_desc *
546                                     ctx_txd;
547
548                                 ctx_txd = (volatile struct
549                                     e1000_adv_tx_context_desc *)
550                                     &txr[tx_id];
551
552                                 txn = &sw_ring[txe->next_id];
553                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
554
555                                 if (txe->mbuf != NULL) {
556                                         rte_pktmbuf_free_seg(txe->mbuf);
557                                         txe->mbuf = NULL;
558                                 }
559
560                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
561
562                                 txe->last_id = tx_last;
563                                 tx_id = txe->next_id;
564                                 txe = txn;
565                         }
566
567                         /* Setup the TX Advanced Data Descriptor */
568                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
569                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
570                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
571                 }
572
573                 m_seg = tx_pkt;
574                 do {
575                         txn = &sw_ring[txe->next_id];
576                         txd = &txr[tx_id];
577
578                         if (txe->mbuf != NULL)
579                                 rte_pktmbuf_free_seg(txe->mbuf);
580                         txe->mbuf = m_seg;
581
582                         /*
583                          * Set up transmit descriptor.
584                          */
585                         slen = (uint16_t) m_seg->data_len;
586                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
587                         txd->read.buffer_addr =
588                                 rte_cpu_to_le_64(buf_dma_addr);
589                         txd->read.cmd_type_len =
590                                 rte_cpu_to_le_32(cmd_type_len | slen);
591                         txd->read.olinfo_status =
592                                 rte_cpu_to_le_32(olinfo_status);
593                         txe->last_id = tx_last;
594                         tx_id = txe->next_id;
595                         txe = txn;
596                         m_seg = m_seg->next;
597                 } while (m_seg != NULL);
598
599                 /*
600                  * The last packet data descriptor needs End Of Packet (EOP)
601                  * and Report Status (RS).
602                  */
603                 txd->read.cmd_type_len |=
604                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
605         }
606  end_of_tx:
607         rte_wmb();
608
609         /*
610          * Set the Transmit Descriptor Tail (TDT).
611          */
612         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
613         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
614                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
615                    (unsigned) tx_id, (unsigned) nb_tx);
616         txq->tx_tail = tx_id;
617
618         return nb_tx;
619 }
620
621 /*********************************************************************
622  *
623  *  TX prep functions
624  *
625  **********************************************************************/
626 uint16_t
627 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
628                 uint16_t nb_pkts)
629 {
630         int i, ret;
631         struct rte_mbuf *m;
632
633         for (i = 0; i < nb_pkts; i++) {
634                 m = tx_pkts[i];
635
636                 /* Check some limitations for TSO in hardware */
637                 if (m->ol_flags & PKT_TX_TCP_SEG)
638                         if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
639                                         (m->l2_len + m->l3_len + m->l4_len >
640                                         IGB_TSO_MAX_HDRLEN)) {
641                                 rte_errno = EINVAL;
642                                 return i;
643                         }
644
645                 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
646                         rte_errno = ENOTSUP;
647                         return i;
648                 }
649
650 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
651                 ret = rte_validate_tx_offload(m);
652                 if (ret != 0) {
653                         rte_errno = -ret;
654                         return i;
655                 }
656 #endif
657                 ret = rte_net_intel_cksum_prepare(m);
658                 if (ret != 0) {
659                         rte_errno = -ret;
660                         return i;
661                 }
662         }
663
664         return i;
665 }
666
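/*
 * Typical usage from an application (illustrative sketch): run the burst
 * through rte_eth_tx_prepare() before rte_eth_tx_burst() so that TSO and
 * checksum requests this PMD cannot honour are caught early. port_id,
 * queue_id, pkts and nb are assumed to be set up by the caller, and
 * drop_or_fix() is a hypothetical application helper:
 *
 *     uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *     if (unlikely(nb_prep < nb))
 *         drop_or_fix(pkts, nb_prep, nb);   // rte_errno describes the fault
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */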
667 /*********************************************************************
668  *
669  *  RX functions
670  *
671  **********************************************************************/
672 #define IGB_PACKET_TYPE_IPV4              0X01
673 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
674 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
675 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
676 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
677 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
678 #define IGB_PACKET_TYPE_IPV6              0X04
679 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
680 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
681 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
682 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
683 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
684 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
685 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
686 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
687 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
688 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
689 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
690 #define IGB_PACKET_TYPE_MAX               0X80
691 #define IGB_PACKET_TYPE_MASK              0X7F
692 #define IGB_PACKET_TYPE_SHIFT             0X04
693 static inline uint32_t
694 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
695 {
696         static const uint32_t
697                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
698                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV4,
700                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4_EXT,
702                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
703                         RTE_PTYPE_L3_IPV6,
704                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
705                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
706                         RTE_PTYPE_INNER_L3_IPV6,
707                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
708                         RTE_PTYPE_L3_IPV6_EXT,
709                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
710                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
711                         RTE_PTYPE_INNER_L3_IPV6_EXT,
712                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
713                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
714                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
715                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
716                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
717                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
718                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
719                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
720                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
721                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
722                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
723                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
724                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
725                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
726                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
727                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
728                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
729                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
730                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
731                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
732                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
733                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
734                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
735                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
736                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
737                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
738                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
739                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
740         };
741         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
742                 return RTE_PTYPE_UNKNOWN;
743
744         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
745
746         return ptype_table[pkt_info];
747 }
748
749 static inline uint64_t
750 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
751 {
752         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
753
754 #if defined(RTE_LIBRTE_IEEE1588)
755         static uint32_t ip_pkt_etqf_map[8] = {
756                 0, 0, 0, PKT_RX_IEEE1588_PTP,
757                 0, 0, 0, 0,
758         };
759
760         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
761         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
762
763         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
764         if (hw->mac.type == e1000_i210)
765                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
766         else
767                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
768 #else
769         RTE_SET_USED(rxq);
770 #endif
771
772         return pkt_flags;
773 }
774
775 static inline uint64_t
776 rx_desc_status_to_pkt_flags(uint32_t rx_status)
777 {
778         uint64_t pkt_flags;
779
780         /* Check if VLAN present */
781         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
782                 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
783
784 #if defined(RTE_LIBRTE_IEEE1588)
785         if (rx_status & E1000_RXD_STAT_TMST)
786                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
787 #endif
788         return pkt_flags;
789 }
790
791 static inline uint64_t
792 rx_desc_error_to_pkt_flags(uint32_t rx_status)
793 {
794         /*
795          * Bit 30: IPE, IPv4 checksum error
796          * Bit 29: L4I, L4 integrity error
797          */
798
799         static uint64_t error_to_pkt_flags_map[4] = {
800                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
801                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
802                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
803                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
804         };
805         return error_to_pkt_flags_map[(rx_status >>
806                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
807 }
808
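/*
 * Illustrative sketch: how an application may consume the checksum flags
 * produced above for a received mbuf "m" (hypothetical variable):
 *
 *     if (m->ol_flags & PKT_RX_IP_CKSUM_BAD)
 *         ;   // IPv4 header checksum failed (IPE bit set)
 *     if (m->ol_flags & PKT_RX_L4_CKSUM_BAD)
 *         ;   // L4 checksum failed (L4 integrity bit set)
 */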
809 uint16_t
810 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
811                uint16_t nb_pkts)
812 {
813         struct igb_rx_queue *rxq;
814         volatile union e1000_adv_rx_desc *rx_ring;
815         volatile union e1000_adv_rx_desc *rxdp;
816         struct igb_rx_entry *sw_ring;
817         struct igb_rx_entry *rxe;
818         struct rte_mbuf *rxm;
819         struct rte_mbuf *nmb;
820         union e1000_adv_rx_desc rxd;
821         uint64_t dma_addr;
822         uint32_t staterr;
823         uint32_t hlen_type_rss;
824         uint16_t pkt_len;
825         uint16_t rx_id;
826         uint16_t nb_rx;
827         uint16_t nb_hold;
828         uint64_t pkt_flags;
829
830         nb_rx = 0;
831         nb_hold = 0;
832         rxq = rx_queue;
833         rx_id = rxq->rx_tail;
834         rx_ring = rxq->rx_ring;
835         sw_ring = rxq->sw_ring;
836         while (nb_rx < nb_pkts) {
837                 /*
838                  * The order of operations here is important as the DD status
839                  * bit must not be read after any other descriptor fields.
840                  * rx_ring and rxdp are pointing to volatile data so the order
841                  * of accesses cannot be reordered by the compiler. If they were
842                  * not volatile, they could be reordered which could lead to
843                  * using invalid descriptor fields when read from rxd.
844                  */
845                 rxdp = &rx_ring[rx_id];
846                 staterr = rxdp->wb.upper.status_error;
847                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
848                         break;
849                 rxd = *rxdp;
850
851                 /*
852                  * End of packet.
853                  *
854                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
855                  * likely to be invalid and to be dropped by the various
856                  * validation checks performed by the network stack.
857                  *
858                  * Allocate a new mbuf to replenish the RX ring descriptor.
859                  * If the allocation fails:
860                  *    - arrange for that RX descriptor to be the first one
861                  *      being parsed the next time the receive function is
862                  *      invoked [on the same queue].
863                  *
864                  *    - Stop parsing the RX ring and return immediately.
865                  *
866                  * This policy does not drop the packet received in the RX
867                  * descriptor for which the allocation of a new mbuf failed.
868                  * Thus, it allows that packet to be retrieved later once
869                  * mbufs have been freed in the meantime.
870                  * As a side effect, holding RX descriptors instead of
871                  * systematically giving them back to the NIC may lead to
872                  * RX ring exhaustion situations.
873                  * However, the NIC can gracefully prevent such situations
874                  * from happening by sending specific "back-pressure" flow
875                  * control frames to its peer(s).
876                  */
877                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
878                            "staterr=0x%x pkt_len=%u",
879                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
880                            (unsigned) rx_id, (unsigned) staterr,
881                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
882
883                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
884                 if (nmb == NULL) {
885                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
886                                    "queue_id=%u", (unsigned) rxq->port_id,
887                                    (unsigned) rxq->queue_id);
888                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
889                         break;
890                 }
891
892                 nb_hold++;
893                 rxe = &sw_ring[rx_id];
894                 rx_id++;
895                 if (rx_id == rxq->nb_rx_desc)
896                         rx_id = 0;
897
898                 /* Prefetch next mbuf while processing current one. */
899                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
900
901                 /*
902                  * When next RX descriptor is on a cache-line boundary,
903                  * prefetch the next 4 RX descriptors and the next 8 pointers
904                  * to mbufs.
905                  */
906                 if ((rx_id & 0x3) == 0) {
907                         rte_igb_prefetch(&rx_ring[rx_id]);
908                         rte_igb_prefetch(&sw_ring[rx_id]);
909                 }
910
911                 rxm = rxe->mbuf;
912                 rxe->mbuf = nmb;
913                 dma_addr =
914                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
915                 rxdp->read.hdr_addr = 0;
916                 rxdp->read.pkt_addr = dma_addr;
917
918                 /*
919                  * Initialize the returned mbuf.
920                  * 1) setup generic mbuf fields:
921                  *    - number of segments,
922                  *    - next segment,
923                  *    - packet length,
924                  *    - RX port identifier.
925                  * 2) integrate hardware offload data, if any:
926                  *    - RSS flag & hash,
927                  *    - IP checksum flag,
928                  *    - VLAN TCI, if any,
929                  *    - error flags.
930                  */
931                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
932                                       rxq->crc_len);
933                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
934                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
935                 rxm->nb_segs = 1;
936                 rxm->next = NULL;
937                 rxm->pkt_len = pkt_len;
938                 rxm->data_len = pkt_len;
939                 rxm->port = rxq->port_id;
940
941                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
942                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
943                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
944                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
945
946                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
947                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
948                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
949                 rxm->ol_flags = pkt_flags;
950                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
951                                                 lo_dword.hs_rss.pkt_info);
952
953                 /*
954                  * Store the mbuf address into the next entry of the array
955                  * of returned packets.
956                  */
957                 rx_pkts[nb_rx++] = rxm;
958         }
959         rxq->rx_tail = rx_id;
960
961         /*
962          * If the number of free RX descriptors is greater than the RX free
963          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
964          * register.
965          * Update the RDT with the value of the last processed RX descriptor
966          * minus 1, to guarantee that the RDT register is never equal to the
967          * RDH register, which creates a "full" ring situation from the
968          * hardware point of view...
969          */
970         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
971         if (nb_hold > rxq->rx_free_thresh) {
972                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
973                            "nb_hold=%u nb_rx=%u",
974                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
975                            (unsigned) rx_id, (unsigned) nb_hold,
976                            (unsigned) nb_rx);
977                 rx_id = (uint16_t) ((rx_id == 0) ?
978                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
979                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
980                 nb_hold = 0;
981         }
982         rxq->nb_rx_hold = nb_hold;
983         return nb_rx;
984 }
985
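/*
 * Illustrative usage (application side; port_id, queue_id and process()
 * are assumptions of this sketch, not part of the driver):
 *
 *     struct rte_mbuf *bufs[32];
 *     uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *     for (uint16_t i = 0; i < nb; i++)
 *         process(bufs[i]);   // packet_type/ol_flags were filled in above
 */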
986 uint16_t
987 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
988                          uint16_t nb_pkts)
989 {
990         struct igb_rx_queue *rxq;
991         volatile union e1000_adv_rx_desc *rx_ring;
992         volatile union e1000_adv_rx_desc *rxdp;
993         struct igb_rx_entry *sw_ring;
994         struct igb_rx_entry *rxe;
995         struct rte_mbuf *first_seg;
996         struct rte_mbuf *last_seg;
997         struct rte_mbuf *rxm;
998         struct rte_mbuf *nmb;
999         union e1000_adv_rx_desc rxd;
1000         uint64_t dma; /* Physical address of mbuf data buffer */
1001         uint32_t staterr;
1002         uint32_t hlen_type_rss;
1003         uint16_t rx_id;
1004         uint16_t nb_rx;
1005         uint16_t nb_hold;
1006         uint16_t data_len;
1007         uint64_t pkt_flags;
1008
1009         nb_rx = 0;
1010         nb_hold = 0;
1011         rxq = rx_queue;
1012         rx_id = rxq->rx_tail;
1013         rx_ring = rxq->rx_ring;
1014         sw_ring = rxq->sw_ring;
1015
1016         /*
1017          * Retrieve RX context of current packet, if any.
1018          */
1019         first_seg = rxq->pkt_first_seg;
1020         last_seg = rxq->pkt_last_seg;
1021
1022         while (nb_rx < nb_pkts) {
1023         next_desc:
1024                 /*
1025                  * The order of operations here is important as the DD status
1026                  * bit must not be read after any other descriptor fields.
1027                  * rx_ring and rxdp are pointing to volatile data so the order
1028                  * of accesses cannot be reordered by the compiler. If they were
1029                  * not volatile, they could be reordered which could lead to
1030                  * using invalid descriptor fields when read from rxd.
1031                  */
1032                 rxdp = &rx_ring[rx_id];
1033                 staterr = rxdp->wb.upper.status_error;
1034                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1035                         break;
1036                 rxd = *rxdp;
1037
1038                 /*
1039                  * Descriptor done.
1040                  *
1041                  * Allocate a new mbuf to replenish the RX ring descriptor.
1042                  * If the allocation fails:
1043                  *    - arrange for that RX descriptor to be the first one
1044                  *      being parsed the next time the receive function is
1045                  *      invoked [on the same queue].
1046                  *
1047                  *    - Stop parsing the RX ring and return immediately.
1048                  *
1049                  * This policy does not drop the packet received in the RX
1050                  * descriptor for which the allocation of a new mbuf failed.
1051                  * Thus, it allows that packet to be retrieved later once
1052                  * mbufs have been freed in the meantime.
1053                  * As a side effect, holding RX descriptors instead of
1054                  * systematically giving them back to the NIC may lead to
1055                  * RX ring exhaustion situations.
1056                  * However, the NIC can gracefully prevent such situations
1057                  * from happening by sending specific "back-pressure" flow
1058                  * control frames to its peer(s).
1059                  */
1060                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1061                            "staterr=0x%x data_len=%u",
1062                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1063                            (unsigned) rx_id, (unsigned) staterr,
1064                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1065
1066                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1067                 if (nmb == NULL) {
1068                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1069                                    "queue_id=%u", (unsigned) rxq->port_id,
1070                                    (unsigned) rxq->queue_id);
1071                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1072                         break;
1073                 }
1074
1075                 nb_hold++;
1076                 rxe = &sw_ring[rx_id];
1077                 rx_id++;
1078                 if (rx_id == rxq->nb_rx_desc)
1079                         rx_id = 0;
1080
1081                 /* Prefetch next mbuf while processing current one. */
1082                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1083
1084                 /*
1085                  * When next RX descriptor is on a cache-line boundary,
1086                  * prefetch the next 4 RX descriptors and the next 8 pointers
1087                  * to mbufs.
1088                  */
1089                 if ((rx_id & 0x3) == 0) {
1090                         rte_igb_prefetch(&rx_ring[rx_id]);
1091                         rte_igb_prefetch(&sw_ring[rx_id]);
1092                 }
1093
1094                 /*
1095                  * Update RX descriptor with the physical address of the new
1096                  * data buffer of the new allocated mbuf.
1097                  */
1098                 rxm = rxe->mbuf;
1099                 rxe->mbuf = nmb;
1100                 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1101                 rxdp->read.pkt_addr = dma;
1102                 rxdp->read.hdr_addr = 0;
1103
1104                 /*
1105                  * Set data length & data buffer address of mbuf.
1106                  */
1107                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1108                 rxm->data_len = data_len;
1109                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1110
1111                 /*
1112                  * If this is the first buffer of the received packet,
1113                  * set the pointer to the first mbuf of the packet and
1114                  * initialize its context.
1115                  * Otherwise, update the total length and the number of segments
1116                  * of the current scattered packet, and update the pointer to
1117                  * the last mbuf of the current packet.
1118                  */
1119                 if (first_seg == NULL) {
1120                         first_seg = rxm;
1121                         first_seg->pkt_len = data_len;
1122                         first_seg->nb_segs = 1;
1123                 } else {
1124                         first_seg->pkt_len += data_len;
1125                         first_seg->nb_segs++;
1126                         last_seg->next = rxm;
1127                 }
1128
1129                 /*
1130                  * If this is not the last buffer of the received packet,
1131                  * update the pointer to the last mbuf of the current scattered
1132                  * packet and continue to parse the RX ring.
1133                  */
1134                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1135                         last_seg = rxm;
1136                         goto next_desc;
1137                 }
1138
1139                 /*
1140                  * This is the last buffer of the received packet.
1141                  * If the CRC is not stripped by the hardware:
1142                  *   - Subtract the CRC length from the total packet length.
1143                  *   - If the last buffer only contains the whole CRC or a part
1144                  *     of it, free the mbuf associated to the last buffer.
1145                  *     If part of the CRC is also contained in the previous
1146                  *     mbuf, subtract the length of that CRC part from the
1147                  *     data length of the previous mbuf.
1148                  */
1149                 rxm->next = NULL;
1150                 if (unlikely(rxq->crc_len > 0)) {
1151                         first_seg->pkt_len -= ETHER_CRC_LEN;
1152                         if (data_len <= ETHER_CRC_LEN) {
1153                                 rte_pktmbuf_free_seg(rxm);
1154                                 first_seg->nb_segs--;
1155                                 last_seg->data_len = (uint16_t)
1156                                         (last_seg->data_len -
1157                                          (ETHER_CRC_LEN - data_len));
1158                                 last_seg->next = NULL;
1159                         } else
1160                                 rxm->data_len =
1161                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1162                 }
1163
1164                 /*
1165                  * Initialize the first mbuf of the returned packet:
1166                  *    - RX port identifier,
1167                  *    - hardware offload data, if any:
1168                  *      - RSS flag & hash,
1169                  *      - IP checksum flag,
1170                  *      - VLAN TCI, if any,
1171                  *      - error flags.
1172                  */
1173                 first_seg->port = rxq->port_id;
1174                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1175
1176                 /*
1177                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1178                  * set in the pkt_flags field.
1179                  */
1180                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1181                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1182                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1183                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1184                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1185                 first_seg->ol_flags = pkt_flags;
1186                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1187                                         lower.lo_dword.hs_rss.pkt_info);
1188
1189                 /* Prefetch data of first segment, if configured to do so. */
1190                 rte_packet_prefetch((char *)first_seg->buf_addr +
1191                         first_seg->data_off);
1192
1193                 /*
1194                  * Store the mbuf address into the next entry of the array
1195                  * of returned packets.
1196                  */
1197                 rx_pkts[nb_rx++] = first_seg;
1198
1199                 /*
1200                  * Setup receipt context for a new packet.
1201                  */
1202                 first_seg = NULL;
1203         }
1204
1205         /*
1206          * Record index of the next RX descriptor to probe.
1207          */
1208         rxq->rx_tail = rx_id;
1209
1210         /*
1211          * Save receive context.
1212          */
1213         rxq->pkt_first_seg = first_seg;
1214         rxq->pkt_last_seg = last_seg;
1215
1216         /*
1217          * If the number of free RX descriptors is greater than the RX free
1218          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1219          * register.
1220          * Update the RDT with the value of the last processed RX descriptor
1221          * minus 1, to guarantee that the RDT register is never equal to the
1222          * RDH register, which creates a "full" ring situation from the
1223          * hardware point of view...
1224          */
1225         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1226         if (nb_hold > rxq->rx_free_thresh) {
1227                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1228                            "nb_hold=%u nb_rx=%u",
1229                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1230                            (unsigned) rx_id, (unsigned) nb_hold,
1231                            (unsigned) nb_rx);
1232                 rx_id = (uint16_t) ((rx_id == 0) ?
1233                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1234                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1235                 nb_hold = 0;
1236         }
1237         rxq->nb_rx_hold = nb_hold;
1238         return nb_rx;
1239 }
1240
1241 /*
1242  * Maximum number of Ring Descriptors.
1243  *
1244  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1245  * descriptors should meet the following condition:
1246  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1247  */
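/*
 * Worked example (informational): both the advanced RX and TX descriptors
 * are 16 bytes, so the condition above reduces to the descriptor count
 * being a multiple of 128 / 16 = 8, which the IGB_RXD_ALIGN/IGB_TXD_ALIGN
 * checks in the queue setup functions below effectively enforce.
 */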
1248
1249 static void
1250 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1251 {
1252         unsigned i;
1253
1254         if (txq->sw_ring != NULL) {
1255                 for (i = 0; i < txq->nb_tx_desc; i++) {
1256                         if (txq->sw_ring[i].mbuf != NULL) {
1257                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1258                                 txq->sw_ring[i].mbuf = NULL;
1259                         }
1260                 }
1261         }
1262 }
1263
1264 static void
1265 igb_tx_queue_release(struct igb_tx_queue *txq)
1266 {
1267         if (txq != NULL) {
1268                 igb_tx_queue_release_mbufs(txq);
1269                 rte_free(txq->sw_ring);
1270                 rte_free(txq);
1271         }
1272 }
1273
1274 void
1275 eth_igb_tx_queue_release(void *txq)
1276 {
1277         igb_tx_queue_release(txq);
1278 }
1279
1280 static void
1281 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1282 {
1283         txq->tx_head = 0;
1284         txq->tx_tail = 0;
1285         txq->ctx_curr = 0;
1286         memset((void*)&txq->ctx_cache, 0,
1287                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1288 }
1289
1290 static void
1291 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1292 {
1293         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1294         struct igb_tx_entry *txe = txq->sw_ring;
1295         uint16_t i, prev;
1296         struct e1000_hw *hw;
1297
1298         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1299         /* Zero out HW ring memory */
1300         for (i = 0; i < txq->nb_tx_desc; i++) {
1301                 txq->tx_ring[i] = zeroed_desc;
1302         }
1303
1304         /* Initialize ring entries */
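        /*
         * The loop below chains the sw_ring entries into a circular list
         * through next_id/last_id and pre-sets the DD bit in every
         * descriptor, so the transmit path initially sees the whole ring
         * as free.
         */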
1305         prev = (uint16_t)(txq->nb_tx_desc - 1);
1306         for (i = 0; i < txq->nb_tx_desc; i++) {
1307                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1308
1309                 txd->wb.status = E1000_TXD_STAT_DD;
1310                 txe[i].mbuf = NULL;
1311                 txe[i].last_id = i;
1312                 txe[prev].next_id = i;
1313                 prev = i;
1314         }
1315
1316         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1317         /* 82575 specific, each tx queue will use 2 hw contexts */
1318         if (hw->mac.type == e1000_82575)
1319                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1320
1321         igb_reset_tx_queue_stat(txq);
1322 }
1323
1324 int
1325 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1326                          uint16_t queue_idx,
1327                          uint16_t nb_desc,
1328                          unsigned int socket_id,
1329                          const struct rte_eth_txconf *tx_conf)
1330 {
1331         const struct rte_memzone *tz;
1332         struct igb_tx_queue *txq;
1333         struct e1000_hw     *hw;
1334         uint32_t size;
1335
1336         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337
1338         /*
1339          * Validate number of transmit descriptors.
1340          * It must not exceed the hardware maximum and must be a multiple
1341          * of IGB_TXD_ALIGN (which keeps the ring size a multiple of E1000_ALIGN).
1342          */
1343         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1344                         (nb_desc > E1000_MAX_RING_DESC) ||
1345                         (nb_desc < E1000_MIN_RING_DESC)) {
1346                 return -EINVAL;
1347         }
1348
1349         /*
1350          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1351          * driver.
1352          */
1353         if (tx_conf->tx_free_thresh != 0)
1354                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1355                              "used for the 1G driver.");
1356         if (tx_conf->tx_rs_thresh != 0)
1357                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1358                              "used for the 1G driver.");
1359         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1360                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1361                              "consider setting the TX WTHRESH value to 4, 8, "
1362                              "or 16.");
1363
1364         /* Free memory prior to re-allocation if needed */
1365         if (dev->data->tx_queues[queue_idx] != NULL) {
1366                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1367                 dev->data->tx_queues[queue_idx] = NULL;
1368         }
1369
1370         /* First allocate the tx queue data structure */
1371         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1372                                                         RTE_CACHE_LINE_SIZE);
1373         if (txq == NULL)
1374                 return -ENOMEM;
1375
1376         /*
1377          * Allocate TX ring hardware descriptors. A memzone large enough to
1378          * handle the maximum ring size is allocated in order to allow for
1379          * resizing in later calls to the queue setup function.
1380          */
1381         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1382         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1383                                       E1000_ALIGN, socket_id);
1384         if (tz == NULL) {
1385                 igb_tx_queue_release(txq);
1386                 return -ENOMEM;
1387         }
1388
1389         txq->nb_tx_desc = nb_desc;
1390         txq->pthresh = tx_conf->tx_thresh.pthresh;
1391         txq->hthresh = tx_conf->tx_thresh.hthresh;
1392         txq->wthresh = tx_conf->tx_thresh.wthresh;
1393         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1394                 txq->wthresh = 1;
1395         txq->queue_id = queue_idx;
1396         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1397                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1398         txq->port_id = dev->data->port_id;
1399
1400         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1401         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1402
1403         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1404         /* Allocate software ring */
1405         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1406                                    sizeof(struct igb_tx_entry) * nb_desc,
1407                                    RTE_CACHE_LINE_SIZE);
1408         if (txq->sw_ring == NULL) {
1409                 igb_tx_queue_release(txq);
1410                 return -ENOMEM;
1411         }
1412         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1413                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1414
1415         igb_reset_tx_queue(txq, dev);
1416         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1417         dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1418         dev->data->tx_queues[queue_idx] = txq;
1419
1420         return 0;
1421 }
1422
1423 static void
1424 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1425 {
1426         unsigned i;
1427
1428         if (rxq->sw_ring != NULL) {
1429                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1430                         if (rxq->sw_ring[i].mbuf != NULL) {
1431                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1432                                 rxq->sw_ring[i].mbuf = NULL;
1433                         }
1434                 }
1435         }
1436 }
1437
1438 static void
1439 igb_rx_queue_release(struct igb_rx_queue *rxq)
1440 {
1441         if (rxq != NULL) {
1442                 igb_rx_queue_release_mbufs(rxq);
1443                 rte_free(rxq->sw_ring);
1444                 rte_free(rxq);
1445         }
1446 }
1447
1448 void
1449 eth_igb_rx_queue_release(void *rxq)
1450 {
1451         igb_rx_queue_release(rxq);
1452 }
1453
1454 static void
1455 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1456 {
1457         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1458         unsigned i;
1459
1460         /* Zero out HW ring memory */
1461         for (i = 0; i < rxq->nb_rx_desc; i++) {
1462                 rxq->rx_ring[i] = zeroed_desc;
1463         }
1464
1465         rxq->rx_tail = 0;
1466         rxq->pkt_first_seg = NULL;
1467         rxq->pkt_last_seg = NULL;
1468 }
1469
1470 int
1471 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1472                          uint16_t queue_idx,
1473                          uint16_t nb_desc,
1474                          unsigned int socket_id,
1475                          const struct rte_eth_rxconf *rx_conf,
1476                          struct rte_mempool *mp)
1477 {
1478         const struct rte_memzone *rz;
1479         struct igb_rx_queue *rxq;
1480         struct e1000_hw     *hw;
1481         unsigned int size;
1482
1483         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1484
1485         /*
1486          * Validate number of receive descriptors.
1487          * It must not exceed the hardware maximum and must be a multiple
1488          * of IGB_RXD_ALIGN (which keeps the ring size a multiple of E1000_ALIGN).
1489          */
1490         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1491                         (nb_desc > E1000_MAX_RING_DESC) ||
1492                         (nb_desc < E1000_MIN_RING_DESC)) {
1493                 return -EINVAL;
1494         }
1495
1496         /* Free memory prior to re-allocation if needed */
1497         if (dev->data->rx_queues[queue_idx] != NULL) {
1498                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1499                 dev->data->rx_queues[queue_idx] = NULL;
1500         }
1501
1502         /* First allocate the RX queue data structure. */
1503         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1504                           RTE_CACHE_LINE_SIZE);
1505         if (rxq == NULL)
1506                 return -ENOMEM;
1507         rxq->mb_pool = mp;
1508         rxq->nb_rx_desc = nb_desc;
1509         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1510         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1511         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1512         if (rxq->wthresh > 0 &&
1513             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1514                 rxq->wthresh = 1;
1515         rxq->drop_en = rx_conf->rx_drop_en;
1516         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1517         rxq->queue_id = queue_idx;
1518         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1519                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1520         rxq->port_id = dev->data->port_id;
1521         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1522                                   ETHER_CRC_LEN);
1523
1524         /*
1525          *  Allocate RX ring hardware descriptors. A memzone large enough to
1526          *  handle the maximum ring size is allocated in order to allow for
1527          *  resizing in later calls to the queue setup function.
1528          */
1529         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1530         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1531                                       E1000_ALIGN, socket_id);
1532         if (rz == NULL) {
1533                 igb_rx_queue_release(rxq);
1534                 return -ENOMEM;
1535         }
1536         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1537         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1538         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1539         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1540
1541         /* Allocate software ring. */
1542         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1543                                    sizeof(struct igb_rx_entry) * nb_desc,
1544                                    RTE_CACHE_LINE_SIZE);
1545         if (rxq->sw_ring == NULL) {
1546                 igb_rx_queue_release(rxq);
1547                 return -ENOMEM;
1548         }
1549         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1550                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1551
1552         dev->data->rx_queues[queue_idx] = rxq;
1553         igb_reset_rx_queue(rxq);
1554
1555         return 0;
1556 }
1557
1558 uint32_t
1559 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1560 {
1561 #define IGB_RXQ_SCAN_INTERVAL 4
1562         volatile union e1000_adv_rx_desc *rxdp;
1563         struct igb_rx_queue *rxq;
1564         uint32_t desc = 0;
1565
1566         if (rx_queue_id >= dev->data->nb_rx_queues) {
1567                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1568                 return 0;
1569         }
1570
1571         rxq = dev->data->rx_queues[rx_queue_id];
1572         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1573
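        /*
         * Scan the ring in steps of IGB_RXQ_SCAN_INTERVAL descriptors and
         * stop at the first one whose DD bit is not set; the returned count
         * is therefore an approximation with a granularity of 4 descriptors.
         */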
1574         while ((desc < rxq->nb_rx_desc) &&
1575                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1576                 desc += IGB_RXQ_SCAN_INTERVAL;
1577                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1578                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1579                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1580                                 desc - rxq->nb_rx_desc]);
1581         }
1582
1583         return desc;
1584 }
1585
1586 int
1587 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1588 {
1589         volatile union e1000_adv_rx_desc *rxdp;
1590         struct igb_rx_queue *rxq = rx_queue;
1591         uint32_t desc;
1592
1593         if (unlikely(offset >= rxq->nb_rx_desc))
1594                 return 0;
1595         desc = rxq->rx_tail + offset;
1596         if (desc >= rxq->nb_rx_desc)
1597                 desc -= rxq->nb_rx_desc;
1598
1599         rxdp = &rxq->rx_ring[desc];
1600         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1601 }
1602
1603 void
1604 igb_dev_clear_queues(struct rte_eth_dev *dev)
1605 {
1606         uint16_t i;
1607         struct igb_tx_queue *txq;
1608         struct igb_rx_queue *rxq;
1609
1610         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1611                 txq = dev->data->tx_queues[i];
1612                 if (txq != NULL) {
1613                         igb_tx_queue_release_mbufs(txq);
1614                         igb_reset_tx_queue(txq, dev);
1615                 }
1616         }
1617
1618         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1619                 rxq = dev->data->rx_queues[i];
1620                 if (rxq != NULL) {
1621                         igb_rx_queue_release_mbufs(rxq);
1622                         igb_reset_rx_queue(rxq);
1623                 }
1624         }
1625 }
1626
1627 void
1628 igb_dev_free_queues(struct rte_eth_dev *dev)
1629 {
1630         uint16_t i;
1631
1632         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1633                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1634                 dev->data->rx_queues[i] = NULL;
1635         }
1636         dev->data->nb_rx_queues = 0;
1637
1638         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1639                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1640                 dev->data->tx_queues[i] = NULL;
1641         }
1642         dev->data->nb_tx_queues = 0;
1643 }
1644
1645 /**
1646  * Receive Side Scaling (RSS).
1647  * See section 7.1.1.7 in the following document:
1648  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1649  *
1650  * Principles:
1651  * The source and destination IP addresses of the IP header and the source and
1652  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1653  * against a configurable random key to compute a 32-bit RSS hash result.
1654  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1655  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1656  * RSS output index which is used as the RX queue index where to store the
1657  * received packets.
1658  * The following output is supplied in the RX write-back descriptor:
1659  *     - 32-bit result of the Microsoft RSS hash function,
1660  *     - 4-bit RSS type field.
1661  */
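/*
 * Illustrative queue selection (not driver code), assuming the RETA has been
 * programmed as in igb_rss_configure() below:
 *     rx_queue = RETA[rss_hash & 0x7F];
 * i.e. the low 7 bits of the hash pick one of the 128 RETA entries, and that
 * entry holds the destination RX queue index.
 */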
1662
1663 /*
1664  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1665  * Used as the default key.
1666  */
1667 static uint8_t rss_intel_key[40] = {
1668         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1669         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1670         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1671         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1672         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1673 };
1674
1675 static void
1676 igb_rss_disable(struct rte_eth_dev *dev)
1677 {
1678         struct e1000_hw *hw;
1679         uint32_t mrqc;
1680
1681         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1682         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1683         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1684         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1685 }
1686
1687 static void
1688 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1689 {
1690         uint8_t  *hash_key;
1691         uint32_t rss_key;
1692         uint32_t mrqc;
1693         uint64_t rss_hf;
1694         uint16_t i;
1695
1696         hash_key = rss_conf->rss_key;
1697         if (hash_key != NULL) {
1698                 /* Fill in RSS hash key */
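                /*
                 * Each group of four key bytes is packed little-endian into
                 * one 32-bit RSSRK register; e.g. with the default
                 * rss_intel_key above, bytes 0x6D, 0x5A, 0x56, 0xDA become
                 * RSSRK(0) = 0xDA565A6D.
                 */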
1699                 for (i = 0; i < 10; i++) {
1700                         rss_key  = hash_key[(i * 4)];
1701                         rss_key |= hash_key[(i * 4) + 1] << 8;
1702                         rss_key |= hash_key[(i * 4) + 2] << 16;
1703                         rss_key |= hash_key[(i * 4) + 3] << 24;
1704                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1705                 }
1706         }
1707
1708         /* Set configured hashing protocols in MRQC register */
1709         rss_hf = rss_conf->rss_hf;
1710         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1711         if (rss_hf & ETH_RSS_IPV4)
1712                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1713         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1714                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1715         if (rss_hf & ETH_RSS_IPV6)
1716                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1717         if (rss_hf & ETH_RSS_IPV6_EX)
1718                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1719         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1720                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1721         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1722                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1723         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1724                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1725         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1726                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1727         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1728                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1729         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1730 }
1731
1732 int
1733 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1734                         struct rte_eth_rss_conf *rss_conf)
1735 {
1736         struct e1000_hw *hw;
1737         uint32_t mrqc;
1738         uint64_t rss_hf;
1739
1740         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1741
1742         /*
1743          * Before changing anything, first check that the update RSS operation
1744          * does not attempt to disable RSS, if RSS was enabled at
1745          * initialization time, or does not attempt to enable RSS, if RSS was
1746          * disabled at initialization time.
1747          */
1748         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1749         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1750         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1751                 if (rss_hf != 0) /* Enable RSS */
1752                         return -(EINVAL);
1753                 return 0; /* Nothing to do */
1754         }
1755         /* RSS enabled */
1756         if (rss_hf == 0) /* Disable RSS */
1757                 return -(EINVAL);
1758         igb_hw_rss_hash_set(hw, rss_conf);
1759         return 0;
1760 }
1761
1762 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1763                               struct rte_eth_rss_conf *rss_conf)
1764 {
1765         struct e1000_hw *hw;
1766         uint8_t *hash_key;
1767         uint32_t rss_key;
1768         uint32_t mrqc;
1769         uint64_t rss_hf;
1770         uint16_t i;
1771
1772         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773         hash_key = rss_conf->rss_key;
1774         if (hash_key != NULL) {
1775                 /* Return RSS hash key */
1776                 for (i = 0; i < 10; i++) {
1777                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1778                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1779                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1780                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1781                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1782                 }
1783         }
1784
1785         /* Get RSS functions configured in MRQC register */
1786         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1787         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1788                 rss_conf->rss_hf = 0;
1789                 return 0;
1790         }
1791         rss_hf = 0;
1792         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1793                 rss_hf |= ETH_RSS_IPV4;
1794         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1795                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1796         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1797                 rss_hf |= ETH_RSS_IPV6;
1798         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1799                 rss_hf |= ETH_RSS_IPV6_EX;
1800         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1801                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1802         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1803                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1804         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1805                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1806         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1807                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1808         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1809                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1810         rss_conf->rss_hf = rss_hf;
1811         return 0;
1812 }
1813
1814 static void
1815 igb_rss_configure(struct rte_eth_dev *dev)
1816 {
1817         struct rte_eth_rss_conf rss_conf;
1818         struct e1000_hw *hw;
1819         uint32_t shift;
1820         uint16_t i;
1821
1822         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1823
1824         /* Fill in redirection table. */
1825         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1826         for (i = 0; i < 128; i++) {
1827                 union e1000_reta {
1828                         uint32_t dword;
1829                         uint8_t  bytes[4];
1830                 } reta;
1831                 uint8_t q_idx;
1832
1833                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1834                                    i % dev->data->nb_rx_queues : 0);
1835                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1836                 if ((i & 3) == 3)
1837                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1838         }
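        /*
         * Example (informational): with 4 RX queues the RETA entries cycle
         * through 0, 1, 2, 3, ... and every fourth iteration one packed
         * 32-bit RETA register is written; on the 82575 the queue index is
         * additionally shifted left by 6, as that MAC expects it in the
         * upper bits of each RETA byte.
         */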
1839
1840         /*
1841          * Configure the RSS key and the RSS protocols used to compute
1842          * the RSS hash of input packets.
1843          */
1844         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1845         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1846                 igb_rss_disable(dev);
1847                 return;
1848         }
1849         if (rss_conf.rss_key == NULL)
1850                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1851         igb_hw_rss_hash_set(hw, &rss_conf);
1852 }
1853
1854 /*
1855  * Check whether the MAC type supports VMDq.
1856  * Return 1 if it does, otherwise return 0.
1857  */
1858 static int
1859 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1860 {
1861         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1862
1863         switch (hw->mac.type) {
1864         case e1000_82576:
1865         case e1000_82580:
1866         case e1000_i350:
1867                 return 1;
1868         case e1000_82540:
1869         case e1000_82541:
1870         case e1000_82542:
1871         case e1000_82543:
1872         case e1000_82544:
1873         case e1000_82545:
1874         case e1000_82546:
1875         case e1000_82547:
1876         case e1000_82571:
1877         case e1000_82572:
1878         case e1000_82573:
1879         case e1000_82574:
1880         case e1000_82583:
1881         case e1000_i210:
1882         case e1000_i211:
1883         default:
1884                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1885                 return 0;
1886         }
1887 }
1888
1889 static int
1890 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1891 {
1892         struct rte_eth_vmdq_rx_conf *cfg;
1893         struct e1000_hw *hw;
1894         uint32_t mrqc, vt_ctl, vmolr, rctl;
1895         int i;
1896
1897         PMD_INIT_FUNC_TRACE();
1898
1899         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1900         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1901
1902         /* Check if the MAC type supports VMDq; a return value of 0 means it does not */
1903         if (igb_is_vmdq_supported(dev) == 0)
1904                 return -1;
1905
1906         igb_rss_disable(dev);
1907
1908         /* RCTL: enable VLAN filter */
1909         rctl = E1000_READ_REG(hw, E1000_RCTL);
1910         rctl |= E1000_RCTL_VFE;
1911         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1912
1913         /* MRQC: enable vmdq */
1914         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1915         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1916         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1917
1918         /* VTCTL:  pool selection according to VLAN tag */
1919         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1920         if (cfg->enable_default_pool)
1921                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1922         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1923         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1924
1925         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1926                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1927                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1928                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1929                         E1000_VMOLR_MPME);
1930
1931                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1932                         vmolr |= E1000_VMOLR_AUPE;
1933                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1934                         vmolr |= E1000_VMOLR_ROMPE;
1935                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1936                         vmolr |= E1000_VMOLR_ROPE;
1937                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1938                         vmolr |= E1000_VMOLR_BAM;
1939                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1940                         vmolr |= E1000_VMOLR_MPME;
1941
1942                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1943         }
1944
1945         /*
1946          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1947          * Both 82576 and 82580 support it.
1948          */
1949         if (hw->mac.type != e1000_i350) {
1950                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1951                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1952                         vmolr |= E1000_VMOLR_STRVLAN;
1953                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1954                 }
1955         }
1956
1957         /* VFTA - enable all vlan filters */
1958         for (i = 0; i < IGB_VFTA_SIZE; i++)
1959                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1960
1961         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1962         if (hw->mac.type != e1000_82580)
1963                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1964
1965         /*
1966          * RAH/RAL - allow pools to read specific mac addresses
1967          * In this case, all pools should be able to read from mac addr 0
1968          */
1969         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1970         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1971
1972         /* VLVF: set up filters for vlan tags as configured */
1973         for (i = 0; i < cfg->nb_pool_maps; i++) {
1974                 /* set vlan id in VF register and set the valid bit */
1975                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1976                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1977                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1978                         E1000_VLVF_POOLSEL_MASK)));
1979         }
1980
1981         E1000_WRITE_FLUSH(hw);
1982
1983         return 0;
1984 }
1985
1986
1987 /*********************************************************************
1988  *
1989  *  Enable receive unit.
1990  *
1991  **********************************************************************/
1992
1993 static int
1994 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1995 {
1996         struct igb_rx_entry *rxe = rxq->sw_ring;
1997         uint64_t dma_addr;
1998         unsigned i;
1999
2000         /* Initialize software ring entries. */
2001         for (i = 0; i < rxq->nb_rx_desc; i++) {
2002                 volatile union e1000_adv_rx_desc *rxd;
2003                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2004
2005                 if (mbuf == NULL) {
2006                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2007                                      "queue_id=%hu", rxq->queue_id);
2008                         return -ENOMEM;
2009                 }
2010                 dma_addr =
2011                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
2012                 rxd = &rxq->rx_ring[i];
2013                 rxd->read.hdr_addr = 0;
2014                 rxd->read.pkt_addr = dma_addr;
2015                 rxe[i].mbuf = mbuf;
2016         }
2017
2018         return 0;
2019 }
2020
2021 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2022 static int
2023 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2024 {
2025         struct e1000_hw *hw =
2026                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2027         uint32_t mrqc;
2028
2029         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2030                 /*
2031                  * SRIOV active scheme
2032                  * FIXME if support RSS together with VMDq & SRIOV
2033                  */
2034                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2035                 /* 011b: ignore Def_Q; the default pool is taken from VT_CTL.DEF_PL */
2036                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2037                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2038         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2039                 /*
2040                  * SRIOV inactive scheme
2041                  */
2042                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2043                         case ETH_MQ_RX_RSS:
2044                                 igb_rss_configure(dev);
2045                                 break;
2046                         case ETH_MQ_RX_VMDQ_ONLY:
2047                                 /*Configure general VMDQ only RX parameters*/
2048                                 igb_vmdq_rx_hw_configure(dev);
2049                                 break;
2050                         case ETH_MQ_RX_NONE:
2051                         /* if mq_mode is none, disable RSS mode. */
2052                         default:
2053                                 igb_rss_disable(dev);
2054                                 break;
2055                 }
2056         }
2057
2058         return 0;
2059 }
2060
2061 int
2062 eth_igb_rx_init(struct rte_eth_dev *dev)
2063 {
2064         struct e1000_hw     *hw;
2065         struct igb_rx_queue *rxq;
2066         uint32_t rctl;
2067         uint32_t rxcsum;
2068         uint32_t srrctl;
2069         uint16_t buf_size;
2070         uint16_t rctl_bsize;
2071         uint16_t i;
2072         int ret;
2073
2074         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2075         srrctl = 0;
2076
2077         /*
2078          * Make sure receives are disabled while setting
2079          * up the descriptor ring.
2080          */
2081         rctl = E1000_READ_REG(hw, E1000_RCTL);
2082         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2083
2084         /*
2085          * Configure support of jumbo frames, if any.
2086          */
2087         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2088                 rctl |= E1000_RCTL_LPE;
2089
2090                 /*
2091                  * Set the maximum packet length by default; it may be updated
2092                  * later when dual VLAN is enabled or disabled.
2093                  */
2094                 E1000_WRITE_REG(hw, E1000_RLPML,
2095                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2096                                                 VLAN_TAG_SIZE);
2097         } else
2098                 rctl &= ~E1000_RCTL_LPE;
2099
2100         /* Configure and enable each RX queue. */
2101         rctl_bsize = 0;
2102         dev->rx_pkt_burst = eth_igb_recv_pkts;
2103         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2104                 uint64_t bus_addr;
2105                 uint32_t rxdctl;
2106
2107                 rxq = dev->data->rx_queues[i];
2108
2109                 /* Allocate buffers for descriptor rings and set up queue */
2110                 ret = igb_alloc_rx_queue_mbufs(rxq);
2111                 if (ret)
2112                         return ret;
2113
2114                 /*
2115                  * Reset crc_len in case it was changed after queue setup by a
2116                  *  call to configure
2117                  */
2118                 rxq->crc_len =
2119                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2120                                                         0 : ETHER_CRC_LEN);
2121
2122                 bus_addr = rxq->rx_ring_phys_addr;
2123                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2124                                 rxq->nb_rx_desc *
2125                                 sizeof(union e1000_adv_rx_desc));
2126                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2127                                 (uint32_t)(bus_addr >> 32));
2128                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2129
2130                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2131
2132                 /*
2133                  * Configure RX buffer size.
2134                  */
2135                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2136                         RTE_PKTMBUF_HEADROOM);
2137                 if (buf_size >= 1024) {
2138                         /*
2139                          * Configure the BSIZEPACKET field of the SRRCTL
2140                          * register of the queue.
2141                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2142                          * If this field is equal to 0b, then RCTL.BSIZE
2143                          * determines the RX packet buffer size.
2144                          */
2145                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2146                                    E1000_SRRCTL_BSIZEPKT_MASK);
2147                         buf_size = (uint16_t) ((srrctl &
2148                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2149                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2150
2151                         /* Add dual VLAN tag length to support dual VLAN */
2152                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2153                                                 2 * VLAN_TAG_SIZE) > buf_size){
2154                                 if (!dev->data->scattered_rx)
2155                                         PMD_INIT_LOG(DEBUG,
2156                                                      "forcing scatter mode");
2157                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2158                                 dev->data->scattered_rx = 1;
2159                         }
2160                 } else {
2161                         /*
2162                          * Use BSIZE field of the device RCTL register.
2163                          */
2164                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2165                                 rctl_bsize = buf_size;
2166                         if (!dev->data->scattered_rx)
2167                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2168                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2169                         dev->data->scattered_rx = 1;
2170                 }
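                /*
                 * Illustrative sizing example (not driver logic): a typical
                 * 2176-byte mbuf data room minus 128 bytes of headroom leaves
                 * buf_size = 2048, so BSIZEPACKET = 2048 >> 10 = 2 and the
                 * effective buffer size stays 2048; any max_rx_pkt_len above
                 * 2040 (2048 - 2 * VLAN_TAG_SIZE) then forces scatter mode.
                 */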
2171
2172                 /* Set if packets are dropped when no descriptors available */
2173                 if (rxq->drop_en)
2174                         srrctl |= E1000_SRRCTL_DROP_EN;
2175
2176                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2177
2178                 /* Enable this RX queue. */
2179                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2180                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2181                 rxdctl &= 0xFFF00000;
2182                 rxdctl |= (rxq->pthresh & 0x1F);
2183                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2184                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2185                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2186         }
2187
2188         if (dev->data->dev_conf.rxmode.enable_scatter) {
2189                 if (!dev->data->scattered_rx)
2190                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2191                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2192                 dev->data->scattered_rx = 1;
2193         }
2194
2195         /*
2196          * Setup BSIZE field of RCTL register, if needed.
2197          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2198          * register, since the code above configures the SRRCTL register of
2199          * the RX queue in such a case.
2200          * All configurable sizes are:
2201          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2202          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2203          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2204          *  2048: rctl |= E1000_RCTL_SZ_2048;
2205          *  1024: rctl |= E1000_RCTL_SZ_1024;
2206          *   512: rctl |= E1000_RCTL_SZ_512;
2207          *   256: rctl |= E1000_RCTL_SZ_256;
2208          */
2209         if (rctl_bsize > 0) {
2210                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2211                         rctl |= E1000_RCTL_SZ_512;
2212                 else /* 256 <= buf_size < 512 - use 256 */
2213                         rctl |= E1000_RCTL_SZ_256;
2214         }
2215
2216         /*
2217          * Configure RSS if device configured with multiple RX queues.
2218          */
2219         igb_dev_mq_rx_configure(dev);
2220
2221         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2222         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2223
2224         /*
2225          * Setup the Checksum Register.
2226          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2227          */
2228         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2229         rxcsum |= E1000_RXCSUM_PCSD;
2230
2231         /* Enable both L3/L4 rx checksum offload */
2232         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2233                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2234         else
2235                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2236         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2237
2238         /* Setup the Receive Control Register. */
2239         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2240                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2241
2242                 /* set STRCRC bit in all queues */
2243                 if (hw->mac.type == e1000_i350 ||
2244                     hw->mac.type == e1000_i210 ||
2245                     hw->mac.type == e1000_i211 ||
2246                     hw->mac.type == e1000_i354) {
2247                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2248                                 rxq = dev->data->rx_queues[i];
2249                                 uint32_t dvmolr = E1000_READ_REG(hw,
2250                                         E1000_DVMOLR(rxq->reg_idx));
2251                                 dvmolr |= E1000_DVMOLR_STRCRC;
2252                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2253                         }
2254                 }
2255         } else {
2256                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2257
2258                 /* clear STRCRC bit in all queues */
2259                 if (hw->mac.type == e1000_i350 ||
2260                     hw->mac.type == e1000_i210 ||
2261                     hw->mac.type == e1000_i211 ||
2262                     hw->mac.type == e1000_i354) {
2263                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2264                                 rxq = dev->data->rx_queues[i];
2265                                 uint32_t dvmolr = E1000_READ_REG(hw,
2266                                         E1000_DVMOLR(rxq->reg_idx));
2267                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2268                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2269                         }
2270                 }
2271         }
2272
2273         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2274         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2275                 E1000_RCTL_RDMTS_HALF |
2276                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2277
2278         /* Make sure VLAN Filters are off. */
2279         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2280                 rctl &= ~E1000_RCTL_VFE;
2281         /* Don't store bad packets. */
2282         rctl &= ~E1000_RCTL_SBP;
2283
2284         /* Enable Receives. */
2285         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2286
2287         /*
2288          * Setup the HW Rx Head and Tail Descriptor Pointers.
2289          * This needs to be done after enable.
2290          */
2291         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2292                 rxq = dev->data->rx_queues[i];
2293                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2294                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2295         }
2296
2297         return 0;
2298 }
2299
2300 /*********************************************************************
2301  *
2302  *  Enable transmit unit.
2303  *
2304  **********************************************************************/
2305 void
2306 eth_igb_tx_init(struct rte_eth_dev *dev)
2307 {
2308         struct e1000_hw     *hw;
2309         struct igb_tx_queue *txq;
2310         uint32_t tctl;
2311         uint32_t txdctl;
2312         uint16_t i;
2313
2314         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2315
2316         /* Setup the Base and Length of the Tx Descriptor Rings. */
2317         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2318                 uint64_t bus_addr;
2319                 txq = dev->data->tx_queues[i];
2320                 bus_addr = txq->tx_ring_phys_addr;
2321
2322                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2323                                 txq->nb_tx_desc *
2324                                 sizeof(union e1000_adv_tx_desc));
2325                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2326                                 (uint32_t)(bus_addr >> 32));
2327                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2328
2329                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2330                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2331                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2332
2333                 /* Setup Transmit threshold registers. */
2334                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2335                 txdctl |= txq->pthresh & 0x1F;
2336                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2337                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2338                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2339                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2340         }
2341
2342         /* Program the Transmit Control Register. */
2343         tctl = E1000_READ_REG(hw, E1000_TCTL);
2344         tctl &= ~E1000_TCTL_CT;
2345         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2346                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2347
2348         e1000_config_collision_dist(hw);
2349
2350         /* This write will effectively turn on the transmit unit. */
2351         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2352 }
2353
2354 /*********************************************************************
2355  *
2356  *  Enable VF receive unit.
2357  *
2358  **********************************************************************/
2359 int
2360 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2361 {
2362         struct e1000_hw     *hw;
2363         struct igb_rx_queue *rxq;
2364         uint32_t srrctl;
2365         uint16_t buf_size;
2366         uint16_t rctl_bsize;
2367         uint16_t i;
2368         int ret;
2369
2370         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2371
2372         /* setup MTU */
2373         e1000_rlpml_set_vf(hw,
2374                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2375                 VLAN_TAG_SIZE));
2376
2377         /* Configure and enable each RX queue. */
2378         rctl_bsize = 0;
2379         dev->rx_pkt_burst = eth_igb_recv_pkts;
2380         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2381                 uint64_t bus_addr;
2382                 uint32_t rxdctl;
2383
2384                 rxq = dev->data->rx_queues[i];
2385
2386                 /* Allocate buffers for descriptor rings and set up queue */
2387                 ret = igb_alloc_rx_queue_mbufs(rxq);
2388                 if (ret)
2389                         return ret;
2390
2391                 bus_addr = rxq->rx_ring_phys_addr;
2392                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2393                                 rxq->nb_rx_desc *
2394                                 sizeof(union e1000_adv_rx_desc));
2395                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2396                                 (uint32_t)(bus_addr >> 32));
2397                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2398
2399                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2400
2401                 /*
2402                  * Configure RX buffer size.
2403                  */
2404                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2405                         RTE_PKTMBUF_HEADROOM);
2406                 if (buf_size >= 1024) {
2407                         /*
2408                          * Configure the BSIZEPACKET field of the SRRCTL
2409                          * register of the queue.
2410                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2411                          * If this field is equal to 0b, then RCTL.BSIZE
2412                          * determines the RX packet buffer size.
2413                          */
2414                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2415                                    E1000_SRRCTL_BSIZEPKT_MASK);
2416                         buf_size = (uint16_t) ((srrctl &
2417                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2418                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2419
2420                         /* Add dual VLAN tag length to support dual VLAN */
2421                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2422                                                 2 * VLAN_TAG_SIZE) > buf_size){
2423                                 if (!dev->data->scattered_rx)
2424                                         PMD_INIT_LOG(DEBUG,
2425                                                      "forcing scatter mode");
2426                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2427                                 dev->data->scattered_rx = 1;
2428                         }
2429                 } else {
2430                         /*
2431                          * Use BSIZE field of the device RCTL register.
2432                          */
2433                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2434                                 rctl_bsize = buf_size;
2435                         if (!dev->data->scattered_rx)
2436                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2437                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2438                         dev->data->scattered_rx = 1;
2439                 }
2440
2441                 /* Set if packets are dropped when no descriptors available */
2442                 if (rxq->drop_en)
2443                         srrctl |= E1000_SRRCTL_DROP_EN;
2444
2445                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2446
2447                 /* Enable this RX queue. */
2448                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2449                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2450                 rxdctl &= 0xFFF00000;
2451                 rxdctl |= (rxq->pthresh & 0x1F);
2452                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2453                 if (hw->mac.type == e1000_vfadapt) {
2454                         /*
2455                          * Workaround for the 82576 VF erratum:
2456                          * force WTHRESH to 1 to avoid write-back
2457                          * sometimes not being triggered.
2458                          */
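                        /* 0x10000 == 1 << 16, i.e. WTHRESH (RXDCTL bits 20:16) = 1 */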
2459                         rxdctl |= 0x10000;
2460                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2461                 }
2462                 else
2463                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2464                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2465         }
2466
2467         if (dev->data->dev_conf.rxmode.enable_scatter) {
2468                 if (!dev->data->scattered_rx)
2469                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2470                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2471                 dev->data->scattered_rx = 1;
2472         }
2473
2474         /*
2475          * Setup the HW Rx Head and Tail Descriptor Pointers.
2476          * This needs to be done after enable.
2477          */
2478         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2479                 rxq = dev->data->rx_queues[i];
2480                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2481                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2482         }
2483
2484         return 0;
2485 }
2486
2487 /*********************************************************************
2488  *
2489  *  Enable VF transmit unit.
2490  *
2491  **********************************************************************/
2492 void
2493 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2494 {
2495         struct e1000_hw     *hw;
2496         struct igb_tx_queue *txq;
2497         uint32_t txdctl;
2498         uint16_t i;
2499
2500         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2501
2502         /* Setup the Base and Length of the Tx Descriptor Rings. */
2503         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2504                 uint64_t bus_addr;
2505
2506                 txq = dev->data->tx_queues[i];
2507                 bus_addr = txq->tx_ring_phys_addr;
2508                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2509                                 txq->nb_tx_desc *
2510                                 sizeof(union e1000_adv_tx_desc));
2511                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2512                                 (uint32_t)(bus_addr >> 32));
2513                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2514
2515                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2516                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2517                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2518
2519                 /* Setup Transmit threshold registers. */
2520                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2521                 txdctl |= txq->pthresh & 0x1F;
2522                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2523                 if (hw->mac.type == e1000_82576) {
2524                         /*
2525                          * Workaround for the 82576 VF erratum:
2526                          * force WTHRESH to 1 to avoid write-back
2527                          * sometimes not being triggered.
2528                          */
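                        /* 0x10000 == 1 << 16, i.e. WTHRESH (TXDCTL bits 20:16) = 1 */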
2529                         txdctl |= 0x10000;
2530                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2531                 }
2532                 else
2533                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2534                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2535                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2536         }
2537
2538 }
2539
2540 void
2541 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2542         struct rte_eth_rxq_info *qinfo)
2543 {
2544         struct igb_rx_queue *rxq;
2545
2546         rxq = dev->data->rx_queues[queue_id];
2547
2548         qinfo->mp = rxq->mb_pool;
2549         qinfo->scattered_rx = dev->data->scattered_rx;
2550         qinfo->nb_desc = rxq->nb_rx_desc;
2551
2552         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2553         qinfo->conf.rx_drop_en = rxq->drop_en;
2554 }
2555
2556 void
2557 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2558         struct rte_eth_txq_info *qinfo)
2559 {
2560         struct igb_tx_queue *txq;
2561
2562         txq = dev->data->tx_queues[queue_id];
2563
2564         qinfo->nb_desc = txq->nb_tx_desc;
2565
2566         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2567         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2568         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2569 }