dpdk.git: lib/librte_pmd_e1000/igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_ring.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
67 #include <rte_udp.h>
68 #include <rte_tcp.h>
69 #include <rte_sctp.h>
70 #include <rte_string_fns.h>
71
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
75
76 #define IGB_RSS_OFFLOAD_ALL ( \
77                 ETH_RSS_IPV4 | \
78                 ETH_RSS_IPV4_TCP | \
79                 ETH_RSS_IPV6 | \
80                 ETH_RSS_IPV6_EX | \
81                 ETH_RSS_IPV6_TCP | \
82                 ETH_RSS_IPV6_TCP_EX | \
83                 ETH_RSS_IPV4_UDP | \
84                 ETH_RSS_IPV6_UDP | \
85                 ETH_RSS_IPV6_UDP_EX)
86
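/*
 * Allocate a raw (uninitialized) mbuf from the pool. The RX path rewrites
 * all packet fields itself, so the full reset done by rte_pktmbuf_alloc()
 * is not needed here.
 */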
87 static inline struct rte_mbuf *
88 rte_rxmbuf_alloc(struct rte_mempool *mp)
89 {
90         struct rte_mbuf *m;
91
92         m = __rte_mbuf_raw_alloc(mp);
93         __rte_mbuf_sanity_check_raw(m, 0);
94         return (m);
95 }
96
97 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
98         (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
99
100 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
101         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
102
103 /**
104  * Structure associated with each descriptor of the RX ring of a RX queue.
105  */
106 struct igb_rx_entry {
107         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
108 };
109
110 /**
111  * Structure associated with each descriptor of the TX ring of a TX queue.
112  */
113 struct igb_tx_entry {
114         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
115         uint16_t next_id; /**< Index of next descriptor in ring. */
116         uint16_t last_id; /**< Index of last scattered descriptor. */
117 };
118
119 /**
120  * Structure associated with each RX queue.
121  */
122 struct igb_rx_queue {
123         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
124         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
125         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
126         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
127         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
128         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
129         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
130         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
131         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
132         uint16_t            rx_tail;    /**< current value of RDT register. */
133         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
134         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
135         uint16_t            queue_id;   /**< RX queue index. */
136         uint16_t            reg_idx;    /**< RX queue register index. */
137         uint8_t             port_id;    /**< Device port identifier. */
138         uint8_t             pthresh;    /**< Prefetch threshold register. */
139         uint8_t             hthresh;    /**< Host threshold register. */
140         uint8_t             wthresh;    /**< Write-back threshold register. */
141         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
142         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
143 };
144
145 /**
146  * Hardware context number
147  */
148 enum igb_advctx_num {
149         IGB_CTX_0    = 0, /**< CTX0    */
150         IGB_CTX_1    = 1, /**< CTX1    */
151         IGB_CTX_NUM  = 2, /**< CTX_NUM */
152 };
153
154 /** Offload features */
155 union igb_vlan_macip {
156         uint32_t data;
157         struct {
158                 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
159                 uint16_t vlan_tci;
160                 /**< VLAN Tag Control Information (CPU order). */
161         } f;
162 };
163
164 /*
165  * Compare mask for vlan_macip_lens.data,
166  * should be in sync with the igb_vlan_macip.f layout.
167  */
168 #define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
169 #define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
170 #define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
171 /** MAC+IP  length. */
172 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
173
174 /**
175  * Structure to check if a new context needs to be built.
176  */
177 struct igb_advctx_info {
178         uint64_t flags;           /**< ol_flags related to context build. */
179         uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
180         union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
181 };
182
183 /**
184  * Structure associated with each TX queue.
185  */
186 struct igb_tx_queue {
187         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
188         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
189         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
190         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
191         uint32_t               txd_type;      /**< Device-specific TXD type */
192         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
193         uint16_t               tx_tail; /**< Current value of TDT register. */
194         uint16_t               tx_head;
195         /**< Index of first used TX descriptor. */
196         uint16_t               queue_id; /**< TX queue index. */
197         uint16_t               reg_idx;  /**< TX queue register index. */
198         uint8_t                port_id;  /**< Device port identifier. */
199         uint8_t                pthresh;  /**< Prefetch threshold register. */
200         uint8_t                hthresh;  /**< Host threshold register. */
201         uint8_t                wthresh;  /**< Write-back threshold register. */
202         uint32_t               ctx_curr;
203         /**< Index of the currently used hardware context. */
204         uint32_t               ctx_start;
205         /**< Start context position for transmit queue. */
206         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
207         /**< Hardware context history.*/
208 };
209
210 #if 1
211 #define RTE_PMD_USE_PREFETCH
212 #endif
213
214 #ifdef RTE_PMD_USE_PREFETCH
215 #define rte_igb_prefetch(p)     rte_prefetch0(p)
216 #else
217 #define rte_igb_prefetch(p)     do {} while(0)
218 #endif
219
220 #ifdef RTE_PMD_PACKET_PREFETCH
221 #define rte_packet_prefetch(p) rte_prefetch1(p)
222 #else
223 #define rte_packet_prefetch(p)  do {} while(0)
224 #endif
225
226 /*
227  * Macro for VMDq feature for 1 GbE NIC.
228  */
229 #define E1000_VMOLR_SIZE                        (8)
230
231 /*********************************************************************
232  *
233  *  TX function
234  *
235  **********************************************************************/
236
237 /*
238  * Advanced context descriptors are almost the same between igb and ixgbe.
239  * This is kept as a separate function; there may be optimization opportunities here.
240  * Rework is required to use the pre-defined values.
241  */
242
243 static inline void
244 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
245                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
246                 uint64_t ol_flags, uint32_t vlan_macip_lens)
247 {
248         uint32_t type_tucmd_mlhl;
249         uint32_t mss_l4len_idx;
250         uint32_t ctx_idx, ctx_curr;
251         uint32_t cmp_mask;
252
253         ctx_curr = txq->ctx_curr;
254         ctx_idx = ctx_curr + txq->ctx_start;
255
256         cmp_mask = 0;
257         type_tucmd_mlhl = 0;
258
259         if (ol_flags & PKT_TX_VLAN_PKT) {
260                 cmp_mask |= TX_VLAN_CMP_MASK;
261         }
262
263         if (ol_flags & PKT_TX_IP_CKSUM) {
264                 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
265                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
266         }
267
268         /* Specify which HW CTX to upload. */
269         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
270         switch (ol_flags & PKT_TX_L4_MASK) {
271         case PKT_TX_UDP_CKSUM:
272                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
273                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274                 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
276                 break;
277         case PKT_TX_TCP_CKSUM:
278                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
279                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280                 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282                 break;
283         case PKT_TX_SCTP_CKSUM:
284                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
285                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286                 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287                 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
288                 break;
289         default:
290                 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292                 break;
293         }
294
295         txq->ctx_cache[ctx_curr].flags           = ol_flags;
296         txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
297         txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
298                 vlan_macip_lens & cmp_mask;
299
300         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
301         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
302         ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
303         ctx_txd->seqnum_seed     = 0;
304 }
305
306 /*
307  * Check which hardware context can be used. Use the existing match
308  * or create a new context descriptor.
309  */
310 static inline uint32_t
311 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
312                 uint32_t vlan_macip_lens)
313 {
314         /* If match with the current context */
315         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
316                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
317                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
318                         return txq->ctx_curr;
319         }
320
321         /* If match with the second context */
322         txq->ctx_curr ^= 1;
323         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
324                 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
325                 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
326                         return txq->ctx_curr;
327         }
328
329         /* Mismatch with both cached contexts: a new context descriptor must be built. */
330         return (IGB_CTX_NUM);
331 }
332
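/*
 * Translate checksum offload request flags into the POPTS bits of the
 * olinfo_status field: TXSM when an L4 checksum is requested, IXSM when
 * an IPv4 header checksum is requested.
 */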
333 static inline uint32_t
334 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
335 {
336         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
337         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
338         uint32_t tmp;
339
340         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
341         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
342         return tmp;
343 }
344
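/* Return the VLE (VLAN insertion enable) command bit when PKT_TX_VLAN_PKT is set. */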
345 static inline uint32_t
346 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
347 {
348         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
349         return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
350 }
351
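/*
 * TX burst function: for each packet, optionally build an advanced context
 * descriptor (VLAN / checksum offloads), then fill one data descriptor per
 * segment. EOP and RS are set on the last descriptor of each packet and the
 * TDT register is updated once at the end of the burst.
 */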
352 uint16_t
353 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
354                uint16_t nb_pkts)
355 {
356         struct igb_tx_queue *txq;
357         struct igb_tx_entry *sw_ring;
358         struct igb_tx_entry *txe, *txn;
359         volatile union e1000_adv_tx_desc *txr;
360         volatile union e1000_adv_tx_desc *txd;
361         struct rte_mbuf     *tx_pkt;
362         struct rte_mbuf     *m_seg;
363         union igb_vlan_macip vlan_macip_lens;
364         uint64_t buf_dma_addr;
365         uint32_t olinfo_status;
366         uint32_t cmd_type_len;
367         uint32_t pkt_len;
368         uint16_t slen;
369         uint64_t ol_flags;
370         uint16_t tx_end;
371         uint16_t tx_id;
372         uint16_t tx_last;
373         uint16_t nb_tx;
374         uint64_t tx_ol_req;
375         uint32_t new_ctx = 0;
376         uint32_t ctx = 0;
377
378         txq = tx_queue;
379         sw_ring = txq->sw_ring;
380         txr     = txq->tx_ring;
381         tx_id   = txq->tx_tail;
382         txe = &sw_ring[tx_id];
383
384         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
385                 tx_pkt = *tx_pkts++;
386                 pkt_len = tx_pkt->pkt_len;
387
388                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
389
390                 /*
391                  * The number of descriptors that must be allocated for a
392                  * packet is the number of segments of that packet, plus 1
393                  * Context Descriptor for the VLAN Tag Identifier, if any.
394                  * Determine the last TX descriptor to allocate in the TX ring
395                  * for the packet, starting from the current position (tx_id)
396                  * in the ring.
397                  */
398                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
399
400                 ol_flags = tx_pkt->ol_flags;
401                 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
402                 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
403                 tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
404
405                 /* Check whether a Context Descriptor needs to be built. */
406                 if (tx_ol_req) {
407                         ctx = what_advctx_update(txq, tx_ol_req,
408                                 vlan_macip_lens.data);
409                         /* Only allocate a context descriptor if required. */
410                         new_ctx = (ctx == IGB_CTX_NUM);
411                         ctx = txq->ctx_curr;
412                         tx_last = (uint16_t) (tx_last + new_ctx);
413                 }
414                 if (tx_last >= txq->nb_tx_desc)
415                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
416
417                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
418                            " tx_first=%u tx_last=%u",
419                            (unsigned) txq->port_id,
420                            (unsigned) txq->queue_id,
421                            (unsigned) pkt_len,
422                            (unsigned) tx_id,
423                            (unsigned) tx_last);
424
425                 /*
426                  * Check if there are enough free descriptors in the TX ring
427                  * to transmit the next packet.
428                  * This operation is based on the two following rules:
429                  *
430                  *   1- Only check that the last needed TX descriptor can be
431                  *      allocated (by construction, if that descriptor is free,
432                  *      all intermediate ones are also free).
433                  *
434                  *      For this purpose, the index of the last TX descriptor
435                  *      used for a packet (the "last descriptor" of a packet)
436                  *      is recorded in the TX entries (the last one included)
437                  *      that are associated with all TX descriptors allocated
438                  *      for that packet.
439                  *
440                  *   2- Avoid allocating the last free TX descriptor of the
441                  *      ring so that the TDT register is never set to the
442                  *      same value stored in parallel by the NIC in the TDH
443                  *      register, which would make the TX engine of the NIC
444                  *      enter a deadlock situation.
445                  *
446                  *      By extension, avoid allocating a free descriptor that
447                  *      belongs to the last set of free descriptors allocated
448                  *      to the same packet previously transmitted.
449                  */
450
451                 /*
452                  * The "last descriptor" of the previously sent packet, if any,
453                  * that used the last descriptor we plan to allocate.
454                  */
455                 tx_end = sw_ring[tx_last].last_id;
456
457                 /*
458                  * The next descriptor following that "last descriptor" in the
459                  * ring.
460                  */
461                 tx_end = sw_ring[tx_end].next_id;
462
463                 /*
464                  * The "last descriptor" associated with that next descriptor.
465                  */
466                 tx_end = sw_ring[tx_end].last_id;
467
468                 /*
469                  * Check that this descriptor is free.
470                  */
471                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
472                         if (nb_tx == 0)
473                                 return (0);
474                         goto end_of_tx;
475                 }
476
477                 /*
478                  * Set common flags of all TX Data Descriptors.
479                  *
480                  * The following bits must be set in all Data Descriptors:
481                  *   - E1000_ADVTXD_DTYP_DATA
482                  *   - E1000_ADVTXD_DCMD_DEXT
483                  *
484                  * The following bits must be set in the first Data Descriptor
485                  * and are ignored in the other ones:
486                  *   - E1000_ADVTXD_DCMD_IFCS
487                  *   - E1000_ADVTXD_MAC_1588
488                  *   - E1000_ADVTXD_DCMD_VLE
489                  *
490                  * The following bits must only be set in the last Data
491                  * Descriptor:
492                  *   - E1000_TXD_CMD_EOP
493                  *
494                  * The following bits can be set in any Data Descriptor, but
495                  * are only set in the last Data Descriptor:
496                  *   - E1000_TXD_CMD_RS
497                  */
498                 cmd_type_len = txq->txd_type |
499                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
500                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
501 #if defined(RTE_LIBRTE_IEEE1588)
502                 if (ol_flags & PKT_TX_IEEE1588_TMST)
503                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
504 #endif
505                 if (tx_ol_req) {
506                         /* Setup TX Advanced context descriptor if required */
507                         if (new_ctx) {
508                                 volatile struct e1000_adv_tx_context_desc *
509                                     ctx_txd;
510
511                                 ctx_txd = (volatile struct
512                                     e1000_adv_tx_context_desc *)
513                                     &txr[tx_id];
514
515                                 txn = &sw_ring[txe->next_id];
516                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
517
518                                 if (txe->mbuf != NULL) {
519                                         rte_pktmbuf_free_seg(txe->mbuf);
520                                         txe->mbuf = NULL;
521                                 }
522
523                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
524                                     vlan_macip_lens.data);
525
526                                 txe->last_id = tx_last;
527                                 tx_id = txe->next_id;
528                                 txe = txn;
529                         }
530
531                         /* Setup the TX Advanced Data Descriptor */
532                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
533                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
534                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
535                 }
536
537                 m_seg = tx_pkt;
538                 do {
539                         txn = &sw_ring[txe->next_id];
540                         txd = &txr[tx_id];
541
542                         if (txe->mbuf != NULL)
543                                 rte_pktmbuf_free_seg(txe->mbuf);
544                         txe->mbuf = m_seg;
545
546                         /*
547                          * Set up transmit descriptor.
548                          */
549                         slen = (uint16_t) m_seg->data_len;
550                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
551                         txd->read.buffer_addr =
552                                 rte_cpu_to_le_64(buf_dma_addr);
553                         txd->read.cmd_type_len =
554                                 rte_cpu_to_le_32(cmd_type_len | slen);
555                         txd->read.olinfo_status =
556                                 rte_cpu_to_le_32(olinfo_status);
557                         txe->last_id = tx_last;
558                         tx_id = txe->next_id;
559                         txe = txn;
560                         m_seg = m_seg->next;
561                 } while (m_seg != NULL);
562
563                 /*
564                  * The last packet data descriptor needs End Of Packet (EOP)
565                  * and Report Status (RS).
566                  */
567                 txd->read.cmd_type_len |=
568                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
569         }
570  end_of_tx:
571         rte_wmb();
572
573         /*
574          * Set the Transmit Descriptor Tail (TDT).
575          */
576         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
577         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
578                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
579                    (unsigned) tx_id, (unsigned) nb_tx);
580         txq->tx_tail = tx_id;
581
582         return (nb_tx);
583 }
584
585 /*********************************************************************
586  *
587  *  RX functions
588  *
589  **********************************************************************/
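/*
 * Convert the hlen_type_rss field of an RX descriptor into mbuf ol_flags:
 * the packet type bits select the IPv4/IPv6 flags and a non-zero RSS type
 * adds PKT_RX_RSS_HASH.
 */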
590 static inline uint64_t
591 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
592 {
593         uint64_t pkt_flags;
594
595         static uint64_t ip_pkt_types_map[16] = {
596                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
597                 PKT_RX_IPV6_HDR, 0, 0, 0,
598                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
599                 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
600         };
601
602 #if defined(RTE_LIBRTE_IEEE1588)
603         static uint32_t ip_pkt_etqf_map[8] = {
604                 0, 0, 0, PKT_RX_IEEE1588_PTP,
605                 0, 0, 0, 0,
606         };
607
608         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
609                                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
610                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
611 #else
612         pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
613                                 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
614 #endif
615         return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH);
616 }
617
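/*
 * Convert RX descriptor status bits into mbuf ol_flags (VLAN present and,
 * when IEEE 1588 support is enabled, the timestamp flag).
 */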
618 static inline uint64_t
619 rx_desc_status_to_pkt_flags(uint32_t rx_status)
620 {
621         uint64_t pkt_flags;
622
623         /* Check if VLAN present */
624         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
625
626 #if defined(RTE_LIBRTE_IEEE1588)
627         if (rx_status & E1000_RXD_STAT_TMST)
628                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
629 #endif
630         return pkt_flags;
631 }
632
633 static inline uint64_t
634 rx_desc_error_to_pkt_flags(uint32_t rx_status)
635 {
636         /*
637          * Bit 30: IPE, IPv4 checksum error
638          * Bit 29: L4I, L4 integrity error
639          */
640
641         static uint64_t error_to_pkt_flags_map[4] = {
642                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
643                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
644         };
645         return error_to_pkt_flags_map[(rx_status >>
646                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
647 }
648
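/*
 * Non-scattered RX burst function: each completed descriptor yields exactly
 * one mbuf, and the descriptor is replenished with a freshly allocated mbuf
 * before being handed back to the NIC.
 */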
649 uint16_t
650 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
651                uint16_t nb_pkts)
652 {
653         struct igb_rx_queue *rxq;
654         volatile union e1000_adv_rx_desc *rx_ring;
655         volatile union e1000_adv_rx_desc *rxdp;
656         struct igb_rx_entry *sw_ring;
657         struct igb_rx_entry *rxe;
658         struct rte_mbuf *rxm;
659         struct rte_mbuf *nmb;
660         union e1000_adv_rx_desc rxd;
661         uint64_t dma_addr;
662         uint32_t staterr;
663         uint32_t hlen_type_rss;
664         uint16_t pkt_len;
665         uint16_t rx_id;
666         uint16_t nb_rx;
667         uint16_t nb_hold;
668         uint64_t pkt_flags;
669
670         nb_rx = 0;
671         nb_hold = 0;
672         rxq = rx_queue;
673         rx_id = rxq->rx_tail;
674         rx_ring = rxq->rx_ring;
675         sw_ring = rxq->sw_ring;
676         while (nb_rx < nb_pkts) {
677                 /*
678                  * The order of operations here is important as the DD status
679                  * bit must not be read after any other descriptor fields.
680                  * rx_ring and rxdp are pointing to volatile data so the order
681                  * of accesses cannot be reordered by the compiler. If they were
682                  * not volatile, they could be reordered which could lead to
683                  * using invalid descriptor fields when read from rxd.
684                  */
685                 rxdp = &rx_ring[rx_id];
686                 staterr = rxdp->wb.upper.status_error;
687                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
688                         break;
689                 rxd = *rxdp;
690
691                 /*
692                  * End of packet.
693                  *
694                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
695                  * likely to be invalid and to be dropped by the various
696                  * validation checks performed by the network stack.
697                  *
698                  * Allocate a new mbuf to replenish the RX ring descriptor.
699                  * If the allocation fails:
700                  *    - arrange for that RX descriptor to be the first one
701                  *      being parsed the next time the receive function is
702                  *      invoked [on the same queue].
703                  *
704                  *    - Stop parsing the RX ring and return immediately.
705                  *
706                  * This policy does not drop the packet received in the RX
707                  * descriptor for which the allocation of a new mbuf failed.
708                  * Thus, it allows that packet to be retrieved later if
709                  * mbufs have been freed in the meantime.
710                  * As a side effect, holding RX descriptors instead of
711                  * systematically giving them back to the NIC may lead to
712                  * RX ring exhaustion situations.
713                  * However, the NIC can gracefully prevent such situations
714                  * by sending specific "back-pressure" flow control
715                  * frames to its peer(s).
716                  */
717                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
718                            "staterr=0x%x pkt_len=%u",
719                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
720                            (unsigned) rx_id, (unsigned) staterr,
721                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
722
723                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
724                 if (nmb == NULL) {
725                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
726                                    "queue_id=%u", (unsigned) rxq->port_id,
727                                    (unsigned) rxq->queue_id);
728                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
729                         break;
730                 }
731
732                 nb_hold++;
733                 rxe = &sw_ring[rx_id];
734                 rx_id++;
735                 if (rx_id == rxq->nb_rx_desc)
736                         rx_id = 0;
737
738                 /* Prefetch next mbuf while processing current one. */
739                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
740
741                 /*
742                  * When next RX descriptor is on a cache-line boundary,
743                  * prefetch the next 4 RX descriptors and the next 8 pointers
744                  * to mbufs.
745                  */
746                 if ((rx_id & 0x3) == 0) {
747                         rte_igb_prefetch(&rx_ring[rx_id]);
748                         rte_igb_prefetch(&sw_ring[rx_id]);
749                 }
750
751                 rxm = rxe->mbuf;
752                 rxe->mbuf = nmb;
753                 dma_addr =
754                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
755                 rxdp->read.hdr_addr = dma_addr;
756                 rxdp->read.pkt_addr = dma_addr;
757
758                 /*
759                  * Initialize the returned mbuf.
760                  * 1) setup generic mbuf fields:
761                  *    - number of segments,
762                  *    - next segment,
763                  *    - packet length,
764                  *    - RX port identifier.
765                  * 2) integrate hardware offload data, if any:
766                  *    - RSS flag & hash,
767                  *    - IP checksum flag,
768                  *    - VLAN TCI, if any,
769                  *    - error flags.
770                  */
771                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
772                                       rxq->crc_len);
773                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
774                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
775                 rxm->nb_segs = 1;
776                 rxm->next = NULL;
777                 rxm->pkt_len = pkt_len;
778                 rxm->data_len = pkt_len;
779                 rxm->port = rxq->port_id;
780
781                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
782                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
783                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
784                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
785
786                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
787                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
788                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
789                 rxm->ol_flags = pkt_flags;
790
791                 /*
792                  * Store the mbuf address into the next entry of the array
793                  * of returned packets.
794                  */
795                 rx_pkts[nb_rx++] = rxm;
796         }
797         rxq->rx_tail = rx_id;
798
799         /*
800          * If the number of free RX descriptors is greater than the RX free
801          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
802          * register.
803          * Update the RDT with the value of the last processed RX descriptor
804          * minus 1, to guarantee that the RDT register is never equal to the
805          * RDH register, which creates a "full" ring situation from the
806          * hardware point of view...
807          */
808         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
809         if (nb_hold > rxq->rx_free_thresh) {
810                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
811                            "nb_hold=%u nb_rx=%u",
812                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
813                            (unsigned) rx_id, (unsigned) nb_hold,
814                            (unsigned) nb_rx);
815                 rx_id = (uint16_t) ((rx_id == 0) ?
816                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
817                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
818                 nb_hold = 0;
819         }
820         rxq->nb_rx_hold = nb_hold;
821         return (nb_rx);
822 }
823
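/*
 * Scattered RX burst function: descriptors are chained into a multi-segment
 * mbuf until the EOP bit is seen; if the hardware does not strip the CRC,
 * it is trimmed from the tail of the packet.
 */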
824 uint16_t
825 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
826                          uint16_t nb_pkts)
827 {
828         struct igb_rx_queue *rxq;
829         volatile union e1000_adv_rx_desc *rx_ring;
830         volatile union e1000_adv_rx_desc *rxdp;
831         struct igb_rx_entry *sw_ring;
832         struct igb_rx_entry *rxe;
833         struct rte_mbuf *first_seg;
834         struct rte_mbuf *last_seg;
835         struct rte_mbuf *rxm;
836         struct rte_mbuf *nmb;
837         union e1000_adv_rx_desc rxd;
838         uint64_t dma; /* Physical address of mbuf data buffer */
839         uint32_t staterr;
840         uint32_t hlen_type_rss;
841         uint16_t rx_id;
842         uint16_t nb_rx;
843         uint16_t nb_hold;
844         uint16_t data_len;
845         uint64_t pkt_flags;
846
847         nb_rx = 0;
848         nb_hold = 0;
849         rxq = rx_queue;
850         rx_id = rxq->rx_tail;
851         rx_ring = rxq->rx_ring;
852         sw_ring = rxq->sw_ring;
853
854         /*
855          * Retrieve RX context of current packet, if any.
856          */
857         first_seg = rxq->pkt_first_seg;
858         last_seg = rxq->pkt_last_seg;
859
860         while (nb_rx < nb_pkts) {
861         next_desc:
862                 /*
863                  * The order of operations here is important as the DD status
864                  * bit must not be read after any other descriptor fields.
865                  * rx_ring and rxdp are pointing to volatile data so the order
866                  * of accesses cannot be reordered by the compiler. If they were
867                  * not volatile, they could be reordered which could lead to
868                  * using invalid descriptor fields when read from rxd.
869                  */
870                 rxdp = &rx_ring[rx_id];
871                 staterr = rxdp->wb.upper.status_error;
872                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
873                         break;
874                 rxd = *rxdp;
875
876                 /*
877                  * Descriptor done.
878                  *
879                  * Allocate a new mbuf to replenish the RX ring descriptor.
880                  * If the allocation fails:
881                  *    - arrange for that RX descriptor to be the first one
882                  *      being parsed the next time the receive function is
883                  *      invoked [on the same queue].
884                  *
885                  *    - Stop parsing the RX ring and return immediately.
886                  *
887                  * This policy does not drop the packet received in the RX
888                  * descriptor for which the allocation of a new mbuf failed.
889                  * Thus, it allows that packet to be retrieved later if
890                  * mbufs have been freed in the meantime.
891                  * As a side effect, holding RX descriptors instead of
892                  * systematically giving them back to the NIC may lead to
893                  * RX ring exhaustion situations.
894                  * However, the NIC can gracefully prevent such situations
895                  * by sending specific "back-pressure" flow control
896                  * frames to its peer(s).
897                  */
898                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
899                            "staterr=0x%x data_len=%u",
900                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
901                            (unsigned) rx_id, (unsigned) staterr,
902                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
903
904                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
905                 if (nmb == NULL) {
906                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
907                                    "queue_id=%u", (unsigned) rxq->port_id,
908                                    (unsigned) rxq->queue_id);
909                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
910                         break;
911                 }
912
913                 nb_hold++;
914                 rxe = &sw_ring[rx_id];
915                 rx_id++;
916                 if (rx_id == rxq->nb_rx_desc)
917                         rx_id = 0;
918
919                 /* Prefetch next mbuf while processing current one. */
920                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
921
922                 /*
923                  * When next RX descriptor is on a cache-line boundary,
924                  * prefetch the next 4 RX descriptors and the next 8 pointers
925                  * to mbufs.
926                  */
927                 if ((rx_id & 0x3) == 0) {
928                         rte_igb_prefetch(&rx_ring[rx_id]);
929                         rte_igb_prefetch(&sw_ring[rx_id]);
930                 }
931
932                 /*
933                  * Update RX descriptor with the physical address of the new
934                  * data buffer of the new allocated mbuf.
935                  */
936                 rxm = rxe->mbuf;
937                 rxe->mbuf = nmb;
938                 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
939                 rxdp->read.pkt_addr = dma;
940                 rxdp->read.hdr_addr = dma;
941
942                 /*
943                  * Set data length & data buffer address of mbuf.
944                  */
945                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
946                 rxm->data_len = data_len;
947                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
948
949                 /*
950                  * If this is the first buffer of the received packet,
951                  * set the pointer to the first mbuf of the packet and
952                  * initialize its context.
953                  * Otherwise, update the total length and the number of segments
954                  * of the current scattered packet, and update the pointer to
955                  * the last mbuf of the current packet.
956                  */
957                 if (first_seg == NULL) {
958                         first_seg = rxm;
959                         first_seg->pkt_len = data_len;
960                         first_seg->nb_segs = 1;
961                 } else {
962                         first_seg->pkt_len += data_len;
963                         first_seg->nb_segs++;
964                         last_seg->next = rxm;
965                 }
966
967                 /*
968                  * If this is not the last buffer of the received packet,
969                  * update the pointer to the last mbuf of the current scattered
970                  * packet and continue to parse the RX ring.
971                  */
972                 if (! (staterr & E1000_RXD_STAT_EOP)) {
973                         last_seg = rxm;
974                         goto next_desc;
975                 }
976
977                 /*
978                  * This is the last buffer of the received packet.
979                  * If the CRC is not stripped by the hardware:
980                  *   - Subtract the CRC length from the total packet length.
981                  *   - If the last buffer only contains the whole CRC or a part
982                  *     of it, free the mbuf associated to the last buffer.
983                  *     If part of the CRC is also contained in the previous
984                  *     mbuf, subtract the length of that CRC part from the
985                  *     data length of the previous mbuf.
986                  */
987                 rxm->next = NULL;
988                 if (unlikely(rxq->crc_len > 0)) {
989                         first_seg->pkt_len -= ETHER_CRC_LEN;
990                         if (data_len <= ETHER_CRC_LEN) {
991                                 rte_pktmbuf_free_seg(rxm);
992                                 first_seg->nb_segs--;
993                                 last_seg->data_len = (uint16_t)
994                                         (last_seg->data_len -
995                                          (ETHER_CRC_LEN - data_len));
996                                 last_seg->next = NULL;
997                         } else
998                                 rxm->data_len =
999                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1000                 }
1001
1002                 /*
1003                  * Initialize the first mbuf of the returned packet:
1004                  *    - RX port identifier,
1005                  *    - hardware offload data, if any:
1006                  *      - RSS flag & hash,
1007                  *      - IP checksum flag,
1008                  *      - VLAN TCI, if any,
1009                  *      - error flags.
1010                  */
1011                 first_seg->port = rxq->port_id;
1012                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1013
1014                 /*
1015                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1016                  * set in the pkt_flags field.
1017                  */
1018                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1019                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1020                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1021                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1022                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1023                 first_seg->ol_flags = pkt_flags;
1024
1025                 /* Prefetch data of first segment, if configured to do so. */
1026                 rte_packet_prefetch((char *)first_seg->buf_addr +
1027                         first_seg->data_off);
1028
1029                 /*
1030                  * Store the mbuf address into the next entry of the array
1031                  * of returned packets.
1032                  */
1033                 rx_pkts[nb_rx++] = first_seg;
1034
1035                 /*
1036                  * Setup receipt context for a new packet.
1037                  */
1038                 first_seg = NULL;
1039         }
1040
1041         /*
1042          * Record index of the next RX descriptor to probe.
1043          */
1044         rxq->rx_tail = rx_id;
1045
1046         /*
1047          * Save receive context.
1048          */
1049         rxq->pkt_first_seg = first_seg;
1050         rxq->pkt_last_seg = last_seg;
1051
1052         /*
1053          * If the number of free RX descriptors is greater than the RX free
1054          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1055          * register.
1056          * Update the RDT with the value of the last processed RX descriptor
1057          * minus 1, to guarantee that the RDT register is never equal to the
1058          * RDH register, which creates a "full" ring situation from the
1059          * hardware point of view...
1060          */
1061         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1062         if (nb_hold > rxq->rx_free_thresh) {
1063                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1064                            "nb_hold=%u nb_rx=%u",
1065                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1066                            (unsigned) rx_id, (unsigned) nb_hold,
1067                            (unsigned) nb_rx);
1068                 rx_id = (uint16_t) ((rx_id == 0) ?
1069                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1070                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1071                 nb_hold = 0;
1072         }
1073         rxq->nb_rx_hold = nb_hold;
1074         return (nb_rx);
1075 }
1076
1077 /*
1078  * Rings setup and release.
1079  *
1080  * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1081  * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary,
1082  * which also optimizes the cache line size effect.
1083  * The hardware supports cache line sizes of up to 128 bytes.
1084  */
1085 #define IGB_ALIGN 128
1086
1087 /*
1088  * Maximum number of Ring Descriptors.
1089  *
1090  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1091  * descriptors must meet the following condition:
1092  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1093  */
1094 #define IGB_MIN_RING_DESC 32
1095 #define IGB_MAX_RING_DESC 4096
1096
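/*
 * Reserve (or look up) the DMA-able memzone backing a HW descriptor ring.
 * The zone name encodes the driver, ring type, port and queue so that an
 * existing zone is reused on subsequent setup calls.
 */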
1097 static const struct rte_memzone *
1098 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1099                       uint16_t queue_id, uint32_t ring_size, int socket_id)
1100 {
1101         char z_name[RTE_MEMZONE_NAMESIZE];
1102         const struct rte_memzone *mz;
1103
1104         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1105                         dev->driver->pci_drv.name, ring_name,
1106                                 dev->data->port_id, queue_id);
1107         mz = rte_memzone_lookup(z_name);
1108         if (mz)
1109                 return mz;
1110
1111 #ifdef RTE_LIBRTE_XEN_DOM0
1112         return rte_memzone_reserve_bounded(z_name, ring_size,
1113                         socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1114 #else
1115         return rte_memzone_reserve_aligned(z_name, ring_size,
1116                         socket_id, 0, IGB_ALIGN);
1117 #endif
1118 }
1119
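/* Free all mbufs still attached to the TX software ring entries. */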
1120 static void
1121 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1122 {
1123         unsigned i;
1124
1125         if (txq->sw_ring != NULL) {
1126                 for (i = 0; i < txq->nb_tx_desc; i++) {
1127                         if (txq->sw_ring[i].mbuf != NULL) {
1128                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1129                                 txq->sw_ring[i].mbuf = NULL;
1130                         }
1131                 }
1132         }
1133 }
1134
1135 static void
1136 igb_tx_queue_release(struct igb_tx_queue *txq)
1137 {
1138         if (txq != NULL) {
1139                 igb_tx_queue_release_mbufs(txq);
1140                 rte_free(txq->sw_ring);
1141                 rte_free(txq);
1142         }
1143 }
1144
1145 void
1146 eth_igb_tx_queue_release(void *txq)
1147 {
1148         igb_tx_queue_release(txq);
1149 }
1150
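/* Reset the software state (ring indexes and context cache) of a TX queue. */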
1151 static void
1152 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1153 {
1154         txq->tx_head = 0;
1155         txq->tx_tail = 0;
1156         txq->ctx_curr = 0;
1157         memset((void*)&txq->ctx_cache, 0,
1158                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1159 }
1160
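/*
 * Initialize a TX ring: zero the HW descriptors, mark them all as done (DD)
 * so they appear free, and link the software ring entries into a circular
 * list.
 */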
1161 static void
1162 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1163 {
1164         static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1165                         .buffer_addr = 0}};
1166         struct igb_tx_entry *txe = txq->sw_ring;
1167         uint16_t i, prev;
1168         struct e1000_hw *hw;
1169
1170         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1171         /* Zero out HW ring memory */
1172         for (i = 0; i < txq->nb_tx_desc; i++) {
1173                 txq->tx_ring[i] = zeroed_desc;
1174         }
1175
1176         /* Initialize ring entries */
1177         prev = (uint16_t)(txq->nb_tx_desc - 1);
1178         for (i = 0; i < txq->nb_tx_desc; i++) {
1179                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1180
1181                 txd->wb.status = E1000_TXD_STAT_DD;
1182                 txe[i].mbuf = NULL;
1183                 txe[i].last_id = i;
1184                 txe[prev].next_id = i;
1185                 prev = i;
1186         }
1187
1188         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1189         /* 82575 specific, each tx queue will use 2 hw contexts */
1190         if (hw->mac.type == e1000_82575)
1191                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1192
1193         igb_reset_tx_queue_stat(txq);
1194 }
1195
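/*
 * Set up a TX queue: validate the descriptor count, allocate the queue
 * structure, the HW descriptor memzone and the software ring, then reset
 * the queue state.
 */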
1196 int
1197 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1198                          uint16_t queue_idx,
1199                          uint16_t nb_desc,
1200                          unsigned int socket_id,
1201                          const struct rte_eth_txconf *tx_conf)
1202 {
1203         const struct rte_memzone *tz;
1204         struct igb_tx_queue *txq;
1205         struct e1000_hw     *hw;
1206         uint32_t size;
1207
1208         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1209
1210         /*
1211          * Validate number of transmit descriptors.
1212          * It must not exceed hardware maximum, and must be multiple
1213          * of IGB_ALIGN.
1214          */
1215         if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1216             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1217                 return -EINVAL;
1218         }
1219
1220         /*
1221          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1222          * driver.
1223          */
1224         if (tx_conf->tx_free_thresh != 0)
1225                 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1226                              "used for the 1G driver.");
1227         if (tx_conf->tx_rs_thresh != 0)
1228                 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1229                              "used for the 1G driver.");
1230         if (tx_conf->tx_thresh.wthresh == 0)
1231                 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1232                              "consider setting the TX WTHRESH value to 4, 8, "
1233                              "or 16.");
1234
1235         /* Free memory prior to re-allocation if needed */
1236         if (dev->data->tx_queues[queue_idx] != NULL) {
1237                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1238                 dev->data->tx_queues[queue_idx] = NULL;
1239         }
1240
1241         /* First allocate the tx queue data structure */
1242         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1243                                                         CACHE_LINE_SIZE);
1244         if (txq == NULL)
1245                 return (-ENOMEM);
1246
1247         /*
1248          * Allocate TX ring hardware descriptors. A memzone large enough to
1249          * handle the maximum ring size is allocated in order to allow for
1250          * resizing in later calls to the queue setup function.
1251          */
1252         size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1253         tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1254                                         size, socket_id);
1255         if (tz == NULL) {
1256                 igb_tx_queue_release(txq);
1257                 return (-ENOMEM);
1258         }
1259
1260         txq->nb_tx_desc = nb_desc;
1261         txq->pthresh = tx_conf->tx_thresh.pthresh;
1262         txq->hthresh = tx_conf->tx_thresh.hthresh;
1263         txq->wthresh = tx_conf->tx_thresh.wthresh;
1264         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1265                 txq->wthresh = 1;
1266         txq->queue_id = queue_idx;
1267         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1268                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1269         txq->port_id = dev->data->port_id;
1270
1271         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1272 #ifndef RTE_LIBRTE_XEN_DOM0
1273         txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1274 #else
1275         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1276 #endif
1277         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1278         /* Allocate software ring */
1279         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1280                                    sizeof(struct igb_tx_entry) * nb_desc,
1281                                    CACHE_LINE_SIZE);
1282         if (txq->sw_ring == NULL) {
1283                 igb_tx_queue_release(txq);
1284                 return (-ENOMEM);
1285         }
1286         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1287                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1288
1289         igb_reset_tx_queue(txq, dev);
1290         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1291         dev->data->tx_queues[queue_idx] = txq;
1292
1293         return (0);
1294 }
1295
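/* Free all mbufs still attached to the RX software ring entries. */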
1296 static void
1297 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1298 {
1299         unsigned i;
1300
1301         if (rxq->sw_ring != NULL) {
1302                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1303                         if (rxq->sw_ring[i].mbuf != NULL) {
1304                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1305                                 rxq->sw_ring[i].mbuf = NULL;
1306                         }
1307                 }
1308         }
1309 }
1310
1311 static void
1312 igb_rx_queue_release(struct igb_rx_queue *rxq)
1313 {
1314         if (rxq != NULL) {
1315                 igb_rx_queue_release_mbufs(rxq);
1316                 rte_free(rxq->sw_ring);
1317                 rte_free(rxq);
1318         }
1319 }
1320
1321 void
1322 eth_igb_rx_queue_release(void *rxq)
1323 {
1324         igb_rx_queue_release(rxq);
1325 }
1326
1327 static void
1328 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1329 {
1330         static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1331                         .pkt_addr = 0}};
1332         unsigned i;
1333
1334         /* Zero out HW ring memory */
1335         for (i = 0; i < rxq->nb_rx_desc; i++) {
1336                 rxq->rx_ring[i] = zeroed_desc;
1337         }
1338
1339         rxq->rx_tail = 0;
1340         rxq->pkt_first_seg = NULL;
1341         rxq->pkt_last_seg = NULL;
1342 }
1343
1344 int
1345 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1346                          uint16_t queue_idx,
1347                          uint16_t nb_desc,
1348                          unsigned int socket_id,
1349                          const struct rte_eth_rxconf *rx_conf,
1350                          struct rte_mempool *mp)
1351 {
1352         const struct rte_memzone *rz;
1353         struct igb_rx_queue *rxq;
1354         struct e1000_hw     *hw;
1355         unsigned int size;
1356
1357         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1358
1359         /*
1360          * Validate number of receive descriptors.
1361          * It must not exceed the hardware maximum, and the resulting ring
1362          * size in bytes must be a multiple of IGB_ALIGN.
1363          */
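        /*
         * For example, with 16-byte advanced RX descriptors and assuming
         * IGB_ALIGN is 128 bytes, nb_desc must be a multiple of 8.
         */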
1364         if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1365             (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1366                 return (-EINVAL);
1367         }
1368
1369         /* Free memory prior to re-allocation if needed */
1370         if (dev->data->rx_queues[queue_idx] != NULL) {
1371                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1372                 dev->data->rx_queues[queue_idx] = NULL;
1373         }
1374
1375         /* First allocate the RX queue data structure. */
1376         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1377                           CACHE_LINE_SIZE);
1378         if (rxq == NULL)
1379                 return (-ENOMEM);
1380         rxq->mb_pool = mp;
1381         rxq->nb_rx_desc = nb_desc;
1382         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1383         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1384         rxq->wthresh = rx_conf->rx_thresh.wthresh;
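        /* Same 82576 write-back workaround as in the TX queue setup above. */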
1385         if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1386                 rxq->wthresh = 1;
1387         rxq->drop_en = rx_conf->rx_drop_en;
1388         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1389         rxq->queue_id = queue_idx;
1390         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1391                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1392         rxq->port_id = dev->data->port_id;
1393         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1394                                   ETHER_CRC_LEN);
1395
1396         /*
1397          *  Allocate RX ring hardware descriptors. A memzone large enough to
1398          *  handle the maximum ring size is allocated in order to allow for
1399          *  resizing in later calls to the queue setup function.
1400          */
1401         size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1402         rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1403         if (rz == NULL) {
1404                 igb_rx_queue_release(rxq);
1405                 return (-ENOMEM);
1406         }
1407         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1408         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1409 #ifndef RTE_LIBRTE_XEN_DOM0
1410         rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1411 #else
1412         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1413 #endif
1414         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1415
1416         /* Allocate software ring. */
1417         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1418                                    sizeof(struct igb_rx_entry) * nb_desc,
1419                                    CACHE_LINE_SIZE);
1420         if (rxq->sw_ring == NULL) {
1421                 igb_rx_queue_release(rxq);
1422                 return (-ENOMEM);
1423         }
1424         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1425                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1426
1427         dev->data->rx_queues[queue_idx] = rxq;
1428         igb_reset_rx_queue(rxq);
1429
1430         return 0;
1431 }
1432
1433 uint32_t
1434 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1435 {
1436 #define IGB_RXQ_SCAN_INTERVAL 4
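        /*
         * The ring is scanned in steps of IGB_RXQ_SCAN_INTERVAL descriptors,
         * so the count reported is an approximation rounded to a multiple of
         * that interval.
         */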
1437         volatile union e1000_adv_rx_desc *rxdp;
1438         struct igb_rx_queue *rxq;
1439         uint32_t desc = 0;
1440
1441         if (rx_queue_id >= dev->data->nb_rx_queues) {
1442                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1443                 return 0;
1444         }
1445
1446         rxq = dev->data->rx_queues[rx_queue_id];
1447         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1448
1449         while ((desc < rxq->nb_rx_desc) &&
1450                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1451                 desc += IGB_RXQ_SCAN_INTERVAL;
1452                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1453                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1454                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1455                                 desc - rxq->nb_rx_desc]);
1456         }
1457
1458                 return desc;
1459 }
1460
1461 int
1462 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1463 {
1464         volatile union e1000_adv_rx_desc *rxdp;
1465         struct igb_rx_queue *rxq = rx_queue;
1466         uint32_t desc;
1467
1468         if (unlikely(offset >= rxq->nb_rx_desc))
1469                 return 0;
1470         desc = rxq->rx_tail + offset;
1471         if (desc >= rxq->nb_rx_desc)
1472                 desc -= rxq->nb_rx_desc;
1473
1474         rxdp = &rxq->rx_ring[desc];
1475         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1476 }
1477
1478 void
1479 igb_dev_clear_queues(struct rte_eth_dev *dev)
1480 {
1481         uint16_t i;
1482         struct igb_tx_queue *txq;
1483         struct igb_rx_queue *rxq;
1484
1485         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1486                 txq = dev->data->tx_queues[i];
1487                 if (txq != NULL) {
1488                         igb_tx_queue_release_mbufs(txq);
1489                         igb_reset_tx_queue(txq, dev);
1490                 }
1491         }
1492
1493         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1494                 rxq = dev->data->rx_queues[i];
1495                 if (rxq != NULL) {
1496                         igb_rx_queue_release_mbufs(rxq);
1497                         igb_reset_rx_queue(rxq);
1498                 }
1499         }
1500 }
1501
1502 /**
1503  * Receive Side Scaling (RSS).
1504  * See section 7.1.1.7 in the following document:
1505  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1506  *
1507  * Principles:
1508  * The source and destination IP addresses of the IP header and the source and
1509  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1510  * against a configurable random key to compute a 32-bit RSS hash result.
1511  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1512  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1513  * RSS output index, which is used as the index of the RX queue in which
1514  * received packets are stored.
1515  * The following output is supplied in the RX write-back descriptor:
1516  *     - 32-bit result of the Microsoft RSS hash function,
1517  *     - 4-bit RSS type field.
1518  */
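/*
 * Illustrative sketch (not code used by the driver): with the scheme above,
 * the destination RX queue of a packet could be derived as
 *
 *     reta_idx = rss_hash & 0x7F;   (7 LSBs index the 128-entry RETA)
 *     rx_queue = reta[reta_idx];    (3-bit queue index programmed below)
 *
 * where rss_hash is the 32-bit Microsoft RSS hash reported in the RX
 * write-back descriptor.
 */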
1519
1520 /*
1521  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1522  * Used as the default key.
1523  */
1524 static uint8_t rss_intel_key[40] = {
1525         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1526         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1527         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1528         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1529         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1530 };
1531
1532 static void
1533 igb_rss_disable(struct rte_eth_dev *dev)
1534 {
1535         struct e1000_hw *hw;
1536         uint32_t mrqc;
1537
1538         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1539         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1540         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1541         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1542 }
1543
1544 static void
1545 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1546 {
1547         uint8_t  *hash_key;
1548         uint32_t rss_key;
1549         uint32_t mrqc;
1550         uint64_t rss_hf;
1551         uint16_t i;
1552
1553         hash_key = rss_conf->rss_key;
1554         if (hash_key != NULL) {
1555                 /* Fill in RSS hash key */
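                /*
                 * Each of the ten 32-bit RSSRK registers holds four key bytes
                 * in little-endian order, e.g.
                 * RSSRK[0] = key[3]<<24 | key[2]<<16 | key[1]<<8 | key[0].
                 */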
1556                 for (i = 0; i < 10; i++) {
1557                         rss_key  = hash_key[(i * 4)];
1558                         rss_key |= hash_key[(i * 4) + 1] << 8;
1559                         rss_key |= hash_key[(i * 4) + 2] << 16;
1560                         rss_key |= hash_key[(i * 4) + 3] << 24;
1561                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1562                 }
1563         }
1564
1565         /* Set configured hashing protocols in MRQC register */
1566         rss_hf = rss_conf->rss_hf;
1567         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1568         if (rss_hf & ETH_RSS_IPV4)
1569                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1570         if (rss_hf & ETH_RSS_IPV4_TCP)
1571                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1572         if (rss_hf & ETH_RSS_IPV6)
1573                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1574         if (rss_hf & ETH_RSS_IPV6_EX)
1575                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1576         if (rss_hf & ETH_RSS_IPV6_TCP)
1577                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1578         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1579                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1580         if (rss_hf & ETH_RSS_IPV4_UDP)
1581                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1582         if (rss_hf & ETH_RSS_IPV6_UDP)
1583                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1584         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1585                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1586         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1587 }
1588
1589 int
1590 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1591                         struct rte_eth_rss_conf *rss_conf)
1592 {
1593         struct e1000_hw *hw;
1594         uint32_t mrqc;
1595         uint64_t rss_hf;
1596
1597         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598
1599         /*
1600          * Before changing anything, first check that the update RSS operation
1601          * does not attempt to disable RSS, if RSS was enabled at
1602          * initialization time, or does not attempt to enable RSS, if RSS was
1603          * disabled at initialization time.
1604          */
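        /*
         * A typical (hypothetical) caller would go through the ethdev API,
         * for example:
         *
         *     struct rte_eth_rss_conf conf = {
         *             .rss_key = NULL,    (keep the currently programmed key)
         *             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
         *     };
         *     rte_eth_dev_rss_hash_update(port_id, &conf);
         */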
1605         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1606         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1607         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1608                 if (rss_hf != 0) /* Enable RSS */
1609                         return -(EINVAL);
1610                 return 0; /* Nothing to do */
1611         }
1612         /* RSS enabled */
1613         if (rss_hf == 0) /* Disable RSS */
1614                 return -(EINVAL);
1615         igb_hw_rss_hash_set(hw, rss_conf);
1616         return 0;
1617 }
1618
1619 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1620                               struct rte_eth_rss_conf *rss_conf)
1621 {
1622         struct e1000_hw *hw;
1623         uint8_t *hash_key;
1624         uint32_t rss_key;
1625         uint32_t mrqc;
1626         uint64_t rss_hf;
1627         uint16_t i;
1628
1629         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1630         hash_key = rss_conf->rss_key;
1631         if (hash_key != NULL) {
1632                 /* Return RSS hash key */
1633                 for (i = 0; i < 10; i++) {
1634                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1635                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1636                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1637                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1638                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1639                 }
1640         }
1641
1642         /* Get RSS functions configured in MRQC register */
1643         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1644         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1645                 rss_conf->rss_hf = 0;
1646                 return 0;
1647         }
1648         rss_hf = 0;
1649         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1650                 rss_hf |= ETH_RSS_IPV4;
1651         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1652                 rss_hf |= ETH_RSS_IPV4_TCP;
1653         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1654                 rss_hf |= ETH_RSS_IPV6;
1655         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1656                 rss_hf |= ETH_RSS_IPV6_EX;
1657         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1658                 rss_hf |= ETH_RSS_IPV6_TCP;
1659         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1660                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1661         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1662                 rss_hf |= ETH_RSS_IPV4_UDP;
1663         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1664                 rss_hf |= ETH_RSS_IPV6_UDP;
1665         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1666                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1667         rss_conf->rss_hf = rss_hf;
1668         return 0;
1669 }
1670
1671 static void
1672 igb_rss_configure(struct rte_eth_dev *dev)
1673 {
1674         struct rte_eth_rss_conf rss_conf;
1675         struct e1000_hw *hw;
1676         uint32_t shift;
1677         uint16_t i;
1678
1679         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1680
1681         /* Fill in redirection table. */
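        /*
         * Each 32-bit RETA register holds four one-byte entries: four queue
         * indices are accumulated in 'reta' and written out on every fourth
         * iteration. With more than one RX queue, the 128 hash buckets are
         * spread round-robin (i % nb_rx_queues) across the queues.
         */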
1682         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1683         for (i = 0; i < 128; i++) {
1684                 union e1000_reta {
1685                         uint32_t dword;
1686                         uint8_t  bytes[4];
1687                 } reta;
1688                 uint8_t q_idx;
1689
1690                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1691                                    i % dev->data->nb_rx_queues : 0);
1692                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1693                 if ((i & 3) == 3)
1694                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1695         }
1696
1697         /*
1698          * Configure the RSS key and the RSS protocols used to compute
1699          * the RSS hash of input packets.
1700          */
1701         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1702         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1703                 igb_rss_disable(dev);
1704                 return;
1705         }
1706         if (rss_conf.rss_key == NULL)
1707                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1708         igb_hw_rss_hash_set(hw, &rss_conf);
1709 }
1710
1711 /*
1712  * Check whether the MAC type supports VMDq.
1713  * Return 1 if it does, otherwise return 0.
1714  */
1715 static int
1716 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1717 {
1718         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1719
1720         switch (hw->mac.type) {
1721         case e1000_82576:
1722         case e1000_82580:
1723         case e1000_i350:
1724                 return 1;
1725         case e1000_82540:
1726         case e1000_82541:
1727         case e1000_82542:
1728         case e1000_82543:
1729         case e1000_82544:
1730         case e1000_82545:
1731         case e1000_82546:
1732         case e1000_82547:
1733         case e1000_82571:
1734         case e1000_82572:
1735         case e1000_82573:
1736         case e1000_82574:
1737         case e1000_82583:
1738         case e1000_i210:
1739         case e1000_i211:
1740         default:
1741                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1742                 return 0;
1743         }
1744 }
1745
1746 static int
1747 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1748 {
1749         struct rte_eth_vmdq_rx_conf *cfg;
1750         struct e1000_hw *hw;
1751         uint32_t mrqc, vt_ctl, vmolr, rctl;
1752         int i;
1753
1754         PMD_INIT_FUNC_TRACE();
1755
1756         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1757         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1758
1759         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1760         if (igb_is_vmdq_supported(dev) == 0)
1761                 return -1;
1762
1763         igb_rss_disable(dev);
1764
1765         /* RCTL: enable VLAN filter */
1766         rctl = E1000_READ_REG(hw, E1000_RCTL);
1767         rctl |= E1000_RCTL_VFE;
1768         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1769
1770         /* MRQC: enable vmdq */
1771         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1772         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1773         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1774
1775         /* VTCTL:  pool selection according to VLAN tag */
1776         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1777         if (cfg->enable_default_pool)
1778                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1779         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1780         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1781
1782         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1783                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1784                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1785                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1786                         E1000_VMOLR_MPME);
1787
1788                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1789                         vmolr |= E1000_VMOLR_AUPE;
1790                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1791                         vmolr |= E1000_VMOLR_ROMPE;
1792                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1793                         vmolr |= E1000_VMOLR_ROPE;
1794                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1795                         vmolr |= E1000_VMOLR_BAM;
1796                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1797                         vmolr |= E1000_VMOLR_MPME;
1798
1799                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1800         }
1801
1802         /*
1803          * VMOLR: set STRVLAN when IGMAC is set in VT_CTL.
1804          * Both the 82576 and the 82580 support this.
1805          */
1806         if (hw->mac.type != e1000_i350) {
1807                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1808                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1809                         vmolr |= E1000_VMOLR_STRVLAN;
1810                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1811                 }
1812         }
1813
1814         /* VFTA - enable all vlan filters */
1815         for (i = 0; i < IGB_VFTA_SIZE; i++)
1816                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1817
1818         /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
1819         if (hw->mac.type != e1000_82580)
1820                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1821
1822         /*
1823          * RAH/RAL - allow pools to read specific mac addresses
1824          * In this case, all pools should be able to read from mac addr 0
1825          */
1826         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1827         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1828
1829         /* VLVF: set up filters for vlan tags as configured */
1830         for (i = 0; i < cfg->nb_pool_maps; i++) {
1831                 /* set vlan id in VF register and set the valid bit */
1832                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1833                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1834                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1835                         E1000_VLVF_POOLSEL_MASK)));
1836         }
1837
1838         E1000_WRITE_FLUSH(hw);
1839
1840         return 0;
1841 }
1842
1843
1844 /*********************************************************************
1845  *
1846  *  Enable receive unit.
1847  *
1848  **********************************************************************/
1849
1850 static int
1851 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1852 {
1853         struct igb_rx_entry *rxe = rxq->sw_ring;
1854         uint64_t dma_addr;
1855         unsigned i;
1856
1857         /* Initialize software ring entries. */
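        /*
         * Both the header and the packet address of each descriptor point at
         * the mbuf data buffer: the queues are programmed for the advanced
         * one-buffer descriptor format, so no header split takes place.
         */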
1858         for (i = 0; i < rxq->nb_rx_desc; i++) {
1859                 volatile union e1000_adv_rx_desc *rxd;
1860                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1861
1862                 if (mbuf == NULL) {
1863                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1864                                      "queue_id=%hu", rxq->queue_id);
1865                         return (-ENOMEM);
1866                 }
1867                 dma_addr =
1868                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1869                 rxd = &rxq->rx_ring[i];
1870                 rxd->read.hdr_addr = dma_addr;
1871                 rxd->read.pkt_addr = dma_addr;
1872                 rxe[i].mbuf = mbuf;
1873         }
1874
1875         return 0;
1876 }
1877
1878 #define E1000_MRQC_DEF_Q_SHIFT               (3)
1879 static int
1880 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1881 {
1882         struct e1000_hw *hw =
1883                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1884         uint32_t mrqc;
1885
1886         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1887                 /*
1888                  * SRIOV active scheme
1889                  * FIXME if support RSS together with VMDq & SRIOV
1890                  */
1891                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1892                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1893                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1894                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1895         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1896                 /*
1897                  * SRIOV inactive scheme
1898                  */
1899                 switch (dev->data->dev_conf.rxmode.mq_mode) {
1900                         case ETH_MQ_RX_RSS:
1901                                 igb_rss_configure(dev);
1902                                 break;
1903                         case ETH_MQ_RX_VMDQ_ONLY:
1904                                 /* Configure general VMDq-only RX parameters */
1905                                 igb_vmdq_rx_hw_configure(dev);
1906                                 break;
1907                         case ETH_MQ_RX_NONE:
1908                                 /* If mq_mode is none, disable RSS. */
1909                         default:
1910                                 igb_rss_disable(dev);
1911                                 break;
1912                 }
1913         }
1914
1915         return 0;
1916 }
1917
1918 int
1919 eth_igb_rx_init(struct rte_eth_dev *dev)
1920 {
1921         struct e1000_hw     *hw;
1922         struct igb_rx_queue *rxq;
1923         struct rte_pktmbuf_pool_private *mbp_priv;
1924         uint32_t rctl;
1925         uint32_t rxcsum;
1926         uint32_t srrctl;
1927         uint16_t buf_size;
1928         uint16_t rctl_bsize;
1929         uint16_t i;
1930         int ret;
1931
1932         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1933         srrctl = 0;
1934
1935         /*
1936          * Make sure receives are disabled while setting
1937          * up the descriptor ring.
1938          */
1939         rctl = E1000_READ_REG(hw, E1000_RCTL);
1940         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1941
1942         /*
1943          * Configure support of jumbo frames, if any.
1944          */
1945         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1946                 rctl |= E1000_RCTL_LPE;
1947
1948                 /*
1949                  * Set maximum packet length by default, and might be updated
1950                  * together with enabling/disabling dual VLAN.
1951                  */
1952                 E1000_WRITE_REG(hw, E1000_RLPML,
1953                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1954                                                 VLAN_TAG_SIZE);
1955         } else
1956                 rctl &= ~E1000_RCTL_LPE;
1957
1958         /* Configure and enable each RX queue. */
1959         rctl_bsize = 0;
1960         dev->rx_pkt_burst = eth_igb_recv_pkts;
1961         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1962                 uint64_t bus_addr;
1963                 uint32_t rxdctl;
1964
1965                 rxq = dev->data->rx_queues[i];
1966
1967                 /* Allocate buffers for descriptor rings and set up queue */
1968                 ret = igb_alloc_rx_queue_mbufs(rxq);
1969                 if (ret)
1970                         return ret;
1971
1972                 /*
1973                  * Reset crc_len in case it was changed after queue setup by a
1974                  *  call to configure
1975                  */
1976                 rxq->crc_len =
1977                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1978                                                         0 : ETHER_CRC_LEN);
1979
1980                 bus_addr = rxq->rx_ring_phys_addr;
1981                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1982                                 rxq->nb_rx_desc *
1983                                 sizeof(union e1000_adv_rx_desc));
1984                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1985                                 (uint32_t)(bus_addr >> 32));
1986                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1987
1988                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1989
1990                 /*
1991                  * Configure RX buffer size.
1992                  */
1993                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1994                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1995                                        RTE_PKTMBUF_HEADROOM);
1996                 if (buf_size >= 1024) {
1997                         /*
1998                          * Configure the BSIZEPACKET field of the SRRCTL
1999                          * register of the queue.
2000                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2001                          * If this field is equal to 0b, then RCTL.BSIZE
2002                          * determines the RX packet buffer size.
2003                          */
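                        /*
                         * For example, a 2048-byte data room (after the mbuf
                         * headroom) yields BSIZEPACKET = 2, i.e. a 2 KB HW
                         * buffer; 1700 bytes would round down to 1 KB.
                         */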
2004                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2005                                    E1000_SRRCTL_BSIZEPKT_MASK);
2006                         buf_size = (uint16_t) ((srrctl &
2007                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2008                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2009
2010                         /* Add the dual VLAN tag length to support double VLAN frames */
2011                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2012                                                 2 * VLAN_TAG_SIZE) > buf_size){
2013                                 if (!dev->data->scattered_rx)
2014                                         PMD_INIT_LOG(DEBUG,
2015                                                      "forcing scatter mode");
2016                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2017                                 dev->data->scattered_rx = 1;
2018                         }
2019                 } else {
2020                         /*
2021                          * Use BSIZE field of the device RCTL register.
2022                          */
2023                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2024                                 rctl_bsize = buf_size;
2025                         if (!dev->data->scattered_rx)
2026                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2027                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2028                         dev->data->scattered_rx = 1;
2029                 }
2030
2031                 /* Set if packets are dropped when no descriptors available */
2032                 if (rxq->drop_en)
2033                         srrctl |= E1000_SRRCTL_DROP_EN;
2034
2035                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2036
2037                 /* Enable this RX queue. */
2038                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2039                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2040                 rxdctl &= 0xFFF00000;
2041                 rxdctl |= (rxq->pthresh & 0x1F);
2042                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2043                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2044                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2045         }
2046
2047         if (dev->data->dev_conf.rxmode.enable_scatter) {
2048                 if (!dev->data->scattered_rx)
2049                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2050                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2051                 dev->data->scattered_rx = 1;
2052         }
2053
2054         /*
2055          * Setup BSIZE field of RCTL register, if needed.
2056          * Buffer sizes >= 1024 are not supposed to be set up in the RCTL
2057          * register, since the code above configures the SRRCTL register of
2058          * the RX queue in such a case.
2059          * All configurable sizes are:
2060          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2061          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2062          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2063          *  2048: rctl |= E1000_RCTL_SZ_2048;
2064          *  1024: rctl |= E1000_RCTL_SZ_1024;
2065          *   512: rctl |= E1000_RCTL_SZ_512;
2066          *   256: rctl |= E1000_RCTL_SZ_256;
2067          */
2068         if (rctl_bsize > 0) {
2069                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2070                         rctl |= E1000_RCTL_SZ_512;
2071                 else /* 256 <= buf_size < 512 - use 256 */
2072                         rctl |= E1000_RCTL_SZ_256;
2073         }
2074
2075         /*
2076          * Configure RSS if device configured with multiple RX queues.
2077          */
2078         igb_dev_mq_rx_configure(dev);
2079
2080         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2081         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2082
2083         /*
2084          * Setup the Checksum Register.
2085          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2086          */
2087         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2088         rxcsum |= E1000_RXCSUM_PCSD;
2089
2090         /* Enable both L3/L4 rx checksum offload */
2091         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2092                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2093         else
2094                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2095         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2096
2097         /* Setup the Receive Control Register. */
2098         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2099                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2100
2101                 /* set STRCRC bit in all queues */
2102                 if (hw->mac.type == e1000_i350 ||
2103                     hw->mac.type == e1000_i210 ||
2104                     hw->mac.type == e1000_i211 ||
2105                     hw->mac.type == e1000_i354) {
2106                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2107                                 rxq = dev->data->rx_queues[i];
2108                                 uint32_t dvmolr = E1000_READ_REG(hw,
2109                                         E1000_DVMOLR(rxq->reg_idx));
2110                                 dvmolr |= E1000_DVMOLR_STRCRC;
2111                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2112                         }
2113                 }
2114         } else {
2115                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2116
2117                 /* clear STRCRC bit in all queues */
2118                 if (hw->mac.type == e1000_i350 ||
2119                     hw->mac.type == e1000_i210 ||
2120                     hw->mac.type == e1000_i211 ||
2121                     hw->mac.type == e1000_i354) {
2122                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2123                                 rxq = dev->data->rx_queues[i];
2124                                 uint32_t dvmolr = E1000_READ_REG(hw,
2125                                         E1000_DVMOLR(rxq->reg_idx));
2126                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2127                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2128                         }
2129                 }
2130         }
2131
2132         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2133         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2134                 E1000_RCTL_RDMTS_HALF |
2135                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2136
2137         /* Make sure VLAN Filters are off. */
2138         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2139                 rctl &= ~E1000_RCTL_VFE;
2140         /* Don't store bad packets. */
2141         rctl &= ~E1000_RCTL_SBP;
2142
2143         /* Enable Receives. */
2144         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2145
2146         /*
2147          * Setup the HW Rx Head and Tail Descriptor Pointers.
2148          * This needs to be done after enable.
2149          */
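        /*
         * Head starts at 0 and the tail is set to the last descriptor index,
         * making (almost) the whole freshly populated ring available to the
         * hardware.
         */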
2150         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2151                 rxq = dev->data->rx_queues[i];
2152                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2153                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2154         }
2155
2156         return 0;
2157 }
2158
2159 /*********************************************************************
2160  *
2161  *  Enable transmit unit.
2162  *
2163  **********************************************************************/
2164 void
2165 eth_igb_tx_init(struct rte_eth_dev *dev)
2166 {
2167         struct e1000_hw     *hw;
2168         struct igb_tx_queue *txq;
2169         uint32_t tctl;
2170         uint32_t txdctl;
2171         uint16_t i;
2172
2173         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2174
2175         /* Setup the Base and Length of the Tx Descriptor Rings. */
2176         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2177                 uint64_t bus_addr;
2178                 txq = dev->data->tx_queues[i];
2179                 bus_addr = txq->tx_ring_phys_addr;
2180
2181                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2182                                 txq->nb_tx_desc *
2183                                 sizeof(union e1000_adv_tx_desc));
2184                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2185                                 (uint32_t)(bus_addr >> 32));
2186                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2187
2188                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2189                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2190                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2191
2192                 /* Setup Transmit threshold registers. */
2193                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2194                 txdctl |= txq->pthresh & 0x1F;
2195                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2196                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2197                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2198                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2199         }
2200
2201         /* Program the Transmit Control Register. */
2202         tctl = E1000_READ_REG(hw, E1000_TCTL);
2203         tctl &= ~E1000_TCTL_CT;
2204         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2205                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2206
2207         e1000_config_collision_dist(hw);
2208
2209         /* This write will effectively turn on the transmit unit. */
2210         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2211 }
2212
2213 /*********************************************************************
2214  *
2215  *  Enable VF receive unit.
2216  *
2217  **********************************************************************/
2218 int
2219 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2220 {
2221         struct e1000_hw     *hw;
2222         struct igb_rx_queue *rxq;
2223         struct rte_pktmbuf_pool_private *mbp_priv;
2224         uint32_t srrctl;
2225         uint16_t buf_size;
2226         uint16_t rctl_bsize;
2227         uint16_t i;
2228         int ret;
2229
2230         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2231
2232         /* setup MTU */
2233         e1000_rlpml_set_vf(hw,
2234                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2235                 VLAN_TAG_SIZE));
2236
2237         /* Configure and enable each RX queue. */
2238         rctl_bsize = 0;
2239         dev->rx_pkt_burst = eth_igb_recv_pkts;
2240         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2241                 uint64_t bus_addr;
2242                 uint32_t rxdctl;
2243
2244                 rxq = dev->data->rx_queues[i];
2245
2246                 /* Allocate buffers for descriptor rings and set up queue */
2247                 ret = igb_alloc_rx_queue_mbufs(rxq);
2248                 if (ret)
2249                         return ret;
2250
2251                 bus_addr = rxq->rx_ring_phys_addr;
2252                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2253                                 rxq->nb_rx_desc *
2254                                 sizeof(union e1000_adv_rx_desc));
2255                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2256                                 (uint32_t)(bus_addr >> 32));
2257                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2258
2259                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2260
2261                 /*
2262                  * Configure RX buffer size.
2263                  */
2264                 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2265                 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2266                                        RTE_PKTMBUF_HEADROOM);
2267                 if (buf_size >= 1024) {
2268                         /*
2269                          * Configure the BSIZEPACKET field of the SRRCTL
2270                          * register of the queue.
2271                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2272                          * If this field is equal to 0b, then RCTL.BSIZE
2273                          * determines the RX packet buffer size.
2274                          */
2275                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2276                                    E1000_SRRCTL_BSIZEPKT_MASK);
2277                         buf_size = (uint16_t) ((srrctl &
2278                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2279                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2280
2281                         /* Add the dual VLAN tag length to support double VLAN frames */
2282                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2283                                                 2 * VLAN_TAG_SIZE) > buf_size){
2284                                 if (!dev->data->scattered_rx)
2285                                         PMD_INIT_LOG(DEBUG,
2286                                                      "forcing scatter mode");
2287                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2288                                 dev->data->scattered_rx = 1;
2289                         }
2290                 } else {
2291                         /*
2292                          * Use BSIZE field of the device RCTL register.
2293                          */
2294                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2295                                 rctl_bsize = buf_size;
2296                         if (!dev->data->scattered_rx)
2297                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2298                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2299                         dev->data->scattered_rx = 1;
2300                 }
2301
2302                 /* Set if packets are dropped when no descriptors available */
2303                 if (rxq->drop_en)
2304                         srrctl |= E1000_SRRCTL_DROP_EN;
2305
2306                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2307
2308                 /* Enable this RX queue. */
2309                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2310                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2311                 rxdctl &= 0xFFF00000;
2312                 rxdctl |= (rxq->pthresh & 0x1F);
2313                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2314                 if (hw->mac.type == e1000_vfadapt) {
2315                         /*
2316                          * Workaround for an 82576 VF erratum:
2317                          * force WTHRESH to 1 so that descriptor
2318                          * write-back is always triggered.
2319                          */
2320                         rxdctl |= 0x10000;
2321                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2322                 }
2323                 else
2324                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2325                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2326         }
2327
2328         if (dev->data->dev_conf.rxmode.enable_scatter) {
2329                 if (!dev->data->scattered_rx)
2330                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2331                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2332                 dev->data->scattered_rx = 1;
2333         }
2334
2335         /*
2336          * Setup the HW Rx Head and Tail Descriptor Pointers.
2337          * This needs to be done after enable.
2338          */
2339         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2340                 rxq = dev->data->rx_queues[i];
2341                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2342                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2343         }
2344
2345         return 0;
2346 }
2347
2348 /*********************************************************************
2349  *
2350  *  Enable VF transmit unit.
2351  *
2352  **********************************************************************/
2353 void
2354 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2355 {
2356         struct e1000_hw     *hw;
2357         struct igb_tx_queue *txq;
2358         uint32_t txdctl;
2359         uint16_t i;
2360
2361         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2362
2363         /* Setup the Base and Length of the Tx Descriptor Rings. */
2364         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2365                 uint64_t bus_addr;
2366
2367                 txq = dev->data->tx_queues[i];
2368                 bus_addr = txq->tx_ring_phys_addr;
2369                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2370                                 txq->nb_tx_desc *
2371                                 sizeof(union e1000_adv_tx_desc));
2372                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2373                                 (uint32_t)(bus_addr >> 32));
2374                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2375
2376                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2377                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2378                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2379
2380                 /* Setup Transmit threshold registers. */
2381                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2382                 txdctl |= txq->pthresh & 0x1F;
2383                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2384                 if (hw->mac.type == e1000_82576) {
2385                         /*
2386                          * Workaround for an 82576 VF erratum:
2387                          * force WTHRESH to 1 so that descriptor
2388                          * write-back is always triggered.
2389                          */
2390                         txdctl |= 0x10000;
2391                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2392                 }
2393                 else
2394                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2395                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2396                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2397         }
2398
2399 }
2400