net/e1000: fix memzone leak on queue re-configure
[dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_net.h>
40 #include <rte_string_fns.h>
41
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
45
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
48 #else
49 #define IGB_TX_IEEE1588_TMST 0
50 #endif
51 /* Bit mask to indicate which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK (                    \
53                 PKT_TX_OUTER_IPV6 |      \
54                 PKT_TX_OUTER_IPV4 |      \
55                 PKT_TX_IPV6 |            \
56                 PKT_TX_IPV4 |            \
57                 PKT_TX_VLAN_PKT |                \
58                 PKT_TX_IP_CKSUM |                \
59                 PKT_TX_L4_MASK |                 \
60                 PKT_TX_TCP_SEG |                 \
61                 IGB_TX_IEEE1588_TMST)
62
63 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
64                 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
65
66 /**
67  * Structure associated with each descriptor of the RX ring of a RX queue.
68  */
69 struct igb_rx_entry {
70         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
71 };
72
73 /**
74  * Structure associated with each descriptor of the TX ring of a TX queue.
75  */
76 struct igb_tx_entry {
77         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
78         uint16_t next_id; /**< Index of next descriptor in ring. */
79         uint16_t last_id; /**< Index of last scattered descriptor. */
80 };
81
82 /**
83  * rx queue flags
84  */
85 enum igb_rxq_flags {
86         IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
87 };
88
89 /**
90  * Structure associated with each RX queue.
91  */
92 struct igb_rx_queue {
93         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
94         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
95         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
96         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
97         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
98         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
99         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
100         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
101         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
102         uint16_t            rx_tail;    /**< current value of RDT register. */
103         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
104         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
105         uint16_t            queue_id;   /**< RX queue index. */
106         uint16_t            reg_idx;    /**< RX queue register index. */
107         uint16_t            port_id;    /**< Device port identifier. */
108         uint8_t             pthresh;    /**< Prefetch threshold register. */
109         uint8_t             hthresh;    /**< Host threshold register. */
110         uint8_t             wthresh;    /**< Write-back threshold register. */
111         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
112         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
113         uint32_t            flags;      /**< RX flags. */
114         uint64_t            offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
115         const struct rte_memzone *mz;
116 };
117
118 /**
119  * Hardware context number
120  */
121 enum igb_advctx_num {
122         IGB_CTX_0    = 0, /**< CTX0    */
123         IGB_CTX_1    = 1, /**< CTX1    */
124         IGB_CTX_NUM  = 2, /**< CTX_NUM */
125 };
126
127 /** Offload features */
128 union igb_tx_offload {
129         uint64_t data;
130         struct {
131                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
132                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
133                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
134                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
135                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
136
137                 /* uint64_t unused:8; */
138         };
139 };
140
141 /*
142  * Compare masks for igb_tx_offload.data; they must stay in sync with the
143  * igb_tx_offload layout (a cross-check follows the definitions below).
144  */
145 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
146 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
147 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
148 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
149 /** Mac + IP + TCP + Mss mask. */
150 #define TX_TSO_CMP_MASK \
151         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
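/*
 * Cross-check of the compare masks against the igb_tx_offload bit-field
 * declared above (assuming the usual low-bit-first packing used on the
 * little-endian platforms this driver targets):
 *
 *   bits  0..8   l3_len    \_ 0x000000000000FFFFULL  TX_MACIP_LEN_CMP_MASK
 *   bits  9..15  l2_len    /
 *   bits 16..31  vlan_tci  -- 0x00000000FFFF0000ULL  TX_VLAN_CMP_MASK
 *   bits 32..39  l4_len    -- 0x000000FF00000000ULL  TX_TCP_LEN_CMP_MASK
 *   bits 40..55  tso_segsz -- 0x00FFFF0000000000ULL  TX_TSO_MSS_CMP_MASK
 */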
152
153 /**
154  * Structure used to check whether a new context descriptor needs to be built
155  */
156 struct igb_advctx_info {
157         uint64_t flags;           /**< ol_flags related to context build. */
158         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
159         union igb_tx_offload tx_offload;
160         /** compare mask for tx offload. */
161         union igb_tx_offload tx_offload_mask;
162 };
163
164 /**
165  * Structure associated with each TX queue.
166  */
167 struct igb_tx_queue {
168         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
169         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
170         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
171         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
172         uint32_t               txd_type;      /**< Device-specific TXD type */
173         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
174         uint16_t               tx_tail; /**< Current value of TDT register. */
175         uint16_t               tx_head;
176         /**< Index of first used TX descriptor. */
177         uint16_t               queue_id; /**< TX queue index. */
178         uint16_t               reg_idx;  /**< TX queue register index. */
179         uint16_t               port_id;  /**< Device port identifier. */
180         uint8_t                pthresh;  /**< Prefetch threshold register. */
181         uint8_t                hthresh;  /**< Host threshold register. */
182         uint8_t                wthresh;  /**< Write-back threshold register. */
183         uint32_t               ctx_curr;
184         /**< Currently used hardware context. */
185         uint32_t               ctx_start;
186         /**< Start context position for transmit queue. */
187         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
188         /**< Hardware context history.*/
189         uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
190         const struct rte_memzone *mz;
191 };
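/*
 * Note on the 'mz' members of igb_rx_queue and igb_tx_queue above: they
 * remember the memzone backing the descriptor ring so that the queue release
 * paths below can hand it back with rte_memzone_free().  Presumably the
 * corresponding queue setup code stores the zone it reserves for the ring
 * here; without keeping that reference the zone would leak every time a
 * queue is re-configured, which is what this change addresses.
 */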
192
193 #if 1
194 #define RTE_PMD_USE_PREFETCH
195 #endif
196
197 #ifdef RTE_PMD_USE_PREFETCH
198 #define rte_igb_prefetch(p)     rte_prefetch0(p)
199 #else
200 #define rte_igb_prefetch(p)     do {} while(0)
201 #endif
202
203 #ifdef RTE_PMD_PACKET_PREFETCH
204 #define rte_packet_prefetch(p) rte_prefetch1(p)
205 #else
206 #define rte_packet_prefetch(p)  do {} while(0)
207 #endif
208
209 /*
210  * Macro for VMDq feature for 1 GbE NIC.
211  */
212 #define E1000_VMOLR_SIZE                        (8)
213 #define IGB_TSO_MAX_HDRLEN                      (512)
214 #define IGB_TSO_MAX_MSS                         (9216)
215
216 /*********************************************************************
217  *
218  *  TX function
219  *
220  **********************************************************************/
221
222 /*
223  * There are some hardware limitations for TCP segmentation offload, so
224  * check whether the requested parameters are valid.
225  */
226 static inline uint64_t
227 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
228 {
229         if (!(ol_req & PKT_TX_TCP_SEG))
230                 return ol_req;
231         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
232                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
233                 ol_req &= ~PKT_TX_TCP_SEG;
234                 ol_req |= PKT_TX_TCP_CKSUM;
235         }
236         return ol_req;
237 }
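/*
 * Worked example of the fallback above: a request with l2_len = 14,
 * l3_len = 20, l4_len = 20 and tso_segsz = 1448 keeps PKT_TX_TCP_SEG
 * (headers total 54 <= IGB_TSO_MAX_HDRLEN and 1448 <= IGB_TSO_MAX_MSS),
 * whereas tso_segsz = 16000 exceeds IGB_TSO_MAX_MSS, so the request is
 * downgraded to a plain PKT_TX_TCP_CKSUM offload.
 */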
238
239 /*
240  * Advanced context descriptors are almost the same between igb and ixgbe.
241  * This is kept as a separate function, leaving room for optimization here;
242  * rework is required to go with the pre-defined values.
243  */
244
245 static inline void
246 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
247                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
248                 uint64_t ol_flags, union igb_tx_offload tx_offload)
249 {
250         uint32_t type_tucmd_mlhl;
251         uint32_t mss_l4len_idx;
252         uint32_t ctx_idx, ctx_curr;
253         uint32_t vlan_macip_lens;
254         union igb_tx_offload tx_offload_mask;
255
256         ctx_curr = txq->ctx_curr;
257         ctx_idx = ctx_curr + txq->ctx_start;
258
259         tx_offload_mask.data = 0;
260         type_tucmd_mlhl = 0;
261
262         /* Specify which HW CTX to upload. */
263         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264
265         if (ol_flags & PKT_TX_VLAN_PKT)
266                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
267
268         /* check if TCP segmentation required for this packet */
269         if (ol_flags & PKT_TX_TCP_SEG) {
270                 /* implies IP cksum in IPv4 */
271                 if (ol_flags & PKT_TX_IP_CKSUM)
272                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
273                                 E1000_ADVTXD_TUCMD_L4T_TCP |
274                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
275                 else
276                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
277                                 E1000_ADVTXD_TUCMD_L4T_TCP |
278                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
279
280                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
281                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
282                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
283         } else { /* no TSO, check if hardware checksum is needed */
284                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
285                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
286
287                 if (ol_flags & PKT_TX_IP_CKSUM)
288                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
289
290                 switch (ol_flags & PKT_TX_L4_MASK) {
291                 case PKT_TX_UDP_CKSUM:
292                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
293                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
294                         mss_l4len_idx |= sizeof(struct rte_udp_hdr)
295                                 << E1000_ADVTXD_L4LEN_SHIFT;
296                         break;
297                 case PKT_TX_TCP_CKSUM:
298                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
299                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
300                         mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
301                                 << E1000_ADVTXD_L4LEN_SHIFT;
302                         break;
303                 case PKT_TX_SCTP_CKSUM:
304                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
305                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
306                         mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
307                                 << E1000_ADVTXD_L4LEN_SHIFT;
308                         break;
309                 default:
310                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
311                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
312                         break;
313                 }
314         }
315
316         txq->ctx_cache[ctx_curr].flags = ol_flags;
317         txq->ctx_cache[ctx_curr].tx_offload.data =
318                 tx_offload_mask.data & tx_offload.data;
319         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
320
321         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
322         vlan_macip_lens = (uint32_t)tx_offload.data;
323         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
324         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
325         ctx_txd->u.seqnum_seed = 0;
326 }
327
328 /*
329  * Check which hardware context can be used. Use the existing match
330  * or create a new context descriptor.
331  */
332 static inline uint32_t
333 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
334                 union igb_tx_offload tx_offload)
335 {
336         /* If match with the current context */
337         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
338                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
339                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
340                         return txq->ctx_curr;
341         }
342
343         /* If match with the second context */
344         txq->ctx_curr ^= 1;
345         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
346                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
347                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
348                         return txq->ctx_curr;
349         }
350
351         /* No match: a new context descriptor has to be built. */
352         return IGB_CTX_NUM;
353 }
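/*
 * Typical use of the two-entry context cache in the transmit path below
 * (a simplified sketch of what eth_igb_xmit_pkts() does, not extra logic):
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *     new_ctx = (ctx == IGB_CTX_NUM);   // no cached slot matches
 *     if (new_ctx)
 *             igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
 *
 * A return value below IGB_CTX_NUM means the matching cached context can be
 * reused as-is; IGB_CTX_NUM means a fresh context descriptor must be written,
 * consuming one extra descriptor in the ring.
 */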
354
355 static inline uint32_t
356 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
357 {
358         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
359         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
360         uint32_t tmp;
361
362         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
363         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
364         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
365         return tmp;
366 }
367
368 static inline uint32_t
369 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
370 {
371         uint32_t cmdtype;
372         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
373         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
374         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
375         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
376         return cmdtype;
377 }
378
379 uint16_t
380 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
381                uint16_t nb_pkts)
382 {
383         struct igb_tx_queue *txq;
384         struct igb_tx_entry *sw_ring;
385         struct igb_tx_entry *txe, *txn;
386         volatile union e1000_adv_tx_desc *txr;
387         volatile union e1000_adv_tx_desc *txd;
388         struct rte_mbuf     *tx_pkt;
389         struct rte_mbuf     *m_seg;
390         uint64_t buf_dma_addr;
391         uint32_t olinfo_status;
392         uint32_t cmd_type_len;
393         uint32_t pkt_len;
394         uint16_t slen;
395         uint64_t ol_flags;
396         uint16_t tx_end;
397         uint16_t tx_id;
398         uint16_t tx_last;
399         uint16_t nb_tx;
400         uint64_t tx_ol_req;
401         uint32_t new_ctx = 0;
402         uint32_t ctx = 0;
403         union igb_tx_offload tx_offload = {0};
404
405         txq = tx_queue;
406         sw_ring = txq->sw_ring;
407         txr     = txq->tx_ring;
408         tx_id   = txq->tx_tail;
409         txe = &sw_ring[tx_id];
410
411         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
412                 tx_pkt = *tx_pkts++;
413                 pkt_len = tx_pkt->pkt_len;
414
415                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
416
417                 /*
418                  * The number of descriptors that must be allocated for a
419                  * packet is the number of segments of that packet, plus 1
420                  * Context Descriptor for the VLAN Tag Identifier, if any.
421                  * Determine the last TX descriptor to allocate in the TX ring
422                  * for the packet, starting from the current position (tx_id)
423                  * in the ring.
424                  */
425                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
426
427                 ol_flags = tx_pkt->ol_flags;
428                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
429
430                 /* Check whether a Context Descriptor needs to be built. */
431                 if (tx_ol_req) {
432                         tx_offload.l2_len = tx_pkt->l2_len;
433                         tx_offload.l3_len = tx_pkt->l3_len;
434                         tx_offload.l4_len = tx_pkt->l4_len;
435                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
436                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
437                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
438
439                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
440                         /* Only allocate a context descriptor if required. */
441                         new_ctx = (ctx == IGB_CTX_NUM);
442                         ctx = txq->ctx_curr + txq->ctx_start;
443                         tx_last = (uint16_t) (tx_last + new_ctx);
444                 }
445                 if (tx_last >= txq->nb_tx_desc)
446                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
447
448                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
449                            " tx_first=%u tx_last=%u",
450                            (unsigned) txq->port_id,
451                            (unsigned) txq->queue_id,
452                            (unsigned) pkt_len,
453                            (unsigned) tx_id,
454                            (unsigned) tx_last);
455
456                 /*
457                  * Check if there are enough free descriptors in the TX ring
458                  * to transmit the next packet.
459                  * This operation is based on the two following rules:
460                  *
461                  *   1- Only check that the last needed TX descriptor can be
462                  *      allocated (by construction, if that descriptor is free,
463                  *      all intermediate ones are also free).
464                  *
465                  *      For this purpose, the index of the last TX descriptor
466                  *      used for a packet (the "last descriptor" of a packet)
467                  *      is recorded in the TX entries (the last one included)
468                  *      that are associated with all TX descriptors allocated
469                  *      for that packet.
470                  *
471                  *   2- Avoid allocating the last free TX descriptor of the
472                  *      ring, in order to never set the TDT register with the
473                  *      same value stored in parallel by the NIC in the TDH
474                  *      register, which makes the TX engine of the NIC enter
475                  *      in a deadlock situation.
476                  *
477                  *      By extension, avoid allocating a free descriptor that
478                  *      belongs to the last set of free descriptors allocated
479                  *      to the same packet previously transmitted.
480                  */
481
482                 /*
483                  * The "last descriptor" of the packet that previously used
484                  * the descriptor slot we want as our last one, if any.
485                  */
486                 tx_end = sw_ring[tx_last].last_id;
487
488                 /*
489                  * The next descriptor following that "last descriptor" in the
490                  * ring.
491                  */
492                 tx_end = sw_ring[tx_end].next_id;
493
494                 /*
495                  * The "last descriptor" associated with that next descriptor.
496                  */
497                 tx_end = sw_ring[tx_end].last_id;
498
499                 /*
500                  * Check that this descriptor is free.
501                  */
502                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
503                         if (nb_tx == 0)
504                                 return 0;
505                         goto end_of_tx;
506                 }
507
508                 /*
509                  * Set common flags of all TX Data Descriptors.
510                  *
511                  * The following bits must be set in all Data Descriptors:
512                  *   - E1000_ADVTXD_DTYP_DATA
513                  *   - E1000_ADVTXD_DCMD_DEXT
514                  *
515                  * The following bits must be set in the first Data Descriptor
516                  * and are ignored in the other ones:
517                  *   - E1000_ADVTXD_DCMD_IFCS
518                  *   - E1000_ADVTXD_MAC_1588
519                  *   - E1000_ADVTXD_DCMD_VLE
520                  *
521                  * The following bits must only be set in the last Data
522                  * Descriptor:
523                  *   - E1000_TXD_CMD_EOP
524                  *
525                  * The following bits can be set in any Data Descriptor, but
526                  * are only set in the last Data Descriptor:
527                  *   - E1000_TXD_CMD_RS
528                  */
529                 cmd_type_len = txq->txd_type |
530                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
531                 if (tx_ol_req & PKT_TX_TCP_SEG)
532                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
533                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
534 #if defined(RTE_LIBRTE_IEEE1588)
535                 if (ol_flags & PKT_TX_IEEE1588_TMST)
536                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
537 #endif
538                 if (tx_ol_req) {
539                         /* Setup TX Advanced context descriptor if required */
540                         if (new_ctx) {
541                                 volatile struct e1000_adv_tx_context_desc *
542                                     ctx_txd;
543
544                                 ctx_txd = (volatile struct
545                                     e1000_adv_tx_context_desc *)
546                                     &txr[tx_id];
547
548                                 txn = &sw_ring[txe->next_id];
549                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
550
551                                 if (txe->mbuf != NULL) {
552                                         rte_pktmbuf_free_seg(txe->mbuf);
553                                         txe->mbuf = NULL;
554                                 }
555
556                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
557
558                                 txe->last_id = tx_last;
559                                 tx_id = txe->next_id;
560                                 txe = txn;
561                         }
562
563                         /* Setup the TX Advanced Data Descriptor */
564                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
565                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
566                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
567                 }
568
569                 m_seg = tx_pkt;
570                 do {
571                         txn = &sw_ring[txe->next_id];
572                         txd = &txr[tx_id];
573
574                         if (txe->mbuf != NULL)
575                                 rte_pktmbuf_free_seg(txe->mbuf);
576                         txe->mbuf = m_seg;
577
578                         /*
579                          * Set up transmit descriptor.
580                          */
581                         slen = (uint16_t) m_seg->data_len;
582                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
583                         txd->read.buffer_addr =
584                                 rte_cpu_to_le_64(buf_dma_addr);
585                         txd->read.cmd_type_len =
586                                 rte_cpu_to_le_32(cmd_type_len | slen);
587                         txd->read.olinfo_status =
588                                 rte_cpu_to_le_32(olinfo_status);
589                         txe->last_id = tx_last;
590                         tx_id = txe->next_id;
591                         txe = txn;
592                         m_seg = m_seg->next;
593                 } while (m_seg != NULL);
594
595                 /*
596                  * The last packet data descriptor needs End Of Packet (EOP)
597                  * and Report Status (RS).
598                  */
599                 txd->read.cmd_type_len |=
600                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
601         }
602  end_of_tx:
603         rte_wmb();
604
605         /*
606          * Set the Transmit Descriptor Tail (TDT).
607          */
608         E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
609         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
610                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
611                    (unsigned) tx_id, (unsigned) nb_tx);
612         txq->tx_tail = tx_id;
613
614         return nb_tx;
615 }
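/*
 * eth_igb_xmit_pkts() is normally installed as the device's tx_pkt_burst
 * callback, so applications reach it through the generic burst API rather
 * than calling it directly.  A minimal usage sketch (port_id/queue_id are
 * placeholders):
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb_ready = ...;    // mbufs prepared by the application
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ready);
 *     // mbufs not accepted (nb_sent < nb_ready) remain owned by the caller
 */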
616
617 /*********************************************************************
618  *
619  *  TX prep functions
620  *
621  **********************************************************************/
622 uint16_t
623 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
624                 uint16_t nb_pkts)
625 {
626         int i, ret;
627         struct rte_mbuf *m;
628
629         for (i = 0; i < nb_pkts; i++) {
630                 m = tx_pkts[i];
631
632                 /* Check some limitations for TSO in hardware */
633                 if (m->ol_flags & PKT_TX_TCP_SEG)
634                         if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
635                                         (m->l2_len + m->l3_len + m->l4_len >
636                                         IGB_TSO_MAX_HDRLEN)) {
637                                 rte_errno = EINVAL;
638                                 return i;
639                         }
640
641                 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
642                         rte_errno = ENOTSUP;
643                         return i;
644                 }
645
646 #ifdef RTE_ETHDEV_DEBUG_TX
647                 ret = rte_validate_tx_offload(m);
648                 if (ret != 0) {
649                         rte_errno = -ret;
650                         return i;
651                 }
652 #endif
653                 ret = rte_net_intel_cksum_prepare(m);
654                 if (ret != 0) {
655                         rte_errno = -ret;
656                         return i;
657                 }
658         }
659
660         return i;
661 }
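/*
 * eth_igb_prep_pkts() typically backs rte_eth_tx_prepare(): applications
 * using TSO or checksum offloads are expected to run a burst through
 * tx_prepare before tx_burst so that unsupported requests are rejected and
 * the pseudo-header checksums expected by Intel hardware are filled in.
 * Sketch (placeholders, minimal error handling):
 *
 *     uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *     if (nb_prep != nb)
 *             printf("pkt %u rejected: %s\n", nb_prep, strerror(rte_errno));
 *     rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */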
662
663 /*********************************************************************
664  *
665  *  RX functions
666  *
667  **********************************************************************/
668 #define IGB_PACKET_TYPE_IPV4              0X01
669 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
670 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
671 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
672 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
673 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
674 #define IGB_PACKET_TYPE_IPV6              0X04
675 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
676 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
677 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
678 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
679 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
680 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
681 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
682 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
683 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
684 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
685 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
686 #define IGB_PACKET_TYPE_MAX               0X80
687 #define IGB_PACKET_TYPE_MASK              0X7F
688 #define IGB_PACKET_TYPE_SHIFT             0X04
689 static inline uint32_t
690 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
691 {
692         static const uint32_t
693                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
694                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
695                         RTE_PTYPE_L3_IPV4,
696                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
697                         RTE_PTYPE_L3_IPV4_EXT,
698                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV6,
700                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702                         RTE_PTYPE_INNER_L3_IPV6,
703                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV6_EXT,
705                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
706                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
707                         RTE_PTYPE_INNER_L3_IPV6_EXT,
708                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
709                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
710                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
711                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
712                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
713                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
714                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
715                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
716                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
717                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
718                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
719                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
720                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
721                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
722                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
723                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
724                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
725                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
726                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
727                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
728                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
729                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
730                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
731                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
732                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
733                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
734                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
735                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
736         };
737         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
738                 return RTE_PTYPE_UNKNOWN;
739
740         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
741
742         return ptype_table[pkt_info];
743 }
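/*
 * Example of the mapping above: a pkt_info field that encodes IPv4 + TCP
 * yields table index 0x11 (IGB_PACKET_TYPE_IPV4_TCP) after the shift and
 * mask, which translates to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 * RTE_PTYPE_L4_TCP.  Indexes without an initializer stay 0, i.e.
 * RTE_PTYPE_UNKNOWN.
 */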
744
745 static inline uint64_t
746 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
747 {
748         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
749
750 #if defined(RTE_LIBRTE_IEEE1588)
751         static uint32_t ip_pkt_etqf_map[8] = {
752                 0, 0, 0, PKT_RX_IEEE1588_PTP,
753                 0, 0, 0, 0,
754         };
755
756         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
757         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
758
759         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
760         if (hw->mac.type == e1000_i210)
761                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
762         else
763                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
764 #else
765         RTE_SET_USED(rxq);
766 #endif
767
768         return pkt_flags;
769 }
770
771 static inline uint64_t
772 rx_desc_status_to_pkt_flags(uint32_t rx_status)
773 {
774         uint64_t pkt_flags;
775
776         /* Check if VLAN present */
777         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
778                 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
779
780 #if defined(RTE_LIBRTE_IEEE1588)
781         if (rx_status & E1000_RXD_STAT_TMST)
782                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
783 #endif
784         return pkt_flags;
785 }
786
787 static inline uint64_t
788 rx_desc_error_to_pkt_flags(uint32_t rx_status)
789 {
790         /*
791          * Bit 30: IPE, IPv4 checksum error
792          * Bit 29: L4I, L4I integrity error
793          */
794
795         static uint64_t error_to_pkt_flags_map[4] = {
796                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
797                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
798                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
799                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
800         };
801         return error_to_pkt_flags_map[(rx_status >>
802                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
803 }
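/*
 * For example, a status word with the L4 integrity error bit set and the
 * IPv4 checksum error bit clear produces index 1 into the map above, so the
 * mbuf is flagged PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD.
 */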
804
805 uint16_t
806 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
807                uint16_t nb_pkts)
808 {
809         struct igb_rx_queue *rxq;
810         volatile union e1000_adv_rx_desc *rx_ring;
811         volatile union e1000_adv_rx_desc *rxdp;
812         struct igb_rx_entry *sw_ring;
813         struct igb_rx_entry *rxe;
814         struct rte_mbuf *rxm;
815         struct rte_mbuf *nmb;
816         union e1000_adv_rx_desc rxd;
817         uint64_t dma_addr;
818         uint32_t staterr;
819         uint32_t hlen_type_rss;
820         uint16_t pkt_len;
821         uint16_t rx_id;
822         uint16_t nb_rx;
823         uint16_t nb_hold;
824         uint64_t pkt_flags;
825
826         nb_rx = 0;
827         nb_hold = 0;
828         rxq = rx_queue;
829         rx_id = rxq->rx_tail;
830         rx_ring = rxq->rx_ring;
831         sw_ring = rxq->sw_ring;
832         while (nb_rx < nb_pkts) {
833                 /*
834                  * The order of operations here is important as the DD status
835                  * bit must not be read after any other descriptor fields.
836                  * rx_ring and rxdp are pointing to volatile data so the order
837                  * of accesses cannot be reordered by the compiler. If they were
838                  * not volatile, they could be reordered which could lead to
839                  * using invalid descriptor fields when read from rxd.
840                  */
841                 rxdp = &rx_ring[rx_id];
842                 staterr = rxdp->wb.upper.status_error;
843                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
844                         break;
845                 rxd = *rxdp;
846
847                 /*
848                  * End of packet.
849                  *
850                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
851                  * likely to be invalid and to be dropped by the various
852                  * validation checks performed by the network stack.
853                  *
854                  * Allocate a new mbuf to replenish the RX ring descriptor.
855                  * If the allocation fails:
856                  *    - arrange for that RX descriptor to be the first one
857                  *      being parsed the next time the receive function is
858                  *      invoked [on the same queue].
859                  *
860                  *    - Stop parsing the RX ring and return immediately.
861                  *
862                  * This policy does not drop the packet received in the RX
863                  * descriptor for which the allocation of a new mbuf failed.
864                  * Thus, it allows that packet to be retrieved later if
865                  * mbufs have been freed in the meantime.
866                  * As a side effect, holding RX descriptors instead of
867                  * systematically giving them back to the NIC may lead to
868                  * RX ring exhaustion situations.
869                  * However, the NIC can gracefully prevent such situations
870                  * from happening by sending specific "back-pressure" flow
871                  * control frames to its peer(s).
872                  */
873                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
874                            "staterr=0x%x pkt_len=%u",
875                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876                            (unsigned) rx_id, (unsigned) staterr,
877                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
878
879                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
880                 if (nmb == NULL) {
881                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882                                    "queue_id=%u", (unsigned) rxq->port_id,
883                                    (unsigned) rxq->queue_id);
884                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
885                         break;
886                 }
887
888                 nb_hold++;
889                 rxe = &sw_ring[rx_id];
890                 rx_id++;
891                 if (rx_id == rxq->nb_rx_desc)
892                         rx_id = 0;
893
894                 /* Prefetch next mbuf while processing current one. */
895                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
896
897                 /*
898                  * When next RX descriptor is on a cache-line boundary,
899                  * prefetch the next 4 RX descriptors and the next 8 pointers
900                  * to mbufs.
901                  */
902                 if ((rx_id & 0x3) == 0) {
903                         rte_igb_prefetch(&rx_ring[rx_id]);
904                         rte_igb_prefetch(&sw_ring[rx_id]);
905                 }
906
907                 rxm = rxe->mbuf;
908                 rxe->mbuf = nmb;
909                 dma_addr =
910                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
911                 rxdp->read.hdr_addr = 0;
912                 rxdp->read.pkt_addr = dma_addr;
913
914                 /*
915                  * Initialize the returned mbuf.
916                  * 1) setup generic mbuf fields:
917                  *    - number of segments,
918                  *    - next segment,
919                  *    - packet length,
920                  *    - RX port identifier.
921                  * 2) integrate hardware offload data, if any:
922                  *    - RSS flag & hash,
923                  *    - IP checksum flag,
924                  *    - VLAN TCI, if any,
925                  *    - error flags.
926                  */
927                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
928                                       rxq->crc_len);
929                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
930                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
931                 rxm->nb_segs = 1;
932                 rxm->next = NULL;
933                 rxm->pkt_len = pkt_len;
934                 rxm->data_len = pkt_len;
935                 rxm->port = rxq->port_id;
936
937                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
938                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
939
940                 /*
941                  * The vlan_tci field is only valid when PKT_RX_VLAN is
942                  * set in the pkt_flags field and must be in CPU byte order.
943                  */
944                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
945                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
946                         rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
947                 } else {
948                         rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
949                 }
950                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
951                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
952                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
953                 rxm->ol_flags = pkt_flags;
954                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
955                                                 lo_dword.hs_rss.pkt_info);
956
957                 /*
958                  * Store the mbuf address into the next entry of the array
959                  * of returned packets.
960                  */
961                 rx_pkts[nb_rx++] = rxm;
962         }
963         rxq->rx_tail = rx_id;
964
965         /*
966          * If the number of free RX descriptors is greater than the RX free
967          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
968          * register.
969          * Update the RDT with the value of the last processed RX descriptor
970          * minus 1, to guarantee that the RDT register is never equal to the
971          * RDH register, which creates a "full" ring situation from the
972          * hardware point of view...
973          */
974         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
975         if (nb_hold > rxq->rx_free_thresh) {
976                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
977                            "nb_hold=%u nb_rx=%u",
978                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
979                            (unsigned) rx_id, (unsigned) nb_hold,
980                            (unsigned) nb_rx);
981                 rx_id = (uint16_t) ((rx_id == 0) ?
982                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
983                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
984                 nb_hold = 0;
985         }
986         rxq->nb_rx_hold = nb_hold;
987         return nb_rx;
988 }
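/*
 * As on the TX side, eth_igb_recv_pkts() is reached through the generic
 * rte_eth_rx_burst() wrapper once installed as the rx_pkt_burst callback.
 * A minimal polling-loop sketch (port_id/queue_id are placeholders):
 *
 *     struct rte_mbuf *rx_pkts[32];
 *     uint16_t i, nb = rte_eth_rx_burst(port_id, queue_id, rx_pkts, 32);
 *     for (i = 0; i < nb; i++) {
 *             // process rx_pkts[i], then forward or free it
 *             rte_pktmbuf_free(rx_pkts[i]);
 *     }
 */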
989
990 uint16_t
991 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
992                          uint16_t nb_pkts)
993 {
994         struct igb_rx_queue *rxq;
995         volatile union e1000_adv_rx_desc *rx_ring;
996         volatile union e1000_adv_rx_desc *rxdp;
997         struct igb_rx_entry *sw_ring;
998         struct igb_rx_entry *rxe;
999         struct rte_mbuf *first_seg;
1000         struct rte_mbuf *last_seg;
1001         struct rte_mbuf *rxm;
1002         struct rte_mbuf *nmb;
1003         union e1000_adv_rx_desc rxd;
1004         uint64_t dma; /* Physical address of mbuf data buffer */
1005         uint32_t staterr;
1006         uint32_t hlen_type_rss;
1007         uint16_t rx_id;
1008         uint16_t nb_rx;
1009         uint16_t nb_hold;
1010         uint16_t data_len;
1011         uint64_t pkt_flags;
1012
1013         nb_rx = 0;
1014         nb_hold = 0;
1015         rxq = rx_queue;
1016         rx_id = rxq->rx_tail;
1017         rx_ring = rxq->rx_ring;
1018         sw_ring = rxq->sw_ring;
1019
1020         /*
1021          * Retrieve RX context of current packet, if any.
1022          */
1023         first_seg = rxq->pkt_first_seg;
1024         last_seg = rxq->pkt_last_seg;
1025
1026         while (nb_rx < nb_pkts) {
1027         next_desc:
1028                 /*
1029                  * The order of operations here is important as the DD status
1030                  * bit must not be read after any other descriptor fields.
1031                  * rx_ring and rxdp are pointing to volatile data so the order
1032                  * of accesses cannot be reordered by the compiler. If they were
1033                  * not volatile, they could be reordered which could lead to
1034                  * using invalid descriptor fields when read from rxd.
1035                  */
1036                 rxdp = &rx_ring[rx_id];
1037                 staterr = rxdp->wb.upper.status_error;
1038                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1039                         break;
1040                 rxd = *rxdp;
1041
1042                 /*
1043                  * Descriptor done.
1044                  *
1045                  * Allocate a new mbuf to replenish the RX ring descriptor.
1046                  * If the allocation fails:
1047                  *    - arrange for that RX descriptor to be the first one
1048                  *      being parsed the next time the receive function is
1049                  *      invoked [on the same queue].
1050                  *
1051                  *    - Stop parsing the RX ring and return immediately.
1052                  *
1053                  * This policy does not drop the packet received in the RX
1054                  * descriptor for which the allocation of a new mbuf failed.
1055                  * Thus, it allows that packet to be retrieved later if
1056                  * mbufs have been freed in the meantime.
1057                  * As a side effect, holding RX descriptors instead of
1058                  * systematically giving them back to the NIC may lead to
1059                  * RX ring exhaustion situations.
1060                  * However, the NIC can gracefully prevent such situations
1061                  * from happening by sending specific "back-pressure" flow
1062                  * control frames to its peer(s).
1063                  */
1064                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1065                            "staterr=0x%x data_len=%u",
1066                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1067                            (unsigned) rx_id, (unsigned) staterr,
1068                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1069
1070                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1071                 if (nmb == NULL) {
1072                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1073                                    "queue_id=%u", (unsigned) rxq->port_id,
1074                                    (unsigned) rxq->queue_id);
1075                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1076                         break;
1077                 }
1078
1079                 nb_hold++;
1080                 rxe = &sw_ring[rx_id];
1081                 rx_id++;
1082                 if (rx_id == rxq->nb_rx_desc)
1083                         rx_id = 0;
1084
1085                 /* Prefetch next mbuf while processing current one. */
1086                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1087
1088                 /*
1089                  * When next RX descriptor is on a cache-line boundary,
1090                  * prefetch the next 4 RX descriptors and the next 8 pointers
1091                  * to mbufs.
1092                  */
1093                 if ((rx_id & 0x3) == 0) {
1094                         rte_igb_prefetch(&rx_ring[rx_id]);
1095                         rte_igb_prefetch(&sw_ring[rx_id]);
1096                 }
1097
1098                 /*
1099                  * Update RX descriptor with the physical address of the new
1100                  * data buffer of the new allocated mbuf.
1101                  */
1102                 rxm = rxe->mbuf;
1103                 rxe->mbuf = nmb;
1104                 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1105                 rxdp->read.pkt_addr = dma;
1106                 rxdp->read.hdr_addr = 0;
1107
1108                 /*
1109                  * Set data length & data buffer address of mbuf.
1110                  */
1111                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1112                 rxm->data_len = data_len;
1113                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1114
1115                 /*
1116                  * If this is the first buffer of the received packet,
1117                  * set the pointer to the first mbuf of the packet and
1118                  * initialize its context.
1119                  * Otherwise, update the total length and the number of segments
1120                  * of the current scattered packet, and update the pointer to
1121                  * the last mbuf of the current packet.
1122                  */
1123                 if (first_seg == NULL) {
1124                         first_seg = rxm;
1125                         first_seg->pkt_len = data_len;
1126                         first_seg->nb_segs = 1;
1127                 } else {
1128                         first_seg->pkt_len += data_len;
1129                         first_seg->nb_segs++;
1130                         last_seg->next = rxm;
1131                 }
1132
1133                 /*
1134                  * If this is not the last buffer of the received packet,
1135                  * update the pointer to the last mbuf of the current scattered
1136                  * packet and continue to parse the RX ring.
1137                  */
1138                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1139                         last_seg = rxm;
1140                         goto next_desc;
1141                 }
1142
1143                 /*
1144                  * This is the last buffer of the received packet.
1145                  * If the CRC is not stripped by the hardware:
1146                  *   - Subtract the CRC length from the total packet length.
1147                  *   - If the last buffer only contains the whole CRC or a part
1148                  *     of it, free the mbuf associated to the last buffer.
1149                  *     If part of the CRC is also contained in the previous
1150                  *     mbuf, subtract the length of that CRC part from the
1151                  *     data length of the previous mbuf.
1152                  */
1153                 rxm->next = NULL;
1154                 if (unlikely(rxq->crc_len > 0)) {
1155                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1156                         if (data_len <= RTE_ETHER_CRC_LEN) {
1157                                 rte_pktmbuf_free_seg(rxm);
1158                                 first_seg->nb_segs--;
1159                                 last_seg->data_len = (uint16_t)
1160                                         (last_seg->data_len -
1161                                          (RTE_ETHER_CRC_LEN - data_len));
1162                                 last_seg->next = NULL;
1163                         } else
1164                                 rxm->data_len = (uint16_t)
1165                                         (data_len - RTE_ETHER_CRC_LEN);
1166                 }
1167
1168                 /*
1169                  * Initialize the first mbuf of the returned packet:
1170                  *    - RX port identifier,
1171                  *    - hardware offload data, if any:
1172                  *      - RSS flag & hash,
1173                  *      - IP checksum flag,
1174                  *      - VLAN TCI, if any,
1175                  *      - error flags.
1176                  */
1177                 first_seg->port = rxq->port_id;
1178                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1179
1180                 /*
1181                  * The vlan_tci field is only valid when PKT_RX_VLAN is
1182                  * set in the pkt_flags field and must be in CPU byte order.
1183                  */
1184                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1185                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1186                         first_seg->vlan_tci =
1187                                 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1188                 } else {
1189                         first_seg->vlan_tci =
1190                                 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1191                 }
1192                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1193                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1194                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1195                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1196                 first_seg->ol_flags = pkt_flags;
1197                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1198                                         lower.lo_dword.hs_rss.pkt_info);
1199
1200                 /* Prefetch data of first segment, if configured to do so. */
1201                 rte_packet_prefetch((char *)first_seg->buf_addr +
1202                         first_seg->data_off);
1203
1204                 /*
1205                  * Store the mbuf address into the next entry of the array
1206                  * of returned packets.
1207                  */
1208                 rx_pkts[nb_rx++] = first_seg;
1209
1210                 /*
1211                  * Setup receipt context for a new packet.
1212                  */
1213                 first_seg = NULL;
1214         }
1215
1216         /*
1217          * Record index of the next RX descriptor to probe.
1218          */
1219         rxq->rx_tail = rx_id;
1220
1221         /*
1222          * Save receive context.
1223          */
1224         rxq->pkt_first_seg = first_seg;
1225         rxq->pkt_last_seg = last_seg;
1226
1227         /*
1228          * If the number of free RX descriptors is greater than the RX free
1229          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1230          * register.
1231          * Update the RDT with the value of the last processed RX descriptor
1232          * minus 1, to guarantee that the RDT register is never equal to the
1233          * RDH register, which creates a "full" ring situation from the
1234          * hardware point of view...
1235          */
1236         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1237         if (nb_hold > rxq->rx_free_thresh) {
1238                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1239                            "nb_hold=%u nb_rx=%u",
1240                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1241                            (unsigned) rx_id, (unsigned) nb_hold,
1242                            (unsigned) nb_rx);
1243                 rx_id = (uint16_t) ((rx_id == 0) ?
1244                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1245                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1246                 nb_hold = 0;
1247         }
1248         rxq->nb_rx_hold = nb_hold;
1249         return nb_rx;
1250 }
1251
1252 /*
1253  * Maximum number of Ring Descriptors.
1254  *
1255  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1256  * descriptors should meet the following condition:
1257  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1258  */
1259
1260 static void
1261 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1262 {
1263         unsigned i;
1264
1265         if (txq->sw_ring != NULL) {
1266                 for (i = 0; i < txq->nb_tx_desc; i++) {
1267                         if (txq->sw_ring[i].mbuf != NULL) {
1268                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1269                                 txq->sw_ring[i].mbuf = NULL;
1270                         }
1271                 }
1272         }
1273 }
1274
1275 static void
1276 igb_tx_queue_release(struct igb_tx_queue *txq)
1277 {
1278         if (txq != NULL) {
1279                 igb_tx_queue_release_mbufs(txq);
1280                 rte_free(txq->sw_ring);
1281                 rte_memzone_free(txq->mz);
1282                 rte_free(txq);
1283         }
1284 }
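/*
 * Note: freeing txq->mz above returns the descriptor-ring memzone to the
 * system; without it, re-running queue setup on the same queue would
 * leak one memzone per re-configuration.
 */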
1285
1286 void
1287 eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1288 {
1289         igb_tx_queue_release(dev->data->tx_queues[qid]);
1290 }
1291
1292 static int
1293 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1294 {
1295         struct igb_tx_entry *sw_ring;
1296         volatile union e1000_adv_tx_desc *txr;
1297         uint16_t tx_first; /* First segment analyzed. */
1298         uint16_t tx_id;    /* Current segment being processed. */
1299         uint16_t tx_last;  /* Last segment in the current packet. */
1300         uint16_t tx_next;  /* First segment of the next packet. */
1301         int count = 0;
1302
1303         if (!txq)
1304                 return -ENODEV;
1305
1306         sw_ring = txq->sw_ring;
1307         txr = txq->tx_ring;
1308
1309         /* tx_tail is the last sent packet on the sw_ring. Go to the end
1310          * of that packet (the last segment in the packet chain); the
1311          * segment after it is the first segment of the oldest packet
1312          * in the sw_ring. This is the first packet we will attempt
1313          * to free.
1314          */
1315
1316         /* Get last segment in most recently added packet. */
1317         tx_first = sw_ring[txq->tx_tail].last_id;
1318
1319         /* Get the next segment, which is the oldest segment in ring. */
1320         tx_first = sw_ring[tx_first].next_id;
1321
1322         /* Set the current index to the first. */
1323         tx_id = tx_first;
1324
1325         /* Loop through each packet. For each packet, verify that an
1326          * mbuf exists and that the last segment is free. If so, free
1327          * it and move on.
1328          */
1329         while (1) {
1330                 tx_last = sw_ring[tx_id].last_id;
1331
1332                 if (sw_ring[tx_last].mbuf) {
1333                         if (txr[tx_last].wb.status &
1334                             E1000_TXD_STAT_DD) {
1335                                 /* Increment the number of packets
1336                                  * freed.
1337                                  */
1338                                 count++;
1339
1340                                 /* Get the start of the next packet. */
1341                                 tx_next = sw_ring[tx_last].next_id;
1342
1343                                 /* Loop through all segments in a
1344                                  * packet.
1345                                  */
1346                                 do {
1347                                         if (sw_ring[tx_id].mbuf) {
1348                                                 rte_pktmbuf_free_seg(
1349                                                         sw_ring[tx_id].mbuf);
1350                                                 sw_ring[tx_id].mbuf = NULL;
1351                                                 sw_ring[tx_id].last_id = tx_id;
1352                                         }
1353
1354                                         /* Move to the next segment. */
1355                                         tx_id = sw_ring[tx_id].next_id;
1356
1357                                 } while (tx_id != tx_next);
1358
1359                                 if (unlikely(count == (int)free_cnt))
1360                                         break;
1361                         } else {
1362                                 /* mbuf still in use, nothing left to
1363                                  * free.
1364                                  */
1365                                 break;
1366                         }
1367                 } else {
1368                         /* There are multiple reasons to be here:
1369                          * 1) All the packets on the ring have been
1370                          *    freed - tx_id is equal to tx_first
1371                          *    and some packets have been freed.
1372                          *    - Done, exit
1373                          * 2) The interface has not sent a ring's worth of
1374                          *    packets yet, so the segment after the tail is
1375                          *    still empty. Or a previous call to this
1376                          *    function freed some of the segments but
1377                          *    not all, so there is a hole in the list.
1378                          *    Hopefully this is a rare case.
1379                          *    - Walk the list and find the next mbuf. If
1380                          *      there isn't one, then done.
1381                          */
1382                         if (likely(tx_id == tx_first && count != 0))
1383                                 break;
1384
1385                         /* Walk the list and find the next mbuf, if any. */
1386                         do {
1387                                 /* Move to the next segment. */
1388                                 tx_id = sw_ring[tx_id].next_id;
1389
1390                                 if (sw_ring[tx_id].mbuf)
1391                                         break;
1392
1393                         } while (tx_id != tx_first);
1394
1395                         /* Determine why the previous loop exited. If there
1396                          * is no mbuf, we are done.
1397                          */
1398                         if (!sw_ring[tx_id].mbuf)
1399                                 break;
1400                 }
1401         }
1402
1403         return count;
1404 }
1405
1406 int
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1408 {
1409         return igb_tx_done_cleanup(txq, free_cnt);
1410 }
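/*
 * Hedged usage sketch (application side, port/queue/count values
 * assumed): this callback is reached through the generic ethdev API,
 * e.g.
 *
 *     int freed = rte_eth_tx_done_cleanup(port_id, 0, 64);
 *
 * asks the driver to free up to 64 already-transmitted mbufs on queue 0,
 * while a free_cnt of 0 frees as many as possible; a negative return
 * value reports an error.
 */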
1411
1412 static void
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1414 {
1415         txq->tx_head = 0;
1416         txq->tx_tail = 0;
1417         txq->ctx_curr = 0;
1418         memset((void*)&txq->ctx_cache, 0,
1419                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1420 }
1421
1422 static void
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1424 {
1425         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426         struct igb_tx_entry *txe = txq->sw_ring;
1427         uint16_t i, prev;
1428         struct e1000_hw *hw;
1429
1430         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431         /* Zero out HW ring memory */
1432         for (i = 0; i < txq->nb_tx_desc; i++) {
1433                 txq->tx_ring[i] = zeroed_desc;
1434         }
1435
1436         /* Initialize ring entries */
1437         prev = (uint16_t)(txq->nb_tx_desc - 1);
1438         for (i = 0; i < txq->nb_tx_desc; i++) {
1439                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1440
1441                 txd->wb.status = E1000_TXD_STAT_DD;
1442                 txe[i].mbuf = NULL;
1443                 txe[i].last_id = i;
1444                 txe[prev].next_id = i;
1445                 prev = i;
1446         }
1447
1448         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449         /* 82575 specific, each tx queue will use 2 hw contexts */
1450         if (hw->mac.type == e1000_82575)
1451                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1452
1453         igb_reset_tx_queue_stat(txq);
1454 }
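/*
 * Illustration only (4-descriptor ring assumed): after the loop above,
 * the sw_ring entries are chained circularly through next_id,
 *     0 -> 1 -> 2 -> 3 -> 0
 * and each last_id is the entry's own index, which lets the transmit
 * path and igb_tx_done_cleanup() walk whole packets segment by segment.
 */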
1455
1456 uint64_t
1457 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1458 {
1459         uint64_t tx_offload_capa;
1460
1461         RTE_SET_USED(dev);
1462         tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1463                           DEV_TX_OFFLOAD_IPV4_CKSUM  |
1464                           DEV_TX_OFFLOAD_UDP_CKSUM   |
1465                           DEV_TX_OFFLOAD_TCP_CKSUM   |
1466                           DEV_TX_OFFLOAD_SCTP_CKSUM  |
1467                           DEV_TX_OFFLOAD_TCP_TSO     |
1468                           DEV_TX_OFFLOAD_MULTI_SEGS;
1469
1470         return tx_offload_capa;
1471 }
1472
1473 uint64_t
1474 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1475 {
1476         uint64_t tx_queue_offload_capa;
1477
1478         tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1479
1480         return tx_queue_offload_capa;
1481 }
1482
1483 int
1484 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1485                          uint16_t queue_idx,
1486                          uint16_t nb_desc,
1487                          unsigned int socket_id,
1488                          const struct rte_eth_txconf *tx_conf)
1489 {
1490         const struct rte_memzone *tz;
1491         struct igb_tx_queue *txq;
1492         struct e1000_hw     *hw;
1493         uint32_t size;
1494         uint64_t offloads;
1495
1496         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1497
1498         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1499
1500         /*
1501          * Validate number of transmit descriptors.
1502          * It must not exceed the hardware maximum and must be a multiple
1503          * of IGB_TXD_ALIGN, so that the ring size is E1000_ALIGN-aligned.
1504          */
1505         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1506                         (nb_desc > E1000_MAX_RING_DESC) ||
1507                         (nb_desc < E1000_MIN_RING_DESC)) {
1508                 return -EINVAL;
1509         }
1510
1511         /*
1512          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1513          * driver.
1514          */
1515         if (tx_conf->tx_free_thresh != 0)
1516                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1517                              "used for the 1G driver.");
1518         if (tx_conf->tx_rs_thresh != 0)
1519                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1520                              "used for the 1G driver.");
1521         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1522                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1523                              "consider setting the TX WTHRESH value to 4, 8, "
1524                              "or 16.");
1525
1526         /* Free memory prior to re-allocation if needed */
1527         if (dev->data->tx_queues[queue_idx] != NULL) {
1528                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1529                 dev->data->tx_queues[queue_idx] = NULL;
1530         }
1531
1532         /* First allocate the tx queue data structure */
1533         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1534                                                         RTE_CACHE_LINE_SIZE);
1535         if (txq == NULL)
1536                 return -ENOMEM;
1537
1538         /*
1539          * Allocate TX ring hardware descriptors. A memzone large enough to
1540          * handle the maximum ring size is allocated in order to allow for
1541          * resizing in later calls to the queue setup function.
1542          */
1543         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1544         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1545                                       E1000_ALIGN, socket_id);
1546         if (tz == NULL) {
1547                 igb_tx_queue_release(txq);
1548                 return -ENOMEM;
1549         }
1550
1551         txq->mz = tz;
1552         txq->nb_tx_desc = nb_desc;
1553         txq->pthresh = tx_conf->tx_thresh.pthresh;
1554         txq->hthresh = tx_conf->tx_thresh.hthresh;
1555         txq->wthresh = tx_conf->tx_thresh.wthresh;
1556         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1557                 txq->wthresh = 1;
1558         txq->queue_id = queue_idx;
1559         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1560                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1561         txq->port_id = dev->data->port_id;
1562
1563         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1564         txq->tx_ring_phys_addr = tz->iova;
1565
1566         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1567         /* Allocate software ring */
1568         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1569                                    sizeof(struct igb_tx_entry) * nb_desc,
1570                                    RTE_CACHE_LINE_SIZE);
1571         if (txq->sw_ring == NULL) {
1572                 igb_tx_queue_release(txq);
1573                 return -ENOMEM;
1574         }
1575         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1576                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1577
1578         igb_reset_tx_queue(txq, dev);
1579         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1580         dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1581         dev->data->tx_queues[queue_idx] = txq;
1582         txq->offloads = offloads;
1583
1584         return 0;
1585 }
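/*
 * Hedged usage sketch (application side, values assumed): this function
 * is reached through rte_eth_tx_queue_setup(), e.g.
 *
 *     struct rte_eth_txconf txconf = dev_info.default_txconf;
 *     txconf.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  &txconf);
 *
 * nb_desc (512 here) must pass the alignment and min/max checks above.
 * Calling setup again on an already configured queue releases the old
 * queue, including its memzone, before allocating a new one, so
 * re-configuration does not leak the descriptor ring.
 */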
1586
1587 static void
1588 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1589 {
1590         unsigned i;
1591
1592         if (rxq->sw_ring != NULL) {
1593                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1594                         if (rxq->sw_ring[i].mbuf != NULL) {
1595                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1596                                 rxq->sw_ring[i].mbuf = NULL;
1597                         }
1598                 }
1599         }
1600 }
1601
1602 static void
1603 igb_rx_queue_release(struct igb_rx_queue *rxq)
1604 {
1605         if (rxq != NULL) {
1606                 igb_rx_queue_release_mbufs(rxq);
1607                 rte_free(rxq->sw_ring);
1608                 rte_memzone_free(rxq->mz);
1609                 rte_free(rxq);
1610         }
1611 }
1612
1613 void
1614 eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1615 {
1616         igb_rx_queue_release(dev->data->rx_queues[qid]);
1617 }
1618
1619 static void
1620 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1621 {
1622         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1623         unsigned i;
1624
1625         /* Zero out HW ring memory */
1626         for (i = 0; i < rxq->nb_rx_desc; i++) {
1627                 rxq->rx_ring[i] = zeroed_desc;
1628         }
1629
1630         rxq->rx_tail = 0;
1631         rxq->pkt_first_seg = NULL;
1632         rxq->pkt_last_seg = NULL;
1633 }
1634
1635 uint64_t
1636 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1637 {
1638         uint64_t rx_offload_capa;
1639         struct e1000_hw *hw;
1640
1641         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1642
1643         rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
1644                           DEV_RX_OFFLOAD_VLAN_FILTER |
1645                           DEV_RX_OFFLOAD_IPV4_CKSUM  |
1646                           DEV_RX_OFFLOAD_UDP_CKSUM   |
1647                           DEV_RX_OFFLOAD_TCP_CKSUM   |
1648                           DEV_RX_OFFLOAD_JUMBO_FRAME |
1649                           DEV_RX_OFFLOAD_KEEP_CRC    |
1650                           DEV_RX_OFFLOAD_SCATTER     |
1651                           DEV_RX_OFFLOAD_RSS_HASH;
1652
1653         if (hw->mac.type == e1000_i350 ||
1654             hw->mac.type == e1000_i210 ||
1655             hw->mac.type == e1000_i211)
1656                 rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
1657
1658         return rx_offload_capa;
1659 }
1660
1661 uint64_t
1662 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1663 {
1664         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1665         uint64_t rx_queue_offload_capa;
1666
1667         switch (hw->mac.type) {
1668         case e1000_vfadapt_i350:
1669                 /*
1670                  * As only one Rx queue can be used, let the per-queue offload
1671                  * capability be the same as the per-port offload capability
1672                  * for convenience.
1673                  */
1674                 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1675                 break;
1676         default:
1677                 rx_queue_offload_capa = 0;
1678         }
1679         return rx_queue_offload_capa;
1680 }
1681
1682 int
1683 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1684                          uint16_t queue_idx,
1685                          uint16_t nb_desc,
1686                          unsigned int socket_id,
1687                          const struct rte_eth_rxconf *rx_conf,
1688                          struct rte_mempool *mp)
1689 {
1690         const struct rte_memzone *rz;
1691         struct igb_rx_queue *rxq;
1692         struct e1000_hw     *hw;
1693         unsigned int size;
1694         uint64_t offloads;
1695
1696         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1697
1698         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1699
1700         /*
1701          * Validate number of receive descriptors.
1702          * It must not exceed the hardware maximum and must be a multiple
1703          * of IGB_RXD_ALIGN, so that the ring size is E1000_ALIGN-aligned.
1704          */
1705         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1706                         (nb_desc > E1000_MAX_RING_DESC) ||
1707                         (nb_desc < E1000_MIN_RING_DESC)) {
1708                 return -EINVAL;
1709         }
1710
1711         /* Free memory prior to re-allocation if needed */
1712         if (dev->data->rx_queues[queue_idx] != NULL) {
1713                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1714                 dev->data->rx_queues[queue_idx] = NULL;
1715         }
1716
1717         /* First allocate the RX queue data structure. */
1718         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1719                           RTE_CACHE_LINE_SIZE);
1720         if (rxq == NULL)
1721                 return -ENOMEM;
1722         rxq->offloads = offloads;
1723         rxq->mb_pool = mp;
1724         rxq->nb_rx_desc = nb_desc;
1725         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1726         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1727         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1728         if (rxq->wthresh > 0 &&
1729             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1730                 rxq->wthresh = 1;
1731         rxq->drop_en = rx_conf->rx_drop_en;
1732         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1733         rxq->queue_id = queue_idx;
1734         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1735                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1736         rxq->port_id = dev->data->port_id;
1737         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1738                 rxq->crc_len = RTE_ETHER_CRC_LEN;
1739         else
1740                 rxq->crc_len = 0;
1741
1742         /*
1743          *  Allocate RX ring hardware descriptors. A memzone large enough to
1744          *  handle the maximum ring size is allocated in order to allow for
1745          *  resizing in later calls to the queue setup function.
1746          */
1747         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1748         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1749                                       E1000_ALIGN, socket_id);
1750         if (rz == NULL) {
1751                 igb_rx_queue_release(rxq);
1752                 return -ENOMEM;
1753         }
1754
1755         rxq->mz = rz;
1756         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1757         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1758         rxq->rx_ring_phys_addr = rz->iova;
1759         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1760
1761         /* Allocate software ring. */
1762         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1763                                    sizeof(struct igb_rx_entry) * nb_desc,
1764                                    RTE_CACHE_LINE_SIZE);
1765         if (rxq->sw_ring == NULL) {
1766                 igb_rx_queue_release(rxq);
1767                 return -ENOMEM;
1768         }
1769         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1770                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1771
1772         dev->data->rx_queues[queue_idx] = rxq;
1773         igb_reset_rx_queue(rxq);
1774
1775         return 0;
1776 }
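/*
 * Hedged usage sketch (application side, values assumed): this function
 * is reached through rte_eth_rx_queue_setup(), e.g.
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  NULL, mbuf_pool);
 *
 * A NULL rx_conf selects the driver defaults; mbuf_pool is the mempool
 * the queue refills from (rxq->mb_pool above). As on the TX side,
 * re-running setup on a configured queue releases the previous ring and
 * its memzone first.
 */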
1777
1778 uint32_t
1779 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1780 {
1781 #define IGB_RXQ_SCAN_INTERVAL 4
1782         volatile union e1000_adv_rx_desc *rxdp;
1783         struct igb_rx_queue *rxq;
1784         uint32_t desc = 0;
1785
1786         rxq = dev->data->rx_queues[rx_queue_id];
1787         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1788
1789         while ((desc < rxq->nb_rx_desc) &&
1790                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1791                 desc += IGB_RXQ_SCAN_INTERVAL;
1792                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1793                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1794                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1795                                 desc - rxq->nb_rx_desc]);
1796         }
1797
1798         return desc;
1799 }
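/*
 * Note (illustration only): the scan above advances in steps of
 * IGB_RXQ_SCAN_INTERVAL (4), so the result is rounded up to a multiple
 * of 4; e.g. 6 contiguous completed descriptors are reported as 8.
 */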
1800
1801 int
1802 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1803 {
1804         volatile union e1000_adv_rx_desc *rxdp;
1805         struct igb_rx_queue *rxq = rx_queue;
1806         uint32_t desc;
1807
1808         if (unlikely(offset >= rxq->nb_rx_desc))
1809                 return 0;
1810         desc = rxq->rx_tail + offset;
1811         if (desc >= rxq->nb_rx_desc)
1812                 desc -= rxq->nb_rx_desc;
1813
1814         rxdp = &rxq->rx_ring[desc];
1815         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1816 }
1817
1818 int
1819 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1820 {
1821         struct igb_rx_queue *rxq = rx_queue;
1822         volatile uint32_t *status;
1823         uint32_t desc;
1824
1825         if (unlikely(offset >= rxq->nb_rx_desc))
1826                 return -EINVAL;
1827
1828         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1829                 return RTE_ETH_RX_DESC_UNAVAIL;
1830
1831         desc = rxq->rx_tail + offset;
1832         if (desc >= rxq->nb_rx_desc)
1833                 desc -= rxq->nb_rx_desc;
1834
1835         status = &rxq->rx_ring[desc].wb.upper.status_error;
1836         if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1837                 return RTE_ETH_RX_DESC_DONE;
1838
1839         return RTE_ETH_RX_DESC_AVAIL;
1840 }
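/*
 * Hedged usage sketch (application side, values assumed): applications
 * normally reach this through rte_eth_rx_descriptor_status(), e.g.
 *
 *     int st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
 *     if (st == RTE_ETH_RX_DESC_DONE)
 *             handle_ready_packet();   -- hypothetical handler
 *
 * offset is relative to the next descriptor the driver will process
 * (rx_tail); offsets beyond nb_rx_desc - nb_rx_hold are reported as
 * RTE_ETH_RX_DESC_UNAVAIL.
 */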
1841
1842 int
1843 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1844 {
1845         struct igb_tx_queue *txq = tx_queue;
1846         volatile uint32_t *status;
1847         uint32_t desc;
1848
1849         if (unlikely(offset >= txq->nb_tx_desc))
1850                 return -EINVAL;
1851
1852         desc = txq->tx_tail + offset;
1853         if (desc >= txq->nb_tx_desc)
1854                 desc -= txq->nb_tx_desc;
1855
1856         status = &txq->tx_ring[desc].wb.status;
1857         if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1858                 return RTE_ETH_TX_DESC_DONE;
1859
1860         return RTE_ETH_TX_DESC_FULL;
1861 }
1862
1863 void
1864 igb_dev_clear_queues(struct rte_eth_dev *dev)
1865 {
1866         uint16_t i;
1867         struct igb_tx_queue *txq;
1868         struct igb_rx_queue *rxq;
1869
1870         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1871                 txq = dev->data->tx_queues[i];
1872                 if (txq != NULL) {
1873                         igb_tx_queue_release_mbufs(txq);
1874                         igb_reset_tx_queue(txq, dev);
1875                 }
1876         }
1877
1878         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1879                 rxq = dev->data->rx_queues[i];
1880                 if (rxq != NULL) {
1881                         igb_rx_queue_release_mbufs(rxq);
1882                         igb_reset_rx_queue(rxq);
1883                 }
1884         }
1885 }
1886
1887 void
1888 igb_dev_free_queues(struct rte_eth_dev *dev)
1889 {
1890         uint16_t i;
1891
1892         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1893                 eth_igb_rx_queue_release(dev, i);
1894                 dev->data->rx_queues[i] = NULL;
1895         }
1896         dev->data->nb_rx_queues = 0;
1897
1898         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1899                 eth_igb_tx_queue_release(dev, i);
1900                 dev->data->tx_queues[i] = NULL;
1901         }
1902         dev->data->nb_tx_queues = 0;
1903 }
1904
1905 /**
1906  * Receive Side Scaling (RSS).
1907  * See section 7.1.1.7 in the following document:
1908  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1909  *
1910  * Principles:
1911  * The source and destination IP addresses of the IP header and the source and
1912  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1913  * against a configurable random key to compute a 32-bit RSS hash result.
1914  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1915  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1916  * RSS output index which is used as the RX queue index where to store the
1917  * received packets.
1918  * The following output is supplied in the RX write-back descriptor:
1919  *     - 32-bit result of the Microsoft RSS hash function,
1920  *     - 4-bit RSS type field.
1921  */
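/*
 * Worked example (hash value assumed for illustration): for a computed
 * RSS hash of 0x1A2B3C4D the RETA index is the 7 LSBs,
 *     0x1A2B3C4D & 0x7F = 0x4D = 77,
 * so RETA entry 77 supplies the 3-bit queue index the packet is steered
 * to, while the full 32-bit hash is written back in the descriptor and
 * reported to the application (mbuf->hash.rss).
 */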
1922
1923 /*
1924  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1925  * Used as the default key.
1926  */
1927 static uint8_t rss_intel_key[40] = {
1928         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1929         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1930         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1931         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1932         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1933 };
1934
1935 static void
1936 igb_rss_disable(struct rte_eth_dev *dev)
1937 {
1938         struct e1000_hw *hw;
1939         uint32_t mrqc;
1940
1941         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1942         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1943         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1944         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1945 }
1946
1947 static void
1948 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1949 {
1950         uint8_t  *hash_key;
1951         uint32_t rss_key;
1952         uint32_t mrqc;
1953         uint64_t rss_hf;
1954         uint16_t i;
1955
1956         hash_key = rss_conf->rss_key;
1957         if (hash_key != NULL) {
1958                 /* Fill in RSS hash key */
1959                 for (i = 0; i < 10; i++) {
1960                         rss_key  = hash_key[(i * 4)];
1961                         rss_key |= hash_key[(i * 4) + 1] << 8;
1962                         rss_key |= hash_key[(i * 4) + 2] << 16;
1963                         rss_key |= hash_key[(i * 4) + 3] << 24;
1964                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1965                 }
1966         }
1967
1968         /* Set configured hashing protocols in MRQC register */
1969         rss_hf = rss_conf->rss_hf;
1970         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1971         if (rss_hf & ETH_RSS_IPV4)
1972                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1973         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1974                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1975         if (rss_hf & ETH_RSS_IPV6)
1976                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1977         if (rss_hf & ETH_RSS_IPV6_EX)
1978                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1979         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1980                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1981         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1982                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1983         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1984                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1985         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1986                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1987         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1988                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1989         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1990 }
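/*
 * Illustration only: the 40-byte key is programmed as ten little-endian
 * 32-bit words. With the default rss_intel_key above, the first word is
 *     0x6D | (0x5A << 8) | (0x56 << 16) | (0xDA << 24) = 0xDA565A6D
 * and is written to RSSRK[0].
 */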
1991
1992 int
1993 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1994                         struct rte_eth_rss_conf *rss_conf)
1995 {
1996         struct e1000_hw *hw;
1997         uint32_t mrqc;
1998         uint64_t rss_hf;
1999
2000         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2001
2002         /*
2003          * Before changing anything, first check that the update RSS operation
2004          * does not attempt to disable RSS, if RSS was enabled at
2005          * initialization time, or does not attempt to enable RSS, if RSS was
2006          * disabled at initialization time.
2007          */
2008         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
2009         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2010         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
2011                 if (rss_hf != 0) /* Enable RSS */
2012                         return -(EINVAL);
2013                 return 0; /* Nothing to do */
2014         }
2015         /* RSS enabled */
2016         if (rss_hf == 0) /* Disable RSS */
2017                 return -(EINVAL);
2018         igb_hw_rss_hash_set(hw, rss_conf);
2019         return 0;
2020 }
2021
2022 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2023                               struct rte_eth_rss_conf *rss_conf)
2024 {
2025         struct e1000_hw *hw;
2026         uint8_t *hash_key;
2027         uint32_t rss_key;
2028         uint32_t mrqc;
2029         uint64_t rss_hf;
2030         uint16_t i;
2031
2032         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2033         hash_key = rss_conf->rss_key;
2034         if (hash_key != NULL) {
2035                 /* Return RSS hash key */
2036                 for (i = 0; i < 10; i++) {
2037                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2038                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2039                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2040                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2041                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2042                 }
2043         }
2044
2045         /* Get RSS functions configured in MRQC register */
2046         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2047         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2048                 rss_conf->rss_hf = 0;
2049                 return 0;
2050         }
2051         rss_hf = 0;
2052         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2053                 rss_hf |= ETH_RSS_IPV4;
2054         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2055                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2056         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2057                 rss_hf |= ETH_RSS_IPV6;
2058         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2059                 rss_hf |= ETH_RSS_IPV6_EX;
2060         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2061                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2062         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2063                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2064         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2065                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2066         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2067                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2068         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2069                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2070         rss_conf->rss_hf = rss_hf;
2071         return 0;
2072 }
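/*
 * Hedged usage sketch (application side, values assumed): the get/update
 * pair above is reached through the generic ethdev API, e.g.
 *
 *     uint8_t key[40];
 *     struct rte_eth_rss_conf conf = { .rss_key = key, .rss_key_len = 40 };
 *     rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 *     conf.rss_hf &= ~ETH_RSS_IPV6;
 *     rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * drops one hash type while keeping RSS enabled; the driver rejects an
 * update that would toggle RSS fully on or off relative to how the port
 * was initialized.
 */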
2073
2074 static void
2075 igb_rss_configure(struct rte_eth_dev *dev)
2076 {
2077         struct rte_eth_rss_conf rss_conf;
2078         struct e1000_hw *hw;
2079         uint32_t shift;
2080         uint16_t i;
2081
2082         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2083
2084         /* Fill in redirection table. */
2085         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2086         for (i = 0; i < 128; i++) {
2087                 union e1000_reta {
2088                         uint32_t dword;
2089                         uint8_t  bytes[4];
2090                 } reta;
2091                 uint8_t q_idx;
2092
2093                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2094                                    i % dev->data->nb_rx_queues : 0);
2095                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2096                 if ((i & 3) == 3)
2097                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2098         }
2099
2100         /*
2101          * Configure the RSS key and the RSS protocols used to compute
2102          * the RSS hash of input packets.
2103          */
2104         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2105         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2106                 igb_rss_disable(dev);
2107                 return;
2108         }
2109         if (rss_conf.rss_key == NULL)
2110                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2111         igb_hw_rss_hash_set(hw, &rss_conf);
2112 }
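/*
 * Worked example (4 RX queues assumed): RETA entry i receives queue
 * i % 4, and every four consecutive byte-wide entries are packed into
 * one 32-bit register, so RETA(0) = RETA(1) = ... = 0x03020100.
 * On 82575 the queue index is additionally shifted left by 6 to match
 * that device's RETA layout.
 */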
2113
2114 /*
2115  * Check whether the MAC type supports VMDq.
2116  * Return 1 if it does, otherwise return 0.
2117  */
2118 static int
2119 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2120 {
2121         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2122
2123         switch (hw->mac.type) {
2124         case e1000_82576:
2125         case e1000_82580:
2126         case e1000_i350:
2127                 return 1;
2128         case e1000_82540:
2129         case e1000_82541:
2130         case e1000_82542:
2131         case e1000_82543:
2132         case e1000_82544:
2133         case e1000_82545:
2134         case e1000_82546:
2135         case e1000_82547:
2136         case e1000_82571:
2137         case e1000_82572:
2138         case e1000_82573:
2139         case e1000_82574:
2140         case e1000_82583:
2141         case e1000_i210:
2142         case e1000_i211:
2143         default:
2144                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2145                 return 0;
2146         }
2147 }
2148
2149 static int
2150 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2151 {
2152         struct rte_eth_vmdq_rx_conf *cfg;
2153         struct e1000_hw *hw;
2154         uint32_t mrqc, vt_ctl, vmolr, rctl;
2155         int i;
2156
2157         PMD_INIT_FUNC_TRACE();
2158
2159         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2161
2162         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2163         if (igb_is_vmdq_supported(dev) == 0)
2164                 return -1;
2165
2166         igb_rss_disable(dev);
2167
2168         /* RCTL: enable VLAN filter */
2169         rctl = E1000_READ_REG(hw, E1000_RCTL);
2170         rctl |= E1000_RCTL_VFE;
2171         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2172
2173         /* MRQC: enable vmdq */
2174         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2175         mrqc |= E1000_MRQC_ENABLE_VMDQ;
2176         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2177
2178         /* VTCTL:  pool selection according to VLAN tag */
2179         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2180         if (cfg->enable_default_pool)
2181                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2182         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2183         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2184
2185         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2186                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2187                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2188                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2189                         E1000_VMOLR_MPME);
2190
2191                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2192                         vmolr |= E1000_VMOLR_AUPE;
2193                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2194                         vmolr |= E1000_VMOLR_ROMPE;
2195                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2196                         vmolr |= E1000_VMOLR_ROPE;
2197                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2198                         vmolr |= E1000_VMOLR_BAM;
2199                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2200                         vmolr |= E1000_VMOLR_MPME;
2201
2202                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2203         }
2204
2205         /*
2206          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2207          * Both 82576 and 82580 support it.
2208          */
2209         if (hw->mac.type != e1000_i350) {
2210                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2211                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2212                         vmolr |= E1000_VMOLR_STRVLAN;
2213                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2214                 }
2215         }
2216
2217         /* VFTA - enable all vlan filters */
2218         for (i = 0; i < IGB_VFTA_SIZE; i++)
2219                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2220
2221         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
2222         if (hw->mac.type != e1000_82580)
2223                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2224
2225         /*
2226          * RAH/RAL - allow pools to read specific mac addresses
2227          * In this case, all pools should be able to read from mac addr 0
2228          */
2229         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2230         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2231
2232         /* VLVF: set up filters for vlan tags as configured */
2233         for (i = 0; i < cfg->nb_pool_maps; i++) {
2234                 /* set vlan id in VF register and set the valid bit */
2235                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2236                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2237                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2238                         E1000_VLVF_POOLSEL_MASK)));
2239         }
2240
2241         E1000_WRITE_FLUSH(hw);
2242
2243         return 0;
2244 }
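/*
 * Hedged configuration sketch (application side, values assumed): the
 * pool_map entries consumed above come from the VMDq RX configuration,
 * e.g.
 *
 *     struct rte_eth_vmdq_rx_conf *c =
 *             &port_conf.rx_adv_conf.vmdq_rx_conf;
 *     c->nb_queue_pools = ETH_8_POOLS;
 *     c->nb_pool_maps = 1;
 *     c->pool_map[0].vlan_id = 100;
 *     c->pool_map[0].pools = 1 << 2;
 *
 * steers VLAN 100 to pool 2: each map programs one VLVF register with
 * the VLAN id and a pool bitmap, exactly as the loop above does.
 */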
2245
2246
2247 /*********************************************************************
2248  *
2249  *  Enable receive unit.
2250  *
2251  **********************************************************************/
2252
2253 static int
2254 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2255 {
2256         struct igb_rx_entry *rxe = rxq->sw_ring;
2257         uint64_t dma_addr;
2258         unsigned i;
2259
2260         /* Initialize software ring entries. */
2261         for (i = 0; i < rxq->nb_rx_desc; i++) {
2262                 volatile union e1000_adv_rx_desc *rxd;
2263                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2264
2265                 if (mbuf == NULL) {
2266                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2267                                      "queue_id=%hu", rxq->queue_id);
2268                         return -ENOMEM;
2269                 }
2270                 dma_addr =
2271                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2272                 rxd = &rxq->rx_ring[i];
2273                 rxd->read.hdr_addr = 0;
2274                 rxd->read.pkt_addr = dma_addr;
2275                 rxe[i].mbuf = mbuf;
2276         }
2277
2278         return 0;
2279 }
2280
2281 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2282 static int
2283 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2284 {
2285         struct e1000_hw *hw =
2286                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2287         uint32_t mrqc;
2288
2289         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2290                 /*
2291                  * SRIOV active scheme
2292                  * FIXME if support RSS together with VMDq & SRIOV
2293                  */
2294                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2295                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2296                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2297                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2298         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2299                 /*
2300                  * SRIOV inactive scheme
2301                  */
2302                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2303                         case ETH_MQ_RX_RSS:
2304                                 igb_rss_configure(dev);
2305                                 break;
2306                         case ETH_MQ_RX_VMDQ_ONLY:
2307                                 /*Configure general VMDQ only RX parameters*/
2308                                 igb_vmdq_rx_hw_configure(dev);
2309                                 break;
2310                         case ETH_MQ_RX_NONE:
2311                                 /* if mq_mode is none, disable rss mode.*/
2312                         default:
2313                                 igb_rss_disable(dev);
2314                                 break;
2315                 }
2316         }
2317
2318         return 0;
2319 }
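/*
 * Hedged usage sketch (application side, values assumed): the branch
 * taken above follows the configured mq_mode, e.g.
 *
 *     port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *     port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
 *
 * selects the RSS path, ETH_MQ_RX_VMDQ_ONLY selects the VMDq path, and
 * ETH_MQ_RX_NONE leaves RSS disabled.
 */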
2320
2321 int
2322 eth_igb_rx_init(struct rte_eth_dev *dev)
2323 {
2324         struct rte_eth_rxmode *rxmode;
2325         struct e1000_hw     *hw;
2326         struct igb_rx_queue *rxq;
2327         uint32_t rctl;
2328         uint32_t rxcsum;
2329         uint32_t srrctl;
2330         uint16_t buf_size;
2331         uint16_t rctl_bsize;
2332         uint16_t i;
2333         int ret;
2334
2335         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2336         srrctl = 0;
2337
2338         /*
2339          * Make sure receives are disabled while setting
2340          * up the descriptor ring.
2341          */
2342         rctl = E1000_READ_REG(hw, E1000_RCTL);
2343         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2344
2345         rxmode = &dev->data->dev_conf.rxmode;
2346
2347         /*
2348          * Configure support of jumbo frames, if any.
2349          */
2350         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2351                 uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
2352
2353                 rctl |= E1000_RCTL_LPE;
2354
2355                 /*
2356                  * Set the maximum packet length by default; it might be updated
2357                  * later when dual VLAN is enabled or disabled.
2358                  */
2359                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2360                         max_len += VLAN_TAG_SIZE;
2361
2362                 E1000_WRITE_REG(hw, E1000_RLPML, max_len);
2363         } else
2364                 rctl &= ~E1000_RCTL_LPE;
2365
2366         /* Configure and enable each RX queue. */
2367         rctl_bsize = 0;
2368         dev->rx_pkt_burst = eth_igb_recv_pkts;
2369         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2370                 uint64_t bus_addr;
2371                 uint32_t rxdctl;
2372
2373                 rxq = dev->data->rx_queues[i];
2374
2375                 rxq->flags = 0;
2376                 /*
2377                  * i350 and i354 vlan packets have vlan tags byte swapped.
2378                  */
2379                 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2380                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2381                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2382                 } else {
2383                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2384                 }
2385
2386                 /* Allocate buffers for descriptor rings and set up queue */
2387                 ret = igb_alloc_rx_queue_mbufs(rxq);
2388                 if (ret)
2389                         return ret;
2390
2391                 /*
2392                  * Reset crc_len in case it was changed after queue setup by a
2393                  *  call to configure
2394                  */
2395                 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2396                         rxq->crc_len = RTE_ETHER_CRC_LEN;
2397                 else
2398                         rxq->crc_len = 0;
2399
2400                 bus_addr = rxq->rx_ring_phys_addr;
2401                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2402                                 rxq->nb_rx_desc *
2403                                 sizeof(union e1000_adv_rx_desc));
2404                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2405                                 (uint32_t)(bus_addr >> 32));
2406                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2407
2408                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2409
2410                 /*
2411                  * Configure RX buffer size.
2412                  */
2413                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2414                         RTE_PKTMBUF_HEADROOM);
2415                 if (buf_size >= 1024) {
2416                         /*
2417                          * Configure the BSIZEPACKET field of the SRRCTL
2418                          * register of the queue.
2419                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2420                          * If this field is equal to 0b, then RCTL.BSIZE
2421                          * determines the RX packet buffer size.
2422                          */
2423                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2424                                    E1000_SRRCTL_BSIZEPKT_MASK);
2425                         buf_size = (uint16_t) ((srrctl &
2426                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2427                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2428
2429                         /* Add the dual VLAN length to support dual VLAN */
2430                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2431                                                 2 * VLAN_TAG_SIZE) > buf_size){
2432                                 if (!dev->data->scattered_rx)
2433                                         PMD_INIT_LOG(DEBUG,
2434                                                      "forcing scatter mode");
2435                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2436                                 dev->data->scattered_rx = 1;
2437                         }
2438                 } else {
2439                         /*
2440                          * Use BSIZE field of the device RCTL register.
2441                          */
2442                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2443                                 rctl_bsize = buf_size;
2444                         if (!dev->data->scattered_rx)
2445                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2446                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2447                         dev->data->scattered_rx = 1;
2448                 }
2449
2450                 /* Set if packets are dropped when no descriptors available */
2451                 if (rxq->drop_en)
2452                         srrctl |= E1000_SRRCTL_DROP_EN;
2453
2454                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2455
2456                 /* Enable this RX queue. */
2457                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2458                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2459                 rxdctl &= 0xFFF00000;
2460                 rxdctl |= (rxq->pthresh & 0x1F);
2461                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2462                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2463                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2464         }
2465
2466         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2467                 if (!dev->data->scattered_rx)
2468                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2469                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2470                 dev->data->scattered_rx = 1;
2471         }
2472
2473         /*
2474          * Setup BSIZE field of RCTL register, if needed.
2475          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2476          * register, since the code above configures the SRRCTL register of
2477          * the RX queue in such a case.
2478          * All configurable sizes are:
2479          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2480          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2481          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2482          *  2048: rctl |= E1000_RCTL_SZ_2048;
2483          *  1024: rctl |= E1000_RCTL_SZ_1024;
2484          *   512: rctl |= E1000_RCTL_SZ_512;
2485          *   256: rctl |= E1000_RCTL_SZ_256;
2486          */
2487         if (rctl_bsize > 0) {
2488                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2489                         rctl |= E1000_RCTL_SZ_512;
2490                 else /* 256 <= buf_size < 512 - use 256 */
2491                         rctl |= E1000_RCTL_SZ_256;
2492         }
2493
2494         /*
2495          * Configure RSS if device configured with multiple RX queues.
2496          */
2497         igb_dev_mq_rx_configure(dev);
2498
2499         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2500         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2501
2502         /*
2503          * Setup the Checksum Register.
2504          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2505          */
2506         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2507         rxcsum |= E1000_RXCSUM_PCSD;
2508
2509         /* Enable both L3/L4 rx checksum offload */
2510         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2511                 rxcsum |= E1000_RXCSUM_IPOFL;
2512         else
2513                 rxcsum &= ~E1000_RXCSUM_IPOFL;
2514         if (rxmode->offloads &
2515                 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2516                 rxcsum |= E1000_RXCSUM_TUOFL;
2517         else
2518                 rxcsum &= ~E1000_RXCSUM_TUOFL;
2519         if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2520                 rxcsum |= E1000_RXCSUM_CRCOFL;
2521         else
2522                 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2523
2524         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2525
2526         /* Setup the Receive Control Register. */
2527         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2528                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2529
2530                 /* clear STRCRC bit in all queues */
2531                 if (hw->mac.type == e1000_i350 ||
2532                     hw->mac.type == e1000_i210 ||
2533                     hw->mac.type == e1000_i211 ||
2534                     hw->mac.type == e1000_i354) {
2535                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2536                                 rxq = dev->data->rx_queues[i];
2537                                 uint32_t dvmolr = E1000_READ_REG(hw,
2538                                         E1000_DVMOLR(rxq->reg_idx));
2539                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2540                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2541                         }
2542                 }
2543         } else {
2544                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2545
2546                 /* set STRCRC bit in all queues */
2547                 if (hw->mac.type == e1000_i350 ||
2548                     hw->mac.type == e1000_i210 ||
2549                     hw->mac.type == e1000_i211 ||
2550                     hw->mac.type == e1000_i354) {
2551                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2552                                 rxq = dev->data->rx_queues[i];
2553                                 uint32_t dvmolr = E1000_READ_REG(hw,
2554                                         E1000_DVMOLR(rxq->reg_idx));
2555                                 dvmolr |= E1000_DVMOLR_STRCRC;
2556                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2557                         }
2558                 }
2559         }
2560
2561         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2562         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2563                 E1000_RCTL_RDMTS_HALF |
2564                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2565
2566         /* Make sure VLAN Filters are off. */
2567         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2568                 rctl &= ~E1000_RCTL_VFE;
2569         /* Don't store bad packets. */
2570         rctl &= ~E1000_RCTL_SBP;
2571
2572         /* Enable Receives. */
2573         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2574
2575         /*
2576          * Setup the HW Rx Head and Tail Descriptor Pointers.
2577          * This needs to be done after enable.
2578          */
2579         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2580                 rxq = dev->data->rx_queues[i];
2581                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2582                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2583         }
2584
2585         return 0;
2586 }
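/*
 * Worked example (a common mbuf data room of 2048 + 128 bytes assumed):
 * buf_size = 2176 - RTE_PKTMBUF_HEADROOM = 2048, so SRRCTL.BSIZEPACKET
 * is programmed as 2048 >> 10 = 2, i.e. a 2 KB hardware buffer per
 * descriptor. Scattered RX is then forced only when max_rx_pkt_len plus
 * 2 * VLAN_TAG_SIZE exceeds 2048. Buffers smaller than 1 KB fall back to
 * the RCTL.BSIZE 512/256-byte settings at the end of the function.
 */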
2587
2588 /*********************************************************************
2589  *
2590  *  Enable transmit unit.
2591  *
2592  **********************************************************************/
2593 void
2594 eth_igb_tx_init(struct rte_eth_dev *dev)
2595 {
2596         struct e1000_hw     *hw;
2597         struct igb_tx_queue *txq;
2598         uint32_t tctl;
2599         uint32_t txdctl;
2600         uint16_t i;
2601
2602         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2603
2604         /* Setup the Base and Length of the Tx Descriptor Rings. */
2605         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2606                 uint64_t bus_addr;
2607                 txq = dev->data->tx_queues[i];
2608                 bus_addr = txq->tx_ring_phys_addr;
2609
2610                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2611                                 txq->nb_tx_desc *
2612                                 sizeof(union e1000_adv_tx_desc));
2613                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2614                                 (uint32_t)(bus_addr >> 32));
2615                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2616
2617                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2618                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2619                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2620
2621                 /* Setup Transmit threshold registers. */
2622                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2623                 txdctl |= txq->pthresh & 0x1F;
2624                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2625                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2626                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2627                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2628         }
2629
2630         /* Program the Transmit Control Register. */
2631         tctl = E1000_READ_REG(hw, E1000_TCTL);
2632         tctl &= ~E1000_TCTL_CT;
2633         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2634                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2635
2636         e1000_config_collision_dist(hw);
2637
2638         /* This write will effectively turn on the transmit unit. */
2639         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2640 }
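/*
 * Illustrative sketch (not part of the driver): how the TXDCTL value written
 * in eth_igb_tx_init() is packed.  PTHRESH lives in bits 4:0, HTHRESH in
 * bits 12:8 and WTHRESH in bits 20:16, with E1000_TXDCTL_QUEUE_ENABLE
 * requesting that the queue be started.  The helper name is hypothetical and
 * only mirrors the shifts used above.
 */
static inline uint32_t
igb_sketch_pack_txdctl(uint8_t pthresh, uint8_t hthresh, uint8_t wthresh)
{
        uint32_t txdctl = 0;

        txdctl |= pthresh & 0x1F;                   /* prefetch threshold */
        txdctl |= (uint32_t)(hthresh & 0x1F) << 8;  /* host threshold */
        txdctl |= (uint32_t)(wthresh & 0x1F) << 16; /* write-back threshold */
        return txdctl | E1000_TXDCTL_QUEUE_ENABLE;
}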
2641
2642 /*********************************************************************
2643  *
2644  *  Enable VF receive unit.
2645  *
2646  **********************************************************************/
2647 int
2648 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2649 {
2650         struct e1000_hw     *hw;
2651         struct igb_rx_queue *rxq;
2652         uint32_t srrctl;
2653         uint16_t buf_size;
2654         uint16_t rctl_bsize;
2655         uint16_t i;
2656         int ret;
2657
2658         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2659
2660         /* setup MTU */
2661         e1000_rlpml_set_vf(hw,
2662                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2663                 VLAN_TAG_SIZE));
2664
2665         /* Configure and enable each RX queue. */
2666         rctl_bsize = 0;
2667         dev->rx_pkt_burst = eth_igb_recv_pkts;
2668         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2669                 uint64_t bus_addr;
2670                 uint32_t rxdctl;
2671
2672                 rxq = dev->data->rx_queues[i];
2673
2674                 rxq->flags = 0;
2675                 /*
2676                  * i350 VF loopback VLAN packets have their VLAN tags byte swapped.
2677                  */
2678                 if (hw->mac.type == e1000_vfadapt_i350) {
2679                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2680                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2681                 } else {
2682                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2683                 }
2684
2685                 /* Allocate buffers for descriptor rings and set up queue */
2686                 ret = igb_alloc_rx_queue_mbufs(rxq);
2687                 if (ret)
2688                         return ret;
2689
2690                 bus_addr = rxq->rx_ring_phys_addr;
2691                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2692                                 rxq->nb_rx_desc *
2693                                 sizeof(union e1000_adv_rx_desc));
2694                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2695                                 (uint32_t)(bus_addr >> 32));
2696                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2697
2698                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2699
2700                 /*
2701                  * Configure RX buffer size.
2702                  */
2703                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2704                         RTE_PKTMBUF_HEADROOM);
2705                 if (buf_size >= 1024) {
2706                         /*
2707                          * Configure the BSIZEPACKET field of the SRRCTL
2708                          * register of the queue.
2709                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2710                          * If this field is equal to 0b, then RCTL.BSIZE
2711                          * determines the RX packet buffer size.
2712                          */
2713                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2714                                    E1000_SRRCTL_BSIZEPKT_MASK);
2715                         buf_size = (uint16_t) ((srrctl &
2716                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2717                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2718
2719                         /* Add dual VLAN tag length to allow for double VLAN tagging */
2720                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2721                                                 2 * VLAN_TAG_SIZE) > buf_size) {
2722                                 if (!dev->data->scattered_rx)
2723                                         PMD_INIT_LOG(DEBUG,
2724                                                      "forcing scatter mode");
2725                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2726                                 dev->data->scattered_rx = 1;
2727                         }
2728                 } else {
2729                         /*
2730                          * Use BSIZE field of the device RCTL register.
2731                          */
2732                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2733                                 rctl_bsize = buf_size;
2734                         if (!dev->data->scattered_rx)
2735                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2736                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2737                         dev->data->scattered_rx = 1;
2738                 }
2739
2740                 /* Drop packets when no RX descriptors are available, if enabled */
2741                 if (rxq->drop_en)
2742                         srrctl |= E1000_SRRCTL_DROP_EN;
2743
2744                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2745
2746                 /* Enable this RX queue. */
2747                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2748                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2749                 rxdctl &= 0xFFF00000;
2750                 rxdctl |= (rxq->pthresh & 0x1F);
2751                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2752                 if (hw->mac.type == e1000_vfadapt) {
2753                         /*
2754                          * Workaround for 82576 VF erratum:
2755                          * force WTHRESH to 1 to avoid descriptor
2756                          * write-back occasionally not being triggered.
2757                          */
2758                         rxdctl |= 0x10000;
2759                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1!");
2760                 } else {
2761                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2762                 }
2763                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2764         }
2765
2766         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2767                 if (!dev->data->scattered_rx)
2768                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2769                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2770                 dev->data->scattered_rx = 1;
2771         }
2772
2773         /*
2774          * Setup the HW Rx Head and Tail Descriptor Pointers.
2775          * This needs to be done after enable.
2776          */
2777         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2778                 rxq = dev->data->rx_queues[i];
2779                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2780                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2781         }
2782
2783         return 0;
2784 }
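/*
 * Illustrative sketch (not part of the driver): the scatter decision made in
 * the VF RX init above.  The usable buffer size is rounded down to the 1 KB
 * granularity of SRRCTL.BSIZEPACKET, and scattered RX is forced when the
 * maximum frame (plus two VLAN tags) no longer fits in one such buffer.  The
 * helper name is hypothetical; the shift/mask macros are the ones used above.
 */
static inline int
igb_sketch_needs_scatter(uint16_t buf_size, uint32_t max_rx_pkt_len)
{
        uint16_t rounded;

        /* Round the buffer size down to a whole number of 1 KB units. */
        rounded = (uint16_t)(((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                              E1000_SRRCTL_BSIZEPKT_MASK) <<
                             E1000_SRRCTL_BSIZEPKT_SHIFT);
        return (max_rx_pkt_len + 2 * VLAN_TAG_SIZE) > rounded;
}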
2785
2786 /*********************************************************************
2787  *
2788  *  Enable VF transmit unit.
2789  *
2790  **********************************************************************/
2791 void
2792 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2793 {
2794         struct e1000_hw     *hw;
2795         struct igb_tx_queue *txq;
2796         uint32_t txdctl;
2797         uint16_t i;
2798
2799         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2800
2801         /* Setup the Base and Length of the Tx Descriptor Rings. */
2802         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2803                 uint64_t bus_addr;
2804
2805                 txq = dev->data->tx_queues[i];
2806                 bus_addr = txq->tx_ring_phys_addr;
2807                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2808                                 txq->nb_tx_desc *
2809                                 sizeof(union e1000_adv_tx_desc));
2810                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2811                                 (uint32_t)(bus_addr >> 32));
2812                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2813
2814                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2815                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2816                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2817
2818                 /* Setup Transmit threshold registers. */
2819                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2820                 txdctl |= txq->pthresh & 0x1F;
2821                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2822                 if (hw->mac.type == e1000_82576) {
2823                         /*
2824                          * Workaround for 82576 VF erratum:
2825                          * force WTHRESH to 1 to avoid descriptor
2826                          * write-back occasionally not being triggered.
2827                          */
2828                         txdctl |= 0x10000;
2829                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1!");
2830                 } else {
2831                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2832                 }
2833                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2834                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2835         }
2836
2837 }
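/*
 * Illustrative sketch (not part of the driver): the erratum handling in the
 * VF TX init above.  WTHRESH occupies TXDCTL bits 20:16, so OR-ing in 0x10000
 * simply pins WTHRESH to 1; on affected parts the configured wthresh is
 * ignored in favour of that fixed value.  The helper name is hypothetical.
 */
static inline uint32_t
igb_sketch_vf_wthresh_bits(enum e1000_mac_type mac_type, uint8_t wthresh)
{
        /* Erratum workaround: pin WTHRESH to 1 so write-back always fires. */
        if (mac_type == e1000_82576)
                return 1u << 16;
        return (uint32_t)(wthresh & 0x1F) << 16;
}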
2838
2839 void
2840 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2841         struct rte_eth_rxq_info *qinfo)
2842 {
2843         struct igb_rx_queue *rxq;
2844
2845         rxq = dev->data->rx_queues[queue_id];
2846
2847         qinfo->mp = rxq->mb_pool;
2848         qinfo->scattered_rx = dev->data->scattered_rx;
2849         qinfo->nb_desc = rxq->nb_rx_desc;
2850
2851         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2852         qinfo->conf.rx_drop_en = rxq->drop_en;
2853         qinfo->conf.offloads = rxq->offloads;
2854 }
2855
2856 void
2857 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2858         struct rte_eth_txq_info *qinfo)
2859 {
2860         struct igb_tx_queue *txq;
2861
2862         txq = dev->data->tx_queues[queue_id];
2863
2864         qinfo->nb_desc = txq->nb_tx_desc;
2865
2866         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2867         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2868         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2869         qinfo->conf.offloads = txq->offloads;
2870 }
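/*
 * Illustrative sketch (not part of the driver): the two getters above back
 * the generic ethdev queue-info calls.  A hypothetical caller could read the
 * RX descriptor count of a queue like this.
 */
static inline uint16_t
igb_sketch_query_rx_ring_size(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
                return 0;       /* query failed or not supported */
        return qinfo.nb_desc;
}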
2871
2872 int
2873 igb_rss_conf_init(struct rte_eth_dev *dev,
2874                   struct igb_rte_flow_rss_conf *out,
2875                   const struct rte_flow_action_rss *in)
2876 {
2877         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2878
2879         if (in->key_len > RTE_DIM(out->key) ||
2880             ((hw->mac.type == e1000_82576) &&
2881              (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
2882             ((hw->mac.type != e1000_82576) &&
2883              (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
2884                 return -EINVAL;
2885         out->conf = (struct rte_flow_action_rss){
2886                 .func = in->func,
2887                 .level = in->level,
2888                 .types = in->types,
2889                 .key_len = in->key_len,
2890                 .queue_num = in->queue_num,
2891                 .key = memcpy(out->key, in->key, in->key_len),
2892                 .queue = memcpy(out->queue, in->queue,
2893                                 sizeof(*in->queue) * in->queue_num),
2894         };
2895         return 0;
2896 }
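/*
 * Illustrative sketch (not part of the driver): igb_rss_conf_init() deep
 * copies an rte_flow_action_rss into driver-private storage so the key and
 * queue array outlive the caller's buffers.  The helper and its parameters
 * are hypothetical; only ETH_RSS_IP is chosen here as an example hash type.
 */
static inline int
igb_sketch_store_rss(struct rte_eth_dev *dev,
                     struct igb_rte_flow_rss_conf *store,
                     const uint8_t *key, uint32_t key_len,
                     const uint16_t *queues, uint32_t nb_queues)
{
        struct rte_flow_action_rss act = {
                .types = ETH_RSS_IP,
                .key_len = key_len,
                .key = key,
                .queue_num = nb_queues,
                .queue = queues,
        };

        /* Validates queue_num/key_len and copies both arrays into *store. */
        return igb_rss_conf_init(dev, store, &act);
}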
2897
2898 int
2899 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2900                     const struct rte_flow_action_rss *with)
2901 {
2902         return (comp->func == with->func &&
2903                 comp->level == with->level &&
2904                 comp->types == with->types &&
2905                 comp->key_len == with->key_len &&
2906                 comp->queue_num == with->queue_num &&
2907                 !memcmp(comp->key, with->key, with->key_len) &&
2908                 !memcmp(comp->queue, with->queue,
2909                         sizeof(*with->queue) * with->queue_num));
2910 }
2911
2912 int
2913 igb_config_rss_filter(struct rte_eth_dev *dev,
2914                 struct igb_rte_flow_rss_conf *conf, bool add)
2915 {
2916         uint32_t shift;
2917         uint16_t i, j;
2918         struct rte_eth_rss_conf rss_conf = {
2919                 .rss_key = conf->conf.key_len ?
2920                         (void *)(uintptr_t)conf->conf.key : NULL,
2921                 .rss_key_len = conf->conf.key_len,
2922                 .rss_hf = conf->conf.types,
2923         };
2924         struct e1000_filter_info *filter_info =
2925                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2926         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2927
2928
2929
2930         if (!add) {
2931                 if (igb_action_rss_same(&filter_info->rss_info.conf,
2932                                         &conf->conf)) {
2933                         igb_rss_disable(dev);
2934                         memset(&filter_info->rss_info, 0,
2935                                 sizeof(struct igb_rte_flow_rss_conf));
2936                         return 0;
2937                 }
2938                 return -EINVAL;
2939         }
2940
2941         if (filter_info->rss_info.conf.queue_num)
2942                 return -EINVAL;
2943
2944         /* Fill in redirection table. */
2945         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2946         for (i = 0, j = 0; i < 128; i++, j++) {
2947                 union e1000_reta {
2948                         uint32_t dword;
2949                         uint8_t  bytes[4];
2950                 } reta;
2951                 uint8_t q_idx;
2952
2953                 if (j == conf->conf.queue_num)
2954                         j = 0;
2955                 q_idx = conf->conf.queue[j];
2956                 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2957                 if ((i & 3) == 3)
2958                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2959         }
2960
2961         /* Configure the RSS key and the RSS protocols used to compute
2962          * the RSS hash of input packets.
2963          */
2964         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2965                 igb_rss_disable(dev);
2966                 return 0;
2967         }
2968         if (rss_conf.rss_key == NULL)
2969                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2970         igb_hw_rss_hash_set(hw, &rss_conf);
2971
2972         if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
2973                 return -EINVAL;
2974
2975         return 0;
2976 }
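/*
 * Illustrative sketch (not part of the driver): the redirection-table fill in
 * igb_config_rss_filter() above.  The 128-entry RETA is written four entries
 * at a time, with the configured queues assigned round-robin; 82575 needs the
 * queue index shifted left by 6 within each byte.  The hypothetical helper
 * below reproduces the packing of one RETA dword for entries i .. i+3.
 */
static inline uint32_t
igb_sketch_reta_dword(const uint16_t *queues, uint32_t queue_num,
                      uint32_t first_entry, uint32_t shift)
{
        union {
                uint32_t dword;
                uint8_t  bytes[4];
        } reta;
        uint32_t k;

        for (k = 0; k < 4; k++)
                reta.bytes[k] = (uint8_t)
                        (queues[(first_entry + k) % queue_num] << shift);
        return reta.dword;
}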