net: add rte prefix to ether defines
[dpdk.git] drivers/net/e1000/igb_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_net.h>
40 #include <rte_string_fns.h>
41
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
45
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
48 #else
49 #define IGB_TX_IEEE1588_TMST 0
50 #endif
51 /* Bit mask to indicate which bits are required for building a TX context */
52 #define IGB_TX_OFFLOAD_MASK (                    \
53                 PKT_TX_OUTER_IPV6 |      \
54                 PKT_TX_OUTER_IPV4 |      \
55                 PKT_TX_IPV6 |            \
56                 PKT_TX_IPV4 |            \
57                 PKT_TX_VLAN_PKT |                \
58                 PKT_TX_IP_CKSUM |                \
59                 PKT_TX_L4_MASK |                 \
60                 PKT_TX_TCP_SEG |                 \
61                 IGB_TX_IEEE1588_TMST)
62
63 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
64                 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
65
66 /**
67  * Structure associated with each descriptor of the RX ring of an RX queue.
68  */
69 struct igb_rx_entry {
70         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
71 };
72
73 /**
74  * Structure associated with each descriptor of the TX ring of a TX queue.
75  */
76 struct igb_tx_entry {
77         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
78         uint16_t next_id; /**< Index of next descriptor in ring. */
79         uint16_t last_id; /**< Index of last scattered descriptor. */
80 };
81
82 /**
83  * rx queue flags
84  */
85 enum igb_rxq_flags {
86         IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
87 };
88
89 /**
90  * Structure associated with each RX queue.
91  */
92 struct igb_rx_queue {
93         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
94         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
95         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
96         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
97         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
98         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
99         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
100         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
101         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
102         uint16_t            rx_tail;    /**< current value of RDT register. */
103         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
104         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
105         uint16_t            queue_id;   /**< RX queue index. */
106         uint16_t            reg_idx;    /**< RX queue register index. */
107         uint16_t            port_id;    /**< Device port identifier. */
108         uint8_t             pthresh;    /**< Prefetch threshold register. */
109         uint8_t             hthresh;    /**< Host threshold register. */
110         uint8_t             wthresh;    /**< Write-back threshold register. */
111         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
112         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
113         uint32_t            flags;      /**< RX flags. */
114         uint64_t            offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
115 };
116
117 /**
118  * Hardware context number
119  */
120 enum igb_advctx_num {
121         IGB_CTX_0    = 0, /**< CTX0    */
122         IGB_CTX_1    = 1, /**< CTX1    */
123         IGB_CTX_NUM  = 2, /**< CTX_NUM */
124 };
125
126 /** Offload features */
127 union igb_tx_offload {
128         uint64_t data;
129         struct {
130                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
131                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
132                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier (CPU order). */
133                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
134                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
135
136                 /* uint64_t unused:8; */
137         };
138 };
139
140 /*
141  * Compare masks for igb_tx_offload.data; they must stay in sync with
142  * the igb_tx_offload layout above.
143  */
144 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
145 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
146 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
147 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
148 /** Mac + IP + TCP + Mss mask. */
149 #define TX_TSO_CMP_MASK \
150         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
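
/*
 * Illustration (derived from the igb_tx_offload bit-field above, assuming
 * the LSB-first bit-field allocation used on the platforms this driver
 * targets): l3_len occupies bits 0-8 and l2_len bits 9-15, which together
 * form TX_MACIP_LEN_CMP_MASK; vlan_tci sits in bits 16-31
 * (TX_VLAN_CMP_MASK), l4_len in bits 32-39 (TX_TCP_LEN_CMP_MASK) and
 * tso_segsz in bits 40-55 (TX_TSO_MSS_CMP_MASK). A sketch of the invariant
 * the comment above asks to preserve:
 *
 *     union igb_tx_offload probe = { .data = 0 };
 *     probe.l2_len = 0x7f;      // all 7 bits set
 *     probe.l3_len = 0x1ff;     // all 9 bits set
 *     // probe.data must now equal TX_MACIP_LEN_CMP_MASK (0xFFFF)
 */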
151
152 /**
153  * Structure used to check whether a new context descriptor needs to be built
154  */
155 struct igb_advctx_info {
156         uint64_t flags;           /**< ol_flags related to context build. */
157         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
158         union igb_tx_offload tx_offload;
159         /** compare mask for tx offload. */
160         union igb_tx_offload tx_offload_mask;
161 };
162
163 /**
164  * Structure associated with each TX queue.
165  */
166 struct igb_tx_queue {
167         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
168         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
169         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
170         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
171         uint32_t               txd_type;      /**< Device-specific TXD type */
172         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
173         uint16_t               tx_tail; /**< Current value of TDT register. */
174         uint16_t               tx_head;
175         /**< Index of first used TX descriptor. */
176         uint16_t               queue_id; /**< TX queue index. */
177         uint16_t               reg_idx;  /**< TX queue register index. */
178         uint16_t               port_id;  /**< Device port identifier. */
179         uint8_t                pthresh;  /**< Prefetch threshold register. */
180         uint8_t                hthresh;  /**< Host threshold register. */
181         uint8_t                wthresh;  /**< Write-back threshold register. */
182         uint32_t               ctx_curr;
183         /**< Index of the currently used hardware context. */
184         uint32_t               ctx_start;
185         /**< Start context position for transmit queue. */
186         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
187         /**< Hardware context history.*/
188         uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
189 };
190
191 #if 1
192 #define RTE_PMD_USE_PREFETCH
193 #endif
194
195 #ifdef RTE_PMD_USE_PREFETCH
196 #define rte_igb_prefetch(p)     rte_prefetch0(p)
197 #else
198 #define rte_igb_prefetch(p)     do {} while(0)
199 #endif
200
201 #ifdef RTE_PMD_PACKET_PREFETCH
202 #define rte_packet_prefetch(p) rte_prefetch1(p)
203 #else
204 #define rte_packet_prefetch(p)  do {} while(0)
205 #endif
206
207 /*
208  * Macros for the VMDq feature and the TSO limits of the 1 GbE NIC.
209  */
210 #define E1000_VMOLR_SIZE                        (8)
211 #define IGB_TSO_MAX_HDRLEN                      (512)
212 #define IGB_TSO_MAX_MSS                         (9216)
213
214 /*********************************************************************
215  *
216  *  TX function
217  *
218  **********************************************************************/
219
220 /*
221  * There are some limitations in hardware for TCP segmentation offload,
222  * so we should check whether the parameters are valid.
223  */
224 static inline uint64_t
225 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
226 {
227         if (!(ol_req & PKT_TX_TCP_SEG))
228                 return ol_req;
229         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
230                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
231                 ol_req &= ~PKT_TX_TCP_SEG;
232                 ol_req |= PKT_TX_TCP_CKSUM;
233         }
234         return ol_req;
235 }
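
/*
 * Worked example (illustrative values only): a request carrying
 * PKT_TX_TCP_SEG with tso_segsz = 16000 exceeds IGB_TSO_MAX_MSS (9216),
 * so check_tso_para() clears PKT_TX_TCP_SEG and falls back to a plain
 * PKT_TX_TCP_CKSUM request. The same downgrade happens when
 * l2_len + l3_len + l4_len exceeds IGB_TSO_MAX_HDRLEN (512 bytes).
 */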
236
237 /*
238  * Advanced context descriptors are almost the same between igb and ixgbe.
239  * This is kept as a separate function to look for optimization opportunities;
240  * rework is required to go with the pre-defined values.
241  */
242
243 static inline void
244 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
245                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
246                 uint64_t ol_flags, union igb_tx_offload tx_offload)
247 {
248         uint32_t type_tucmd_mlhl;
249         uint32_t mss_l4len_idx;
250         uint32_t ctx_idx, ctx_curr;
251         uint32_t vlan_macip_lens;
252         union igb_tx_offload tx_offload_mask;
253
254         ctx_curr = txq->ctx_curr;
255         ctx_idx = ctx_curr + txq->ctx_start;
256
257         tx_offload_mask.data = 0;
258         type_tucmd_mlhl = 0;
259
260         /* Specify which HW CTX to upload. */
261         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
262
263         if (ol_flags & PKT_TX_VLAN_PKT)
264                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
265
266         /* check if TCP segmentation required for this packet */
267         if (ol_flags & PKT_TX_TCP_SEG) {
268                 /* implies IP cksum in IPv4 */
269                 if (ol_flags & PKT_TX_IP_CKSUM)
270                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
271                                 E1000_ADVTXD_TUCMD_L4T_TCP |
272                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
273                 else
274                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
275                                 E1000_ADVTXD_TUCMD_L4T_TCP |
276                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
277
278                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
279                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
280                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
281         } else { /* no TSO, check if hardware checksum is needed */
282                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
283                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
284
285                 if (ol_flags & PKT_TX_IP_CKSUM)
286                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
287
288                 switch (ol_flags & PKT_TX_L4_MASK) {
289                 case PKT_TX_UDP_CKSUM:
290                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
293                         break;
294                 case PKT_TX_TCP_CKSUM:
295                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
296                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
297                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
298                         break;
299                 case PKT_TX_SCTP_CKSUM:
300                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
301                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
302                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
303                         break;
304                 default:
305                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
306                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307                         break;
308                 }
309         }
310
311         txq->ctx_cache[ctx_curr].flags = ol_flags;
312         txq->ctx_cache[ctx_curr].tx_offload.data =
313                 tx_offload_mask.data & tx_offload.data;
314         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
315
316         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
317         vlan_macip_lens = (uint32_t)tx_offload.data;
318         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
319         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
320         ctx_txd->seqnum_seed = 0;
321 }
322
323 /*
324  * Check which hardware context can be used. Use an existing match
325  * or signal that a new context descriptor must be built.
326  */
327 static inline uint32_t
328 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
329                 union igb_tx_offload tx_offload)
330 {
331         /* If match with the current context */
332         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
333                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
334                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
335                         return txq->ctx_curr;
336         }
337
338         /* If match with the second context */
339         txq->ctx_curr ^= 1;
340         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
341                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
342                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
343                         return txq->ctx_curr;
344         }
345
346         /* Mismatch with both contexts: a new context descriptor is needed */
347         return IGB_CTX_NUM;
348 }
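
/*
 * Usage sketch (mirrors the logic in eth_igb_xmit_pkts() below, shown here
 * only for illustration):
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *     new_ctx = (ctx == IGB_CTX_NUM);      // neither cached slot matched
 *     if (new_ctx)
 *             igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
 *
 * Since the hardware provides two context slots (IGB_CTX_0/IGB_CTX_1), a
 * context descriptor only has to be rebuilt when the requested offloads
 * differ from both cached entries.
 */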
349
350 static inline uint32_t
351 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
352 {
353         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
354         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
355         uint32_t tmp;
356
357         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
358         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
359         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
360         return tmp;
361 }
362
363 static inline uint32_t
364 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
365 {
366         uint32_t cmdtype;
367         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
368         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
369         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
370         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
371         return cmdtype;
372 }
373
374 uint16_t
375 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
376                uint16_t nb_pkts)
377 {
378         struct igb_tx_queue *txq;
379         struct igb_tx_entry *sw_ring;
380         struct igb_tx_entry *txe, *txn;
381         volatile union e1000_adv_tx_desc *txr;
382         volatile union e1000_adv_tx_desc *txd;
383         struct rte_mbuf     *tx_pkt;
384         struct rte_mbuf     *m_seg;
385         uint64_t buf_dma_addr;
386         uint32_t olinfo_status;
387         uint32_t cmd_type_len;
388         uint32_t pkt_len;
389         uint16_t slen;
390         uint64_t ol_flags;
391         uint16_t tx_end;
392         uint16_t tx_id;
393         uint16_t tx_last;
394         uint16_t nb_tx;
395         uint64_t tx_ol_req;
396         uint32_t new_ctx = 0;
397         uint32_t ctx = 0;
398         union igb_tx_offload tx_offload = {0};
399
400         txq = tx_queue;
401         sw_ring = txq->sw_ring;
402         txr     = txq->tx_ring;
403         tx_id   = txq->tx_tail;
404         txe = &sw_ring[tx_id];
405
406         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
407                 tx_pkt = *tx_pkts++;
408                 pkt_len = tx_pkt->pkt_len;
409
410                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
411
412                 /*
413                  * The number of descriptors that must be allocated for a
414                  * packet is the number of segments of that packet, plus one
415                  * Context Descriptor for the offload information, if any.
416                  * Determine the last TX descriptor to allocate in the TX ring
417                  * for the packet, starting from the current position (tx_id)
418                  * in the ring.
419                  */
420                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
421
422                 ol_flags = tx_pkt->ol_flags;
423                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
424
425                 /* Check whether a Context Descriptor needs to be built. */
426                 if (tx_ol_req) {
427                         tx_offload.l2_len = tx_pkt->l2_len;
428                         tx_offload.l3_len = tx_pkt->l3_len;
429                         tx_offload.l4_len = tx_pkt->l4_len;
430                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
431                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
432                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
433
434                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
435                         /* Only allocate a context descriptor if required. */
436                         new_ctx = (ctx == IGB_CTX_NUM);
437                         ctx = txq->ctx_curr + txq->ctx_start;
438                         tx_last = (uint16_t) (tx_last + new_ctx);
439                 }
440                 if (tx_last >= txq->nb_tx_desc)
441                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
442
443                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
444                            " tx_first=%u tx_last=%u",
445                            (unsigned) txq->port_id,
446                            (unsigned) txq->queue_id,
447                            (unsigned) pkt_len,
448                            (unsigned) tx_id,
449                            (unsigned) tx_last);
450
451                 /*
452                  * Check if there are enough free descriptors in the TX ring
453                  * to transmit the next packet.
454                  * This operation is based on the two following rules:
455                  *
456                  *   1- Only check that the last needed TX descriptor can be
457                  *      allocated (by construction, if that descriptor is free,
458                  *      all intermediate ones are also free).
459                  *
460                  *      For this purpose, the index of the last TX descriptor
461                  *      used for a packet (the "last descriptor" of a packet)
462                  *      is recorded in the TX entries (the last one included)
463                  *      that are associated with all TX descriptors allocated
464                  *      for that packet.
465                  *
466                  *   2- Avoid allocating the last free TX descriptor of the
467                  *      ring, in order to never set the TDT register with the
468                  *      same value stored in parallel by the NIC in the TDH
469                  *      register, which would make the TX engine of the NIC
470                  *      enter a deadlock situation.
471                  *
472                  *      By extension, avoid allocating a free descriptor that
473                  *      belongs to the last set of free descriptors allocated
474                  *      to the same packet previously transmitted.
475                  */
476
477                 /*
478                  * The "last descriptor" of the previously sent packet, if any,
479                  * that used the last descriptor we are going to allocate.
480                  */
481                 tx_end = sw_ring[tx_last].last_id;
482
483                 /*
484                  * The next descriptor following that "last descriptor" in the
485                  * ring.
486                  */
487                 tx_end = sw_ring[tx_end].next_id;
488
489                 /*
490                  * The "last descriptor" associated with that next descriptor.
491                  */
492                 tx_end = sw_ring[tx_end].last_id;
493
494                 /*
495                  * Check that this descriptor is free.
496                  */
497                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
498                         if (nb_tx == 0)
499                                 return 0;
500                         goto end_of_tx;
501                 }
502
503                 /*
504                  * Set common flags of all TX Data Descriptors.
505                  *
506                  * The following bits must be set in all Data Descriptors:
507                  *   - E1000_ADVTXD_DTYP_DATA
508                  *   - E1000_ADVTXD_DCMD_DEXT
509                  *
510                  * The following bits must be set in the first Data Descriptor
511                  * and are ignored in the other ones:
512                  *   - E1000_ADVTXD_DCMD_IFCS
513                  *   - E1000_ADVTXD_MAC_1588
514                  *   - E1000_ADVTXD_DCMD_VLE
515                  *
516                  * The following bits must only be set in the last Data
517                  * Descriptor:
518                  *   - E1000_TXD_CMD_EOP
519                  *
520                  * The following bits can be set in any Data Descriptor, but
521                  * are only set in the last Data Descriptor:
522                  *   - E1000_TXD_CMD_RS
523                  */
524                 cmd_type_len = txq->txd_type |
525                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
526                 if (tx_ol_req & PKT_TX_TCP_SEG)
527                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
528                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
529 #if defined(RTE_LIBRTE_IEEE1588)
530                 if (ol_flags & PKT_TX_IEEE1588_TMST)
531                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
532 #endif
533                 if (tx_ol_req) {
534                         /* Setup TX Advanced context descriptor if required */
535                         if (new_ctx) {
536                                 volatile struct e1000_adv_tx_context_desc *
537                                     ctx_txd;
538
539                                 ctx_txd = (volatile struct
540                                     e1000_adv_tx_context_desc *)
541                                     &txr[tx_id];
542
543                                 txn = &sw_ring[txe->next_id];
544                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
545
546                                 if (txe->mbuf != NULL) {
547                                         rte_pktmbuf_free_seg(txe->mbuf);
548                                         txe->mbuf = NULL;
549                                 }
550
551                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
552
553                                 txe->last_id = tx_last;
554                                 tx_id = txe->next_id;
555                                 txe = txn;
556                         }
557
558                         /* Setup the TX Advanced Data Descriptor */
559                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
560                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
561                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
562                 }
563
564                 m_seg = tx_pkt;
565                 do {
566                         txn = &sw_ring[txe->next_id];
567                         txd = &txr[tx_id];
568
569                         if (txe->mbuf != NULL)
570                                 rte_pktmbuf_free_seg(txe->mbuf);
571                         txe->mbuf = m_seg;
572
573                         /*
574                          * Set up transmit descriptor.
575                          */
576                         slen = (uint16_t) m_seg->data_len;
577                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
578                         txd->read.buffer_addr =
579                                 rte_cpu_to_le_64(buf_dma_addr);
580                         txd->read.cmd_type_len =
581                                 rte_cpu_to_le_32(cmd_type_len | slen);
582                         txd->read.olinfo_status =
583                                 rte_cpu_to_le_32(olinfo_status);
584                         txe->last_id = tx_last;
585                         tx_id = txe->next_id;
586                         txe = txn;
587                         m_seg = m_seg->next;
588                 } while (m_seg != NULL);
589
590                 /*
591                  * The last packet data descriptor needs End Of Packet (EOP)
592                  * and Report Status (RS).
593                  */
594                 txd->read.cmd_type_len |=
595                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
596         }
597  end_of_tx:
598         rte_wmb();
599
600         /*
601          * Set the Transmit Descriptor Tail (TDT).
602          */
603         E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
604         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
605                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
606                    (unsigned) tx_id, (unsigned) nb_tx);
607         txq->tx_tail = tx_id;
608
609         return nb_tx;
610 }
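
/*
 * Application-level usage sketch (illustrative only, not part of the
 * driver): eth_igb_xmit_pkts() is installed as the tx_pkt_burst callback
 * and is normally reached through the generic ethdev API:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb_ready;   // number of valid entries in pkts[]
 *     uint16_t nb_sent;
 *
 *     nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ready);
 *     if (nb_sent < nb_ready) {
 *             // TX ring was full: free or retry the remaining mbufs
 *     }
 *
 * Mbufs that were not sent remain owned by the application.
 */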
611
612 /*********************************************************************
613  *
614  *  TX prep functions
615  *
616  **********************************************************************/
617 uint16_t
618 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
619                 uint16_t nb_pkts)
620 {
621         int i, ret;
622         struct rte_mbuf *m;
623
624         for (i = 0; i < nb_pkts; i++) {
625                 m = tx_pkts[i];
626
627                 /* Check some limitations for TSO in hardware */
628                 if (m->ol_flags & PKT_TX_TCP_SEG)
629                         if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
630                                         (m->l2_len + m->l3_len + m->l4_len >
631                                         IGB_TSO_MAX_HDRLEN)) {
632                                 rte_errno = EINVAL;
633                                 return i;
634                         }
635
636                 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
637                         rte_errno = ENOTSUP;
638                         return i;
639                 }
640
641 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
642                 ret = rte_validate_tx_offload(m);
643                 if (ret != 0) {
644                         rte_errno = -ret;
645                         return i;
646                 }
647 #endif
648                 ret = rte_net_intel_cksum_prepare(m);
649                 if (ret != 0) {
650                         rte_errno = -ret;
651                         return i;
652                 }
653         }
654
655         return i;
656 }
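
/*
 * Usage sketch (illustrative only): eth_igb_prep_pkts() is exposed through
 * rte_eth_tx_prepare(), which applications may call before
 * rte_eth_tx_burst() when relying on checksum or TSO offloads:
 *
 *     uint16_t nb_prep;
 *
 *     nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_ready);
 *     if (nb_prep < nb_ready) {
 *             // pkts[nb_prep] failed the checks; rte_errno holds the reason
 *     }
 *     nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */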
657
658 /*********************************************************************
659  *
660  *  RX functions
661  *
662  **********************************************************************/
663 #define IGB_PACKET_TYPE_IPV4              0X01
664 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
665 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
666 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
667 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
668 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
669 #define IGB_PACKET_TYPE_IPV6              0X04
670 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
671 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
672 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
673 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
674 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
675 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
676 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
677 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
678 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
679 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
680 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
681 #define IGB_PACKET_TYPE_MAX               0X80
682 #define IGB_PACKET_TYPE_MASK              0X7F
683 #define IGB_PACKET_TYPE_SHIFT             0X04
684 static inline uint32_t
685 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
686 {
687         static const uint32_t
688                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
689                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
690                         RTE_PTYPE_L3_IPV4,
691                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
692                         RTE_PTYPE_L3_IPV4_EXT,
693                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
694                         RTE_PTYPE_L3_IPV6,
695                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
696                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697                         RTE_PTYPE_INNER_L3_IPV6,
698                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
699                         RTE_PTYPE_L3_IPV6_EXT,
700                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
701                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702                         RTE_PTYPE_INNER_L3_IPV6_EXT,
703                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
705                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
706                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
707                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
708                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
709                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
710                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
711                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
712                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
713                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
714                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
715                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
716                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
717                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
718                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
719                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
720                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
721                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
722                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
723                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
724                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
725                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
726                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
727                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
728                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
729                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
730                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
731         };
732         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
733                 return RTE_PTYPE_UNKNOWN;
734
735         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
736
737         return ptype_table[pkt_info];
738 }
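
/*
 * Worked example (values taken from the table above): a plain IPv4/TCP
 * frame yields a packet-type index of IGB_PACKET_TYPE_IPV4_TCP (0x11)
 * after the IGB_PACKET_TYPE_SHIFT/IGB_PACKET_TYPE_MASK adjustment, so the
 * function returns RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 * Indexes without an entry in ptype_table resolve to 0, i.e.
 * RTE_PTYPE_UNKNOWN, as does any descriptor matched by an ETQF filter.
 */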
739
740 static inline uint64_t
741 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
742 {
743         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
744
745 #if defined(RTE_LIBRTE_IEEE1588)
746         static uint32_t ip_pkt_etqf_map[8] = {
747                 0, 0, 0, PKT_RX_IEEE1588_PTP,
748                 0, 0, 0, 0,
749         };
750
751         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
752         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
753
754         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
755         if (hw->mac.type == e1000_i210)
756                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
757         else
758                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
759 #else
760         RTE_SET_USED(rxq);
761 #endif
762
763         return pkt_flags;
764 }
765
766 static inline uint64_t
767 rx_desc_status_to_pkt_flags(uint32_t rx_status)
768 {
769         uint64_t pkt_flags;
770
771         /* Check if VLAN present */
772         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
773                 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
774
775 #if defined(RTE_LIBRTE_IEEE1588)
776         if (rx_status & E1000_RXD_STAT_TMST)
777                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
778 #endif
779         return pkt_flags;
780 }
781
782 static inline uint64_t
783 rx_desc_error_to_pkt_flags(uint32_t rx_status)
784 {
785         /*
786          * Bit 30: IPE, IPv4 checksum error
787          * Bit 29: L4I, L4 integrity error
788          */
789
790         static uint64_t error_to_pkt_flags_map[4] = {
791                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
792                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
793                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
794                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
795         };
796         return error_to_pkt_flags_map[(rx_status >>
797                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
798 }
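
/*
 * Illustration of the two-bit lookup above (based on the bit description
 * in the preceding comment): the IPE and L4I error bits are shifted down
 * to form an index 0-3 into error_to_pkt_flags_map; for example, index 2
 * (IPE set, L4I clear) yields PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
 */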
799
800 uint16_t
801 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
802                uint16_t nb_pkts)
803 {
804         struct igb_rx_queue *rxq;
805         volatile union e1000_adv_rx_desc *rx_ring;
806         volatile union e1000_adv_rx_desc *rxdp;
807         struct igb_rx_entry *sw_ring;
808         struct igb_rx_entry *rxe;
809         struct rte_mbuf *rxm;
810         struct rte_mbuf *nmb;
811         union e1000_adv_rx_desc rxd;
812         uint64_t dma_addr;
813         uint32_t staterr;
814         uint32_t hlen_type_rss;
815         uint16_t pkt_len;
816         uint16_t rx_id;
817         uint16_t nb_rx;
818         uint16_t nb_hold;
819         uint64_t pkt_flags;
820
821         nb_rx = 0;
822         nb_hold = 0;
823         rxq = rx_queue;
824         rx_id = rxq->rx_tail;
825         rx_ring = rxq->rx_ring;
826         sw_ring = rxq->sw_ring;
827         while (nb_rx < nb_pkts) {
828                 /*
829                  * The order of operations here is important as the DD status
830                  * bit must not be read after any other descriptor fields.
831                  * rx_ring and rxdp are pointing to volatile data so the order
832                  * of accesses cannot be reordered by the compiler. If they were
833                  * not volatile, they could be reordered which could lead to
834                  * using invalid descriptor fields when read from rxd.
835                  */
836                 rxdp = &rx_ring[rx_id];
837                 staterr = rxdp->wb.upper.status_error;
838                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
839                         break;
840                 rxd = *rxdp;
841
842                 /*
843                  * End of packet.
844                  *
845                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
846                  * likely to be invalid and to be dropped by the various
847                  * validation checks performed by the network stack.
848                  *
849                  * Allocate a new mbuf to replenish the RX ring descriptor.
850                  * If the allocation fails:
851                  *    - arrange for that RX descriptor to be the first one
852                  *      being parsed the next time the receive function is
853                  *      invoked [on the same queue].
854                  *
855                  *    - Stop parsing the RX ring and return immediately.
856                  *
857                  * This policy does not drop the packet received in the RX
858                  * descriptor for which the allocation of a new mbuf failed.
859                  * Thus, it allows that packet to be later retrieved if
860                  * mbufs have been freed in the meantime.
861                  * As a side effect, holding RX descriptors instead of
862                  * systematically giving them back to the NIC may lead to
863                  * RX ring exhaustion situations.
864                  * However, the NIC can gracefully prevent such situations
865                  * from happening by sending specific "back-pressure" flow
866                  * control frames to its peer(s).
867                  */
868                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
869                            "staterr=0x%x pkt_len=%u",
870                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
871                            (unsigned) rx_id, (unsigned) staterr,
872                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
873
874                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
875                 if (nmb == NULL) {
876                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
877                                    "queue_id=%u", (unsigned) rxq->port_id,
878                                    (unsigned) rxq->queue_id);
879                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
880                         break;
881                 }
882
883                 nb_hold++;
884                 rxe = &sw_ring[rx_id];
885                 rx_id++;
886                 if (rx_id == rxq->nb_rx_desc)
887                         rx_id = 0;
888
889                 /* Prefetch next mbuf while processing current one. */
890                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
891
892                 /*
893                  * When next RX descriptor is on a cache-line boundary,
894                  * prefetch the next 4 RX descriptors and the next 8 pointers
895                  * to mbufs.
896                  */
897                 if ((rx_id & 0x3) == 0) {
898                         rte_igb_prefetch(&rx_ring[rx_id]);
899                         rte_igb_prefetch(&sw_ring[rx_id]);
900                 }
901
902                 rxm = rxe->mbuf;
903                 rxe->mbuf = nmb;
904                 dma_addr =
905                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
906                 rxdp->read.hdr_addr = 0;
907                 rxdp->read.pkt_addr = dma_addr;
908
909                 /*
910                  * Initialize the returned mbuf.
911                  * 1) setup generic mbuf fields:
912                  *    - number of segments,
913                  *    - next segment,
914                  *    - packet length,
915                  *    - RX port identifier.
916                  * 2) integrate hardware offload data, if any:
917                  *    - RSS flag & hash,
918                  *    - IP checksum flag,
919                  *    - VLAN TCI, if any,
920                  *    - error flags.
921                  */
922                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
923                                       rxq->crc_len);
924                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
925                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
926                 rxm->nb_segs = 1;
927                 rxm->next = NULL;
928                 rxm->pkt_len = pkt_len;
929                 rxm->data_len = pkt_len;
930                 rxm->port = rxq->port_id;
931
932                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
933                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
934
935                 /*
936                  * The vlan_tci field is only valid when PKT_RX_VLAN is
937                  * set in the pkt_flags field and must be in CPU byte order.
938                  */
939                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
940                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
941                         rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
942                 } else {
943                         rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
944                 }
945                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
946                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
947                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
948                 rxm->ol_flags = pkt_flags;
949                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
950                                                 lo_dword.hs_rss.pkt_info);
951
952                 /*
953                  * Store the mbuf address into the next entry of the array
954                  * of returned packets.
955                  */
956                 rx_pkts[nb_rx++] = rxm;
957         }
958         rxq->rx_tail = rx_id;
959
960         /*
961          * If the number of free RX descriptors is greater than the RX free
962          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
963          * register.
964          * Update the RDT with the value of the last processed RX descriptor
965          * minus 1, to guarantee that the RDT register is never equal to the
966          * RDH register, which creates a "full" ring situation from the
967          * hardware point of view...
968          */
969         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
970         if (nb_hold > rxq->rx_free_thresh) {
971                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
972                            "nb_hold=%u nb_rx=%u",
973                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
974                            (unsigned) rx_id, (unsigned) nb_hold,
975                            (unsigned) nb_rx);
976                 rx_id = (uint16_t) ((rx_id == 0) ?
977                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
978                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
979                 nb_hold = 0;
980         }
981         rxq->nb_rx_hold = nb_hold;
982         return nb_rx;
983 }
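
/*
 * Application-level usage sketch (illustrative only): this function is
 * installed as the rx_pkt_burst callback and is normally reached through
 * rte_eth_rx_burst():
 *
 *     struct rte_mbuf *bufs[32];
 *     uint16_t nb_rx, i;
 *
 *     nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *     for (i = 0; i < nb_rx; i++) {
 *             // process bufs[i], then rte_pktmbuf_free(bufs[i])
 *     }
 */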
984
985 uint16_t
986 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
987                          uint16_t nb_pkts)
988 {
989         struct igb_rx_queue *rxq;
990         volatile union e1000_adv_rx_desc *rx_ring;
991         volatile union e1000_adv_rx_desc *rxdp;
992         struct igb_rx_entry *sw_ring;
993         struct igb_rx_entry *rxe;
994         struct rte_mbuf *first_seg;
995         struct rte_mbuf *last_seg;
996         struct rte_mbuf *rxm;
997         struct rte_mbuf *nmb;
998         union e1000_adv_rx_desc rxd;
999         uint64_t dma; /* Physical address of mbuf data buffer */
1000         uint32_t staterr;
1001         uint32_t hlen_type_rss;
1002         uint16_t rx_id;
1003         uint16_t nb_rx;
1004         uint16_t nb_hold;
1005         uint16_t data_len;
1006         uint64_t pkt_flags;
1007
1008         nb_rx = 0;
1009         nb_hold = 0;
1010         rxq = rx_queue;
1011         rx_id = rxq->rx_tail;
1012         rx_ring = rxq->rx_ring;
1013         sw_ring = rxq->sw_ring;
1014
1015         /*
1016          * Retrieve RX context of current packet, if any.
1017          */
1018         first_seg = rxq->pkt_first_seg;
1019         last_seg = rxq->pkt_last_seg;
1020
1021         while (nb_rx < nb_pkts) {
1022         next_desc:
1023                 /*
1024                  * The order of operations here is important as the DD status
1025                  * bit must not be read after any other descriptor fields.
1026                  * rx_ring and rxdp are pointing to volatile data so the order
1027                  * of accesses cannot be reordered by the compiler. If they were
1028                  * not volatile, they could be reordered which could lead to
1029                  * using invalid descriptor fields when read from rxd.
1030                  */
1031                 rxdp = &rx_ring[rx_id];
1032                 staterr = rxdp->wb.upper.status_error;
1033                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1034                         break;
1035                 rxd = *rxdp;
1036
1037                 /*
1038                  * Descriptor done.
1039                  *
1040                  * Allocate a new mbuf to replenish the RX ring descriptor.
1041                  * If the allocation fails:
1042                  *    - arrange for that RX descriptor to be the first one
1043                  *      being parsed the next time the receive function is
1044                  *      invoked [on the same queue].
1045                  *
1046                  *    - Stop parsing the RX ring and return immediately.
1047                  *
1048                  * This policy does not drop the packet received in the RX
1049                  * descriptor for which the allocation of a new mbuf failed.
1050                  * Thus, it allows that packet to be later retrieved if
1051                  * mbufs have been freed in the meantime.
1052                  * As a side effect, holding RX descriptors instead of
1053                  * systematically giving them back to the NIC may lead to
1054                  * RX ring exhaustion situations.
1055                  * However, the NIC can gracefully prevent such situations
1056                  * from happening by sending specific "back-pressure" flow
1057                  * control frames to its peer(s).
1058                  */
1059                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1060                            "staterr=0x%x data_len=%u",
1061                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1062                            (unsigned) rx_id, (unsigned) staterr,
1063                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1064
1065                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1066                 if (nmb == NULL) {
1067                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1068                                    "queue_id=%u", (unsigned) rxq->port_id,
1069                                    (unsigned) rxq->queue_id);
1070                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1071                         break;
1072                 }
1073
1074                 nb_hold++;
1075                 rxe = &sw_ring[rx_id];
1076                 rx_id++;
1077                 if (rx_id == rxq->nb_rx_desc)
1078                         rx_id = 0;
1079
1080                 /* Prefetch next mbuf while processing current one. */
1081                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1082
1083                 /*
1084                  * When next RX descriptor is on a cache-line boundary,
1085                  * prefetch the next 4 RX descriptors and the next 8 pointers
1086                  * to mbufs.
1087                  */
1088                 if ((rx_id & 0x3) == 0) {
1089                         rte_igb_prefetch(&rx_ring[rx_id]);
1090                         rte_igb_prefetch(&sw_ring[rx_id]);
1091                 }
1092
1093                 /*
1094                  * Update RX descriptor with the physical address of the new
1095                  * data buffer of the new allocated mbuf.
1096                  */
1097                 rxm = rxe->mbuf;
1098                 rxe->mbuf = nmb;
1099                 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1100                 rxdp->read.pkt_addr = dma;
1101                 rxdp->read.hdr_addr = 0;
1102
1103                 /*
1104                  * Set data length & data buffer address of mbuf.
1105                  */
1106                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1107                 rxm->data_len = data_len;
1108                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1109
1110                 /*
1111                  * If this is the first buffer of the received packet,
1112                  * set the pointer to the first mbuf of the packet and
1113                  * initialize its context.
1114                  * Otherwise, update the total length and the number of segments
1115                  * of the current scattered packet, and update the pointer to
1116                  * the last mbuf of the current packet.
1117                  */
1118                 if (first_seg == NULL) {
1119                         first_seg = rxm;
1120                         first_seg->pkt_len = data_len;
1121                         first_seg->nb_segs = 1;
1122                 } else {
1123                         first_seg->pkt_len += data_len;
1124                         first_seg->nb_segs++;
1125                         last_seg->next = rxm;
1126                 }
1127
1128                 /*
1129                  * If this is not the last buffer of the received packet,
1130                  * update the pointer to the last mbuf of the current scattered
1131                  * packet and continue to parse the RX ring.
1132                  */
1133                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1134                         last_seg = rxm;
1135                         goto next_desc;
1136                 }
1137
1138                 /*
1139                  * This is the last buffer of the received packet.
1140                  * If the CRC is not stripped by the hardware:
1141                  *   - Subtract the CRC length from the total packet length.
1142                  *   - If the last buffer only contains the whole CRC or a part
1143                  *     of it, free the mbuf associated to the last buffer.
1144                  *     If part of the CRC is also contained in the previous
1145                  *     mbuf, subtract the length of that CRC part from the
1146                  *     data length of the previous mbuf.
1147                  */
1148                 rxm->next = NULL;
1149                 if (unlikely(rxq->crc_len > 0)) {
1150                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1151                         if (data_len <= RTE_ETHER_CRC_LEN) {
1152                                 rte_pktmbuf_free_seg(rxm);
1153                                 first_seg->nb_segs--;
1154                                 last_seg->data_len = (uint16_t)
1155                                         (last_seg->data_len -
1156                                          (RTE_ETHER_CRC_LEN - data_len));
1157                                 last_seg->next = NULL;
1158                         } else
1159                                 rxm->data_len = (uint16_t)
1160                                         (data_len - RTE_ETHER_CRC_LEN);
1161                 }
1162
1163                 /*
1164                  * Initialize the first mbuf of the returned packet:
1165                  *    - RX port identifier,
1166                  *    - hardware offload data, if any:
1167                  *      - RSS flag & hash,
1168                  *      - IP checksum flag,
1169                  *      - VLAN TCI, if any,
1170                  *      - error flags.
1171                  */
1172                 first_seg->port = rxq->port_id;
1173                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1174
1175                 /*
1176                  * The vlan_tci field is only valid when PKT_RX_VLAN is
1177                  * set in the pkt_flags field and must be in CPU byte order.
1178                  */
1179                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1180                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1181                         first_seg->vlan_tci =
1182                                 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1183                 } else {
1184                         first_seg->vlan_tci =
1185                                 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1186                 }
1187                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1188                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1189                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1190                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1191                 first_seg->ol_flags = pkt_flags;
1192                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1193                                         lower.lo_dword.hs_rss.pkt_info);
1194
1195                 /* Prefetch data of first segment, if configured to do so. */
1196                 rte_packet_prefetch((char *)first_seg->buf_addr +
1197                         first_seg->data_off);
1198
1199                 /*
1200                  * Store the mbuf address into the next entry of the array
1201                  * of returned packets.
1202                  */
1203                 rx_pkts[nb_rx++] = first_seg;
1204
1205                 /*
1206                  * Setup receipt context for a new packet.
1207                  */
1208                 first_seg = NULL;
1209         }
1210
1211         /*
1212          * Record index of the next RX descriptor to probe.
1213          */
1214         rxq->rx_tail = rx_id;
1215
1216         /*
1217          * Save receive context.
1218          */
1219         rxq->pkt_first_seg = first_seg;
1220         rxq->pkt_last_seg = last_seg;
1221
1222         /*
1223          * If the number of free RX descriptors is greater than the RX free
1224          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1225          * register.
1226          * Update the RDT with the value of the last processed RX descriptor
1227          * minus 1, to guarantee that the RDT register is never equal to the
1228          * RDH register, which creates a "full" ring situation from the
1229          * hardware point of view...
1230          */
1231         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1232         if (nb_hold > rxq->rx_free_thresh) {
1233                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1234                            "nb_hold=%u nb_rx=%u",
1235                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1236                            (unsigned) rx_id, (unsigned) nb_hold,
1237                            (unsigned) nb_rx);
1238                 rx_id = (uint16_t) ((rx_id == 0) ?
1239                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1240                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1241                 nb_hold = 0;
1242         }
1243         rxq->nb_rx_hold = nb_hold;
1244         return nb_rx;
1245 }
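
/*
 * Note: this scattered variant is used instead of eth_igb_recv_pkts() when
 * a received frame may span several mbufs, typically because the mbuf data
 * room is smaller than the maximum frame size configured on the port. The
 * non-EOP segments are chained through mbuf->next, and only the first
 * segment of the chain carries the offload flags, the VLAN TCI and the
 * total pkt_len.
 */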
1246
1247 /*
1248  * Maximum number of Ring Descriptors.
1249  *
1250  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1251  * descriptors should meet the following condition:
1252  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1253  */
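
/*
 * For example (illustrative numbers): both e1000_adv_rx_desc and
 * e1000_adv_tx_desc are 16 bytes, so the condition above means the ring
 * size must be a multiple of 8 descriptors (8 * 16 = 128 bytes). A ring of
 * 512 descriptors satisfies the RDLEN/TDLEN alignment; a ring of 500 would
 * not.
 */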
1254
1255 static void
1256 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1257 {
1258         unsigned i;
1259
1260         if (txq->sw_ring != NULL) {
1261                 for (i = 0; i < txq->nb_tx_desc; i++) {
1262                         if (txq->sw_ring[i].mbuf != NULL) {
1263                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1264                                 txq->sw_ring[i].mbuf = NULL;
1265                         }
1266                 }
1267         }
1268 }
1269
1270 static void
1271 igb_tx_queue_release(struct igb_tx_queue *txq)
1272 {
1273         if (txq != NULL) {
1274                 igb_tx_queue_release_mbufs(txq);
1275                 rte_free(txq->sw_ring);
1276                 rte_free(txq);
1277         }
1278 }
1279
1280 void
1281 eth_igb_tx_queue_release(void *txq)
1282 {
1283         igb_tx_queue_release(txq);
1284 }
1285
1286 static int
1287 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1288 {
1289         struct igb_tx_entry *sw_ring;
1290         volatile union e1000_adv_tx_desc *txr;
1291         uint16_t tx_first; /* First segment analyzed. */
1292         uint16_t tx_id;    /* Current segment being processed. */
1293         uint16_t tx_last;  /* Last segment in the current packet. */
1294         uint16_t tx_next;  /* First segment of the next packet. */
1295         int count;
1296
1297         if (txq != NULL) {
1298                 count = 0;
1299                 sw_ring = txq->sw_ring;
1300                 txr = txq->tx_ring;
1301
1302                 /*
1303                  * tx_tail is the last sent packet on the sw_ring. Go to the end
1304                  * of that packet (the last segment in the packet chain); the
1305                  * segment after it is the start of the oldest packet in the
1306                  * sw_ring, which is the first packet this function attempts
1307                  * to free.
1308                  */
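
                /*
                 * Illustrative example (hypothetical index values): if the
                 * entry at tx_tail has last_id == 10 (the final segment of
                 * the most recently sent packet) and sw_ring[10].next_id ==
                 * 11, the scan below starts at descriptor 11 - the oldest
                 * segment still held in the software ring.
                 */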
1309
1310                 /* Get last segment in most recently added packet. */
1311                 tx_first = sw_ring[txq->tx_tail].last_id;
1312
1313                 /* Get the next segment, which is the oldest segment in ring. */
1314                 tx_first = sw_ring[tx_first].next_id;
1315
1316                 /* Set the current index to the first. */
1317                 tx_id = tx_first;
1318
1319                 /*
1320                  * Loop through each packet. For each packet, verify that an
1321                  * mbuf exists and that the last segment is free. If so, free
1322                  * it and move on.
1323                  */
1324                 while (1) {
1325                         tx_last = sw_ring[tx_id].last_id;
1326
1327                         if (sw_ring[tx_last].mbuf) {
1328                                 if (txr[tx_last].wb.status &
1329                                                 E1000_TXD_STAT_DD) {
1330                                         /*
1331                                          * Increment the number of packets
1332                                          * freed.
1333                                          */
1334                                         count++;
1335
1336                                         /* Get the start of the next packet. */
1337                                         tx_next = sw_ring[tx_last].next_id;
1338
1339                                         /*
1340                                          * Loop through all segments in a
1341                                          * packet.
1342                                          */
1343                                         do {
1344                                                 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1345                                                 sw_ring[tx_id].mbuf = NULL;
1346                                                 sw_ring[tx_id].last_id = tx_id;
1347
1348                                                 /* Move to next segment. */
1349                                                 tx_id = sw_ring[tx_id].next_id;
1350
1351                                         } while (tx_id != tx_next);
1352
1353                                         if (unlikely(count == (int)free_cnt))
1354                                                 break;
1355                                 } else
1356                                         /*
1357                                          * mbuf still in use, nothing left to
1358                                          * free.
1359                                          */
1360                                         break;
1361                         } else {
1362                                 /*
1363                                  * There are multiple reasons to be here:
1364                                  * 1) All the packets on the ring have been
1365                                  *    freed - tx_id is equal to tx_first
1366                                  *    and some packets have been freed.
1367                                  *    - Done, exit
1368                                  * 2) The interface has not sent a ring's worth
1369                                  *    of packets yet, so the segment after the
1370                                  *    tail is still empty. Or a previous call to
1371                                  *    this function freed some of the segments
1372                                  *    but not all, so there is a hole in the
1373                                  *    list. Hopefully this is a rare case.
1374                                  *    - Walk the list and find the next mbuf. If
1375                                  *      there isn't one, then done.
1376                                  */
1377                                 if (likely((tx_id == tx_first) && (count != 0)))
1378                                         break;
1379
1380                                 /*
1381                                  * Walk the list and find the next mbuf, if any.
1382                                  */
1383                                 do {
1384                                         /* Move to next segment. */
1385                                         tx_id = sw_ring[tx_id].next_id;
1386
1387                                         if (sw_ring[tx_id].mbuf)
1388                                                 break;
1389
1390                                 } while (tx_id != tx_first);
1391
1392                                 /*
1393                                  * Determine why previous loop bailed. If there
1394                                  * is not an mbuf, done.
1395                                  */
1396                                 if (sw_ring[tx_id].mbuf == NULL)
1397                                         break;
1398                         }
1399                 }
1400         } else
1401                 count = -ENODEV;
1402
1403         return count;
1404 }
1405
1406 int
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1408 {
1409         return igb_tx_done_cleanup(txq, free_cnt);
1410 }
1411
1412 static void
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1414 {
1415         txq->tx_head = 0;
1416         txq->tx_tail = 0;
1417         txq->ctx_curr = 0;
1418         memset((void*)&txq->ctx_cache, 0,
1419                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1420 }
1421
1422 static void
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1424 {
1425         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426         struct igb_tx_entry *txe = txq->sw_ring;
1427         uint16_t i, prev;
1428         struct e1000_hw *hw;
1429
1430         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431         /* Zero out HW ring memory */
1432         for (i = 0; i < txq->nb_tx_desc; i++) {
1433                 txq->tx_ring[i] = zeroed_desc;
1434         }
1435
1436         /* Initialize ring entries */
1437         prev = (uint16_t)(txq->nb_tx_desc - 1);
1438         for (i = 0; i < txq->nb_tx_desc; i++) {
1439                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1440
1441                 txd->wb.status = E1000_TXD_STAT_DD;
1442                 txe[i].mbuf = NULL;
1443                 txe[i].last_id = i;
1444                 txe[prev].next_id = i;
1445                 prev = i;
1446         }
1447
1448         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449         /* 82575 specific, each tx queue will use 2 hw contexts */
1450         if (hw->mac.type == e1000_82575)
1451                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1452
1453         igb_reset_tx_queue_stat(txq);
1454 }
1455
1456 uint64_t
1457 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1458 {
1459         uint64_t tx_offload_capa;
1460
1461         RTE_SET_USED(dev);
1462         tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1463                           DEV_TX_OFFLOAD_IPV4_CKSUM  |
1464                           DEV_TX_OFFLOAD_UDP_CKSUM   |
1465                           DEV_TX_OFFLOAD_TCP_CKSUM   |
1466                           DEV_TX_OFFLOAD_SCTP_CKSUM  |
1467                           DEV_TX_OFFLOAD_TCP_TSO     |
1468                           DEV_TX_OFFLOAD_MULTI_SEGS;
1469
1470         return tx_offload_capa;
1471 }
1472
1473 uint64_t
1474 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1475 {
1476         uint64_t tx_queue_offload_capa;
1477
1478         tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1479
1480         return tx_queue_offload_capa;
1481 }
1482
1483 int
1484 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1485                          uint16_t queue_idx,
1486                          uint16_t nb_desc,
1487                          unsigned int socket_id,
1488                          const struct rte_eth_txconf *tx_conf)
1489 {
1490         const struct rte_memzone *tz;
1491         struct igb_tx_queue *txq;
1492         struct e1000_hw     *hw;
1493         uint32_t size;
1494         uint64_t offloads;
1495
1496         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1497
1498         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1499
1500         /*
1501          * Validate number of transmit descriptors.
1502          * It must not exceed hardware maximum, and must be multiple
1503          * of E1000_ALIGN.
1504          */
1505         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1506                         (nb_desc > E1000_MAX_RING_DESC) ||
1507                         (nb_desc < E1000_MIN_RING_DESC)) {
1508                 return -EINVAL;
1509         }
1510
1511         /*
1512          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1513          * driver.
1514          */
1515         if (tx_conf->tx_free_thresh != 0)
1516                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1517                              "used for the 1G driver.");
1518         if (tx_conf->tx_rs_thresh != 0)
1519                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1520                              "used for the 1G driver.");
1521         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1522                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1523                              "consider setting the TX WTHRESH value to 4, 8, "
1524                              "or 16.");
1525
1526         /* Free memory prior to re-allocation if needed */
1527         if (dev->data->tx_queues[queue_idx] != NULL) {
1528                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1529                 dev->data->tx_queues[queue_idx] = NULL;
1530         }
1531
1532         /* First allocate the tx queue data structure */
1533         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1534                                                         RTE_CACHE_LINE_SIZE);
1535         if (txq == NULL)
1536                 return -ENOMEM;
1537
1538         /*
1539          * Allocate TX ring hardware descriptors. A memzone large enough to
1540          * handle the maximum ring size is allocated in order to allow for
1541          * resizing in later calls to the queue setup function.
1542          */
1543         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1544         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1545                                       E1000_ALIGN, socket_id);
1546         if (tz == NULL) {
1547                 igb_tx_queue_release(txq);
1548                 return -ENOMEM;
1549         }
1550
1551         txq->nb_tx_desc = nb_desc;
1552         txq->pthresh = tx_conf->tx_thresh.pthresh;
1553         txq->hthresh = tx_conf->tx_thresh.hthresh;
1554         txq->wthresh = tx_conf->tx_thresh.wthresh;
1555         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1556                 txq->wthresh = 1;
1557         txq->queue_id = queue_idx;
1558         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1559                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1560         txq->port_id = dev->data->port_id;
1561
1562         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1563         txq->tx_ring_phys_addr = tz->iova;
1564
1565         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1566         /* Allocate software ring */
1567         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1568                                    sizeof(struct igb_tx_entry) * nb_desc,
1569                                    RTE_CACHE_LINE_SIZE);
1570         if (txq->sw_ring == NULL) {
1571                 igb_tx_queue_release(txq);
1572                 return -ENOMEM;
1573         }
1574         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1575                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1576
1577         igb_reset_tx_queue(txq, dev);
1578         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1579         dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1580         dev->data->tx_queues[queue_idx] = txq;
1581         txq->offloads = offloads;
1582
1583         return 0;
1584 }
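
/*
 * Illustrative sketch (application side, not driver code): the setup routine
 * above is reached through the generic ethdev API. A minimal, hedged usage
 * example, where "port_id", "nb_txd" and "dev_info" are illustrative values
 * obtained by the application:
 *
 *     struct rte_eth_txconf txconf = dev_info.default_txconf;
 *     txconf.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_MULTI_SEGS;
 *     ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *                                  rte_eth_dev_socket_id(port_id), &txconf);
 *
 * The requested offload bits must be a subset of what
 * igb_get_tx_port_offloads_capa() reports above.
 */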
1585
1586 static void
1587 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1588 {
1589         unsigned i;
1590
1591         if (rxq->sw_ring != NULL) {
1592                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1593                         if (rxq->sw_ring[i].mbuf != NULL) {
1594                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1595                                 rxq->sw_ring[i].mbuf = NULL;
1596                         }
1597                 }
1598         }
1599 }
1600
1601 static void
1602 igb_rx_queue_release(struct igb_rx_queue *rxq)
1603 {
1604         if (rxq != NULL) {
1605                 igb_rx_queue_release_mbufs(rxq);
1606                 rte_free(rxq->sw_ring);
1607                 rte_free(rxq);
1608         }
1609 }
1610
1611 void
1612 eth_igb_rx_queue_release(void *rxq)
1613 {
1614         igb_rx_queue_release(rxq);
1615 }
1616
1617 static void
1618 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1619 {
1620         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1621         unsigned i;
1622
1623         /* Zero out HW ring memory */
1624         for (i = 0; i < rxq->nb_rx_desc; i++) {
1625                 rxq->rx_ring[i] = zeroed_desc;
1626         }
1627
1628         rxq->rx_tail = 0;
1629         rxq->pkt_first_seg = NULL;
1630         rxq->pkt_last_seg = NULL;
1631 }
1632
1633 uint64_t
1634 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1635 {
1636         uint64_t rx_offload_capa;
1637
1638         RTE_SET_USED(dev);
1639         rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
1640                           DEV_RX_OFFLOAD_VLAN_FILTER |
1641                           DEV_RX_OFFLOAD_IPV4_CKSUM  |
1642                           DEV_RX_OFFLOAD_UDP_CKSUM   |
1643                           DEV_RX_OFFLOAD_TCP_CKSUM   |
1644                           DEV_RX_OFFLOAD_JUMBO_FRAME |
1645                           DEV_RX_OFFLOAD_KEEP_CRC    |
1646                           DEV_RX_OFFLOAD_SCATTER;
1647
1648         return rx_offload_capa;
1649 }
1650
1651 uint64_t
1652 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1653 {
1654         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655         uint64_t rx_queue_offload_capa;
1656
1657         switch (hw->mac.type) {
1658         case e1000_vfadapt_i350:
1659                 /*
1660                  * As only one Rx queue can be used, let the per-queue offloading
1661                  * capability be the same as the per-port offloading capability
1662                  * for convenience.
1663                  */
1664                 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1665                 break;
1666         default:
1667                 rx_queue_offload_capa = 0;
1668         }
1669         return rx_queue_offload_capa;
1670 }
1671
1672 int
1673 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1674                          uint16_t queue_idx,
1675                          uint16_t nb_desc,
1676                          unsigned int socket_id,
1677                          const struct rte_eth_rxconf *rx_conf,
1678                          struct rte_mempool *mp)
1679 {
1680         const struct rte_memzone *rz;
1681         struct igb_rx_queue *rxq;
1682         struct e1000_hw     *hw;
1683         unsigned int size;
1684         uint64_t offloads;
1685
1686         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1687
1688         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1689
1690         /*
1691          * Validate number of receive descriptors.
1692          * It must not exceed hardware maximum, and must be multiple
1693          * of E1000_ALIGN.
1694          */
1695         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1696                         (nb_desc > E1000_MAX_RING_DESC) ||
1697                         (nb_desc < E1000_MIN_RING_DESC)) {
1698                 return -EINVAL;
1699         }
1700
1701         /* Free memory prior to re-allocation if needed */
1702         if (dev->data->rx_queues[queue_idx] != NULL) {
1703                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1704                 dev->data->rx_queues[queue_idx] = NULL;
1705         }
1706
1707         /* First allocate the RX queue data structure. */
1708         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1709                           RTE_CACHE_LINE_SIZE);
1710         if (rxq == NULL)
1711                 return -ENOMEM;
1712         rxq->offloads = offloads;
1713         rxq->mb_pool = mp;
1714         rxq->nb_rx_desc = nb_desc;
1715         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1716         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1717         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1718         if (rxq->wthresh > 0 &&
1719             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1720                 rxq->wthresh = 1;
1721         rxq->drop_en = rx_conf->rx_drop_en;
1722         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1723         rxq->queue_id = queue_idx;
1724         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1725                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1726         rxq->port_id = dev->data->port_id;
1727         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1728                 rxq->crc_len = RTE_ETHER_CRC_LEN;
1729         else
1730                 rxq->crc_len = 0;
1731
1732         /*
1733          *  Allocate RX ring hardware descriptors. A memzone large enough to
1734          *  handle the maximum ring size is allocated in order to allow for
1735          *  resizing in later calls to the queue setup function.
1736          */
1737         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1738         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1739                                       E1000_ALIGN, socket_id);
1740         if (rz == NULL) {
1741                 igb_rx_queue_release(rxq);
1742                 return -ENOMEM;
1743         }
1744         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1745         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1746         rxq->rx_ring_phys_addr = rz->iova;
1747         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1748
1749         /* Allocate software ring. */
1750         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1751                                    sizeof(struct igb_rx_entry) * nb_desc,
1752                                    RTE_CACHE_LINE_SIZE);
1753         if (rxq->sw_ring == NULL) {
1754                 igb_rx_queue_release(rxq);
1755                 return -ENOMEM;
1756         }
1757         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1758                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1759
1760         dev->data->rx_queues[queue_idx] = rxq;
1761         igb_reset_rx_queue(rxq);
1762
1763         return 0;
1764 }
1765
1766 uint32_t
1767 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1768 {
1769 #define IGB_RXQ_SCAN_INTERVAL 4
1770         volatile union e1000_adv_rx_desc *rxdp;
1771         struct igb_rx_queue *rxq;
1772         uint32_t desc = 0;
1773
1774         rxq = dev->data->rx_queues[rx_queue_id];
1775         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1776
1777         while ((desc < rxq->nb_rx_desc) &&
1778                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1779                 desc += IGB_RXQ_SCAN_INTERVAL;
1780                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1781                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1782                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1783                                 desc - rxq->nb_rx_desc]);
1784         }
1785
1786         return desc;
1787 }
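
/*
 * Note on granularity: the scan above advances IGB_RXQ_SCAN_INTERVAL (4)
 * descriptors at a time, so the returned count is an approximation in
 * multiples of 4 rather than an exact number of completed descriptors.
 * For example, if 6 descriptors currently have the DD bit set, the
 * function reports 8.
 */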
1788
1789 int
1790 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1791 {
1792         volatile union e1000_adv_rx_desc *rxdp;
1793         struct igb_rx_queue *rxq = rx_queue;
1794         uint32_t desc;
1795
1796         if (unlikely(offset >= rxq->nb_rx_desc))
1797                 return 0;
1798         desc = rxq->rx_tail + offset;
1799         if (desc >= rxq->nb_rx_desc)
1800                 desc -= rxq->nb_rx_desc;
1801
1802         rxdp = &rxq->rx_ring[desc];
1803         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1804 }
1805
1806 int
1807 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1808 {
1809         struct igb_rx_queue *rxq = rx_queue;
1810         volatile uint32_t *status;
1811         uint32_t desc;
1812
1813         if (unlikely(offset >= rxq->nb_rx_desc))
1814                 return -EINVAL;
1815
1816         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1817                 return RTE_ETH_RX_DESC_UNAVAIL;
1818
1819         desc = rxq->rx_tail + offset;
1820         if (desc >= rxq->nb_rx_desc)
1821                 desc -= rxq->nb_rx_desc;
1822
1823         status = &rxq->rx_ring[desc].wb.upper.status_error;
1824         if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1825                 return RTE_ETH_RX_DESC_DONE;
1826
1827         return RTE_ETH_RX_DESC_AVAIL;
1828 }
1829
1830 int
1831 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1832 {
1833         struct igb_tx_queue *txq = tx_queue;
1834         volatile uint32_t *status;
1835         uint32_t desc;
1836
1837         if (unlikely(offset >= txq->nb_tx_desc))
1838                 return -EINVAL;
1839
1840         desc = txq->tx_tail + offset;
1841         if (desc >= txq->nb_tx_desc)
1842                 desc -= txq->nb_tx_desc;
1843
1844         status = &txq->tx_ring[desc].wb.status;
1845         if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1846                 return RTE_ETH_TX_DESC_DONE;
1847
1848         return RTE_ETH_TX_DESC_FULL;
1849 }
1850
1851 void
1852 igb_dev_clear_queues(struct rte_eth_dev *dev)
1853 {
1854         uint16_t i;
1855         struct igb_tx_queue *txq;
1856         struct igb_rx_queue *rxq;
1857
1858         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1859                 txq = dev->data->tx_queues[i];
1860                 if (txq != NULL) {
1861                         igb_tx_queue_release_mbufs(txq);
1862                         igb_reset_tx_queue(txq, dev);
1863                 }
1864         }
1865
1866         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1867                 rxq = dev->data->rx_queues[i];
1868                 if (rxq != NULL) {
1869                         igb_rx_queue_release_mbufs(rxq);
1870                         igb_reset_rx_queue(rxq);
1871                 }
1872         }
1873 }
1874
1875 void
1876 igb_dev_free_queues(struct rte_eth_dev *dev)
1877 {
1878         uint16_t i;
1879
1880         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1881                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1882                 dev->data->rx_queues[i] = NULL;
1883         }
1884         dev->data->nb_rx_queues = 0;
1885
1886         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1887                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1888                 dev->data->tx_queues[i] = NULL;
1889         }
1890         dev->data->nb_tx_queues = 0;
1891 }
1892
1893 /**
1894  * Receive Side Scaling (RSS).
1895  * See section 7.1.1.7 in the following document:
1896  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1897  *
1898  * Principles:
1899  * The source and destination IP addresses of the IP header and the source and
1900  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1901  * against a configurable random key to compute a 32-bit RSS hash result.
1902  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1903  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1904  * RSS output index which is used as the RX queue index where to store the
1905  * received packets.
1906  * The following output is supplied in the RX write-back descriptor:
1907  *     - 32-bit result of the Microsoft RSS hash function,
1908  *     - 4-bit RSS type field.
1909  */
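
/*
 * A minimal sketch of the queue selection described above (software model
 * only; the hardware performs this lookup itself on receive):
 *
 *     uint8_t reta[128];                     // 3-bit RX queue indices
 *     uint32_t hash = rss_hash(pkt, key);    // Microsoft RSS hash (hypothetical helper)
 *     uint16_t rx_queue = reta[hash & 0x7F]; // 7 LSBs index the 128-entry RETA
 *
 * The 32-bit hash itself is also written back in the RX descriptor, from
 * which the receive functions above copy it into the mbuf RSS hash field.
 */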
1910
1911 /*
1912  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1913  * Used as the default key.
1914  */
1915 static uint8_t rss_intel_key[40] = {
1916         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1917         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1918         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1919         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1920         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1921 };
1922
1923 static void
1924 igb_rss_disable(struct rte_eth_dev *dev)
1925 {
1926         struct e1000_hw *hw;
1927         uint32_t mrqc;
1928
1929         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1930         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1931         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1932         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1933 }
1934
1935 static void
1936 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1937 {
1938         uint8_t  *hash_key;
1939         uint32_t rss_key;
1940         uint32_t mrqc;
1941         uint64_t rss_hf;
1942         uint16_t i;
1943
1944         hash_key = rss_conf->rss_key;
1945         if (hash_key != NULL) {
1946                 /* Fill in RSS hash key */
1947                 for (i = 0; i < 10; i++) {
1948                         rss_key  = hash_key[(i * 4)];
1949                         rss_key |= hash_key[(i * 4) + 1] << 8;
1950                         rss_key |= hash_key[(i * 4) + 2] << 16;
1951                         rss_key |= hash_key[(i * 4) + 3] << 24;
1952                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1953                 }
1954         }
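
        /*
         * Worked example of the packing above, using the default
         * rss_intel_key defined earlier: its first four bytes 0x6D, 0x5A,
         * 0x56, 0xDA combine little-endian into RSSRK(0) = 0xDA565A6D;
         * ten such 32-bit words hold the whole 40-byte key.
         */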
1955
1956         /* Set configured hashing protocols in MRQC register */
1957         rss_hf = rss_conf->rss_hf;
1958         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1959         if (rss_hf & ETH_RSS_IPV4)
1960                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1961         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1962                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1963         if (rss_hf & ETH_RSS_IPV6)
1964                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1965         if (rss_hf & ETH_RSS_IPV6_EX)
1966                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1967         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1968                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1969         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1970                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1971         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1972                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1973         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1974                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1975         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1976                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1977         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1978 }
1979
1980 int
1981 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1982                         struct rte_eth_rss_conf *rss_conf)
1983 {
1984         struct e1000_hw *hw;
1985         uint32_t mrqc;
1986         uint64_t rss_hf;
1987
1988         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1989
1990         /*
1991          * Before changing anything, first check that the update RSS operation
1992          * does not attempt to disable RSS, if RSS was enabled at
1993          * initialization time, or does not attempt to enable RSS, if RSS was
1994          * disabled at initialization time.
1995          */
1996         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1997         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1998         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1999                 if (rss_hf != 0) /* Enable RSS */
2000                         return -(EINVAL);
2001                 return 0; /* Nothing to do */
2002         }
2003         /* RSS enabled */
2004         if (rss_hf == 0) /* Disable RSS */
2005                 return -(EINVAL);
2006         igb_hw_rss_hash_set(hw, rss_conf);
2007         return 0;
2008 }
2009
2010 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2011                               struct rte_eth_rss_conf *rss_conf)
2012 {
2013         struct e1000_hw *hw;
2014         uint8_t *hash_key;
2015         uint32_t rss_key;
2016         uint32_t mrqc;
2017         uint64_t rss_hf;
2018         uint16_t i;
2019
2020         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021         hash_key = rss_conf->rss_key;
2022         if (hash_key != NULL) {
2023                 /* Return RSS hash key */
2024                 for (i = 0; i < 10; i++) {
2025                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2026                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2027                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2028                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2029                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2030                 }
2031         }
2032
2033         /* Get RSS functions configured in MRQC register */
2034         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2035         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2036                 rss_conf->rss_hf = 0;
2037                 return 0;
2038         }
2039         rss_hf = 0;
2040         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2041                 rss_hf |= ETH_RSS_IPV4;
2042         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2043                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2044         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2045                 rss_hf |= ETH_RSS_IPV6;
2046         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2047                 rss_hf |= ETH_RSS_IPV6_EX;
2048         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2049                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2050         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2051                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2052         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2053                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2054         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2055                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2056         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2057                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2058         rss_conf->rss_hf = rss_hf;
2059         return 0;
2060 }
2061
2062 static void
2063 igb_rss_configure(struct rte_eth_dev *dev)
2064 {
2065         struct rte_eth_rss_conf rss_conf;
2066         struct e1000_hw *hw;
2067         uint32_t shift;
2068         uint16_t i;
2069
2070         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071
2072         /* Fill in redirection table. */
2073         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2074         for (i = 0; i < 128; i++) {
2075                 union e1000_reta {
2076                         uint32_t dword;
2077                         uint8_t  bytes[4];
2078                 } reta;
2079                 uint8_t q_idx;
2080
2081                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2082                                    i % dev->data->nb_rx_queues : 0);
2083                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2084                 if ((i & 3) == 3)
2085                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2086         }
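
        /*
         * Worked example of the packing above (hypothetical 4-queue
         * configuration, non-82575 so shift == 0): entries i = 0..3 map to
         * queues 0, 1, 2 and 3, so reta.bytes = {0, 1, 2, 3} and RETA(0)
         * is written as 0x03020100 on a little-endian host. On 82575 the
         * 3-bit queue index is additionally shifted left by 6 within each
         * byte.
         */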
2087
2088         /*
2089          * Configure the RSS key and the RSS protocols used to compute
2090          * the RSS hash of input packets.
2091          */
2092         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2093         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2094                 igb_rss_disable(dev);
2095                 return;
2096         }
2097         if (rss_conf.rss_key == NULL)
2098                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2099         igb_hw_rss_hash_set(hw, &rss_conf);
2100 }
2101
2102 /*
2103  * Check whether the MAC type supports VMDq.
2104  * Return 1 if it does, otherwise return 0.
2105  */
2106 static int
2107 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2108 {
2109         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2110
2111         switch (hw->mac.type) {
2112         case e1000_82576:
2113         case e1000_82580:
2114         case e1000_i350:
2115                 return 1;
2116         case e1000_82540:
2117         case e1000_82541:
2118         case e1000_82542:
2119         case e1000_82543:
2120         case e1000_82544:
2121         case e1000_82545:
2122         case e1000_82546:
2123         case e1000_82547:
2124         case e1000_82571:
2125         case e1000_82572:
2126         case e1000_82573:
2127         case e1000_82574:
2128         case e1000_82583:
2129         case e1000_i210:
2130         case e1000_i211:
2131         default:
2132                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2133                 return 0;
2134         }
2135 }
2136
2137 static int
2138 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2139 {
2140         struct rte_eth_vmdq_rx_conf *cfg;
2141         struct e1000_hw *hw;
2142         uint32_t mrqc, vt_ctl, vmolr, rctl;
2143         int i;
2144
2145         PMD_INIT_FUNC_TRACE();
2146
2147         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2148         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2149
2150         /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2151         if (igb_is_vmdq_supported(dev) == 0)
2152                 return -1;
2153
2154         igb_rss_disable(dev);
2155
2156         /* RCTL: enable VLAN filter */
2157         rctl = E1000_READ_REG(hw, E1000_RCTL);
2158         rctl |= E1000_RCTL_VFE;
2159         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2160
2161         /* MRQC: enable vmdq */
2162         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2163         mrqc |= E1000_MRQC_ENABLE_VMDQ;
2164         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2165
2166         /* VTCTL:  pool selection according to VLAN tag */
2167         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2168         if (cfg->enable_default_pool)
2169                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2170         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2171         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2172
2173         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2174                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2175                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2176                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2177                         E1000_VMOLR_MPME);
2178
2179                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2180                         vmolr |= E1000_VMOLR_AUPE;
2181                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2182                         vmolr |= E1000_VMOLR_ROMPE;
2183                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2184                         vmolr |= E1000_VMOLR_ROPE;
2185                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2186                         vmolr |= E1000_VMOLR_BAM;
2187                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2188                         vmolr |= E1000_VMOLR_MPME;
2189
2190                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2191         }
2192
2193         /*
2194          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2195          * Both 82576 and 82580 support it.
2196          */
2197         if (hw->mac.type != e1000_i350) {
2198                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2199                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2200                         vmolr |= E1000_VMOLR_STRVLAN;
2201                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2202                 }
2203         }
2204
2205         /* VFTA - enable all vlan filters */
2206         for (i = 0; i < IGB_VFTA_SIZE; i++)
2207                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2208
2209         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
2210         if (hw->mac.type != e1000_82580)
2211                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2212
2213         /*
2214          * RAH/RAL - allow pools to read specific mac addresses
2215          * In this case, all pools should be able to read from mac addr 0
2216          */
2217         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2218         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2219
2220         /* VLVF: set up filters for vlan tags as configured */
2221         for (i = 0; i < cfg->nb_pool_maps; i++) {
2222                 /* set vlan id in VF register and set the valid bit */
2223                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2224                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2225                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2226                         E1000_VLVF_POOLSEL_MASK)));
2227         }
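
        /*
         * Illustrative encoding (hypothetical pool map entry): vlan_id = 100
         * with a pools bitmask of 0x3 (pools 0 and 1) is written as
         * E1000_VLVF_VLANID_ENABLE | 100 | (0x3 << E1000_VLVF_POOLSEL_SHIFT),
         * i.e. the VLAN ID in the low bits and one enable bit per pool in
         * the POOLSEL field.
         */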
2228
2229         E1000_WRITE_FLUSH(hw);
2230
2231         return 0;
2232 }
2233
2234
2235 /*********************************************************************
2236  *
2237  *  Enable receive unit.
2238  *
2239  **********************************************************************/
2240
2241 static int
2242 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2243 {
2244         struct igb_rx_entry *rxe = rxq->sw_ring;
2245         uint64_t dma_addr;
2246         unsigned i;
2247
2248         /* Initialize software ring entries. */
2249         for (i = 0; i < rxq->nb_rx_desc; i++) {
2250                 volatile union e1000_adv_rx_desc *rxd;
2251                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2252
2253                 if (mbuf == NULL) {
2254                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2255                                      "queue_id=%hu", rxq->queue_id);
2256                         return -ENOMEM;
2257                 }
2258                 dma_addr =
2259                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2260                 rxd = &rxq->rx_ring[i];
2261                 rxd->read.hdr_addr = 0;
2262                 rxd->read.pkt_addr = dma_addr;
2263                 rxe[i].mbuf = mbuf;
2264         }
2265
2266         return 0;
2267 }
2268
2269 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2270 static int
2271 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2272 {
2273         struct e1000_hw *hw =
2274                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2275         uint32_t mrqc;
2276
2277         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2278                 /*
2279                  * SRIOV active scheme
2280                  * FIXME if support RSS together with VMDq & SRIOV
2281                  */
2282                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2283                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2284                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2285                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2286         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2287                 /*
2288                  * SRIOV inactive scheme
2289                  */
2290                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2291                         case ETH_MQ_RX_RSS:
2292                                 igb_rss_configure(dev);
2293                                 break;
2294                         case ETH_MQ_RX_VMDQ_ONLY:
2295                                 /* Configure general VMDq-only RX parameters */
2296                                 igb_vmdq_rx_hw_configure(dev);
2297                                 break;
2298                         case ETH_MQ_RX_NONE:
2299                                 /* If mq_mode is none, disable RSS. */
2300                         default:
2301                                 igb_rss_disable(dev);
2302                                 break;
2303                 }
2304         }
2305
2306         return 0;
2307 }
2308
2309 int
2310 eth_igb_rx_init(struct rte_eth_dev *dev)
2311 {
2312         struct rte_eth_rxmode *rxmode;
2313         struct e1000_hw     *hw;
2314         struct igb_rx_queue *rxq;
2315         uint32_t rctl;
2316         uint32_t rxcsum;
2317         uint32_t srrctl;
2318         uint16_t buf_size;
2319         uint16_t rctl_bsize;
2320         uint16_t i;
2321         int ret;
2322
2323         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2324         srrctl = 0;
2325
2326         /*
2327          * Make sure receives are disabled while setting
2328          * up the descriptor ring.
2329          */
2330         rctl = E1000_READ_REG(hw, E1000_RCTL);
2331         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2332
2333         rxmode = &dev->data->dev_conf.rxmode;
2334
2335         /*
2336          * Configure support of jumbo frames, if any.
2337          */
2338         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2339                 rctl |= E1000_RCTL_LPE;
2340
2341                 /*
2342                  * Set the maximum packet length by default; it might be updated
2343                  * later when dual VLAN is enabled or disabled.
2344                  */
2345                 E1000_WRITE_REG(hw, E1000_RLPML,
2346                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2347                                                 VLAN_TAG_SIZE);
2348         } else
2349                 rctl &= ~E1000_RCTL_LPE;
2350
2351         /* Configure and enable each RX queue. */
2352         rctl_bsize = 0;
2353         dev->rx_pkt_burst = eth_igb_recv_pkts;
2354         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2355                 uint64_t bus_addr;
2356                 uint32_t rxdctl;
2357
2358                 rxq = dev->data->rx_queues[i];
2359
2360                 rxq->flags = 0;
2361                 /*
2362                  * i350 and i354 vlan packets have vlan tags byte swapped.
2363                  */
2364                 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2365                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2366                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2367                 } else {
2368                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2369                 }
2370
2371                 /* Allocate buffers for descriptor rings and set up queue */
2372                 ret = igb_alloc_rx_queue_mbufs(rxq);
2373                 if (ret)
2374                         return ret;
2375
2376                 /*
2377                  * Reset crc_len in case it was changed after queue setup by a
2378                  *  call to configure
2379                  */
2380                 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2381                         rxq->crc_len = RTE_ETHER_CRC_LEN;
2382                 else
2383                         rxq->crc_len = 0;
2384
2385                 bus_addr = rxq->rx_ring_phys_addr;
2386                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2387                                 rxq->nb_rx_desc *
2388                                 sizeof(union e1000_adv_rx_desc));
2389                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2390                                 (uint32_t)(bus_addr >> 32));
2391                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2392
2393                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2394
2395                 /*
2396                  * Configure RX buffer size.
2397                  */
2398                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2399                         RTE_PKTMBUF_HEADROOM);
2400                 if (buf_size >= 1024) {
2401                         /*
2402                          * Configure the BSIZEPACKET field of the SRRCTL
2403                          * register of the queue.
2404                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2405                          * If this field is equal to 0b, then RCTL.BSIZE
2406                          * determines the RX packet buffer size.
2407                          */
2408                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2409                                    E1000_SRRCTL_BSIZEPKT_MASK);
2410                         buf_size = (uint16_t) ((srrctl &
2411                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2412                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
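
                        /*
                         * Worked example (hypothetical mbuf data room):
                         * buf_size = 2500 encodes to a BSIZEPACKET field of
                         * 2 (1 KB units), and the read-back above yields an
                         * effective buf_size of 2048, i.e. the value is
                         * rounded down to a whole number of kilobytes before
                         * the dual VLAN / scatter check below.
                         */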
2413
2414                         /* Add the dual VLAN header length to support dual VLAN frames */
2415                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2416                                                 2 * VLAN_TAG_SIZE) > buf_size){
2417                                 if (!dev->data->scattered_rx)
2418                                         PMD_INIT_LOG(DEBUG,
2419                                                      "forcing scatter mode");
2420                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2421                                 dev->data->scattered_rx = 1;
2422                         }
2423                 } else {
2424                         /*
2425                          * Use BSIZE field of the device RCTL register.
2426                          */
2427                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2428                                 rctl_bsize = buf_size;
2429                         if (!dev->data->scattered_rx)
2430                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2431                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2432                         dev->data->scattered_rx = 1;
2433                 }
2434
2435                 /* Set if packets are dropped when no descriptors are available */
2436                 if (rxq->drop_en)
2437                         srrctl |= E1000_SRRCTL_DROP_EN;
2438
2439                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2440
2441                 /* Enable this RX queue. */
2442                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2443                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2444                 rxdctl &= 0xFFF00000;
2445                 rxdctl |= (rxq->pthresh & 0x1F);
2446                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2447                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2448                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2449         }
2450
2451         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2452                 if (!dev->data->scattered_rx)
2453                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2454                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2455                 dev->data->scattered_rx = 1;
2456         }
2457
2458         /*
2459          * Setup BSIZE field of RCTL register, if needed.
2460          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2461          * register, since the code above configures the SRRCTL register of
2462          * the RX queue in such a case.
2463          * All configurable sizes are:
2464          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2465          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2466          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2467          *  2048: rctl |= E1000_RCTL_SZ_2048;
2468          *  1024: rctl |= E1000_RCTL_SZ_1024;
2469          *   512: rctl |= E1000_RCTL_SZ_512;
2470          *   256: rctl |= E1000_RCTL_SZ_256;
2471          */
2472         if (rctl_bsize > 0) {
2473                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2474                         rctl |= E1000_RCTL_SZ_512;
2475                 else /* 256 <= buf_size < 512 - use 256 */
2476                         rctl |= E1000_RCTL_SZ_256;
2477         }
2478
2479         /*
2480          * Configure RSS if device configured with multiple RX queues.
2481          */
2482         igb_dev_mq_rx_configure(dev);
2483
2484         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2485         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2486
2487         /*
2488          * Setup the Checksum Register.
2489          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2490          */
2491         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2492         rxcsum |= E1000_RXCSUM_PCSD;
2493
2494         /* Enable both L3/L4 rx checksum offload */
2495         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2496                 rxcsum |= E1000_RXCSUM_IPOFL;
2497         else
2498                 rxcsum &= ~E1000_RXCSUM_IPOFL;
2499         if (rxmode->offloads &
2500                 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2501                 rxcsum |= E1000_RXCSUM_TUOFL;
2502         else
2503                 rxcsum &= ~E1000_RXCSUM_TUOFL;
2504         if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2505                 rxcsum |= E1000_RXCSUM_CRCOFL;
2506         else
2507                 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2508
2509         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2510
2511         /* Setup the Receive Control Register. */
2512         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2513                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2514
2515                 /* clear STRCRC bit in all queues */
2516                 if (hw->mac.type == e1000_i350 ||
2517                     hw->mac.type == e1000_i210 ||
2518                     hw->mac.type == e1000_i211 ||
2519                     hw->mac.type == e1000_i354) {
2520                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2521                                 rxq = dev->data->rx_queues[i];
2522                                 uint32_t dvmolr = E1000_READ_REG(hw,
2523                                         E1000_DVMOLR(rxq->reg_idx));
2524                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2525                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2526                         }
2527                 }
2528         } else {
2529                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2530
2531                 /* set STRCRC bit in all queues */
2532                 if (hw->mac.type == e1000_i350 ||
2533                     hw->mac.type == e1000_i210 ||
2534                     hw->mac.type == e1000_i211 ||
2535                     hw->mac.type == e1000_i354) {
2536                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2537                                 rxq = dev->data->rx_queues[i];
2538                                 uint32_t dvmolr = E1000_READ_REG(hw,
2539                                         E1000_DVMOLR(rxq->reg_idx));
2540                                 dvmolr |= E1000_DVMOLR_STRCRC;
2541                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2542                         }
2543                 }
2544         }
2545
2546         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2547         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2548                 E1000_RCTL_RDMTS_HALF |
2549                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2550
2551         /* Make sure VLAN Filters are off. */
2552         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2553                 rctl &= ~E1000_RCTL_VFE;
2554         /* Don't store bad packets. */
2555         rctl &= ~E1000_RCTL_SBP;
2556
2557         /* Enable Receives. */
2558         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2559
2560         /*
2561          * Setup the HW Rx Head and Tail Descriptor Pointers.
2562          * This needs to be done after enable.
2563          */
2564         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2565                 rxq = dev->data->rx_queues[i];
2566                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2567                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2568         }
2569
2570         return 0;
2571 }
2572
2573 /*********************************************************************
2574  *
2575  *  Enable transmit unit.
2576  *
2577  **********************************************************************/
2578 void
2579 eth_igb_tx_init(struct rte_eth_dev *dev)
2580 {
2581         struct e1000_hw     *hw;
2582         struct igb_tx_queue *txq;
2583         uint32_t tctl;
2584         uint32_t txdctl;
2585         uint16_t i;
2586
2587         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2588
2589         /* Setup the Base and Length of the Tx Descriptor Rings. */
2590         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2591                 uint64_t bus_addr;
2592                 txq = dev->data->tx_queues[i];
2593                 bus_addr = txq->tx_ring_phys_addr;
2594
2595                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2596                                 txq->nb_tx_desc *
2597                                 sizeof(union e1000_adv_tx_desc));
2598                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2599                                 (uint32_t)(bus_addr >> 32));
2600                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2601
2602                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2603                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2604                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2605
2606                 /* Setup Transmit threshold registers. */
2607                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2608                 txdctl |= txq->pthresh & 0x1F;
2609                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2610                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2611                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2612                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2613         }
2614
2615         /* Program the Transmit Control Register. */
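        /*
         * Clear the collision threshold (CT) field, then enable the
         * transmitter with pad-short-packets (PSP), retransmit-on-late-
         * collision (RTLC) and the default collision threshold.
         */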
2616         tctl = E1000_READ_REG(hw, E1000_TCTL);
2617         tctl &= ~E1000_TCTL_CT;
2618         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2619                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2620
2621         e1000_config_collision_dist(hw);
2622
2623         /* This write will effectively turn on the transmit unit. */
2624         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2625 }
2626
2627 /*********************************************************************
2628  *
2629  *  Enable VF receive unit.
2630  *
2631  **********************************************************************/
2632 int
2633 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2634 {
2635         struct e1000_hw     *hw;
2636         struct igb_rx_queue *rxq;
2637         uint32_t srrctl;
2638         uint16_t buf_size;
2639         uint16_t rctl_bsize;
2640         uint16_t i;
2641         int ret;
2642
2643         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2644
2645         /* setup MTU */
2646         e1000_rlpml_set_vf(hw,
2647                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2648                 VLAN_TAG_SIZE));
2649
2650         /* Configure and enable each RX queue. */
2651         rctl_bsize = 0;
2652         dev->rx_pkt_burst = eth_igb_recv_pkts;
2653         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2654                 uint64_t bus_addr;
2655                 uint32_t rxdctl;
2656
2657                 rxq = dev->data->rx_queues[i];
2658
2659                 rxq->flags = 0;
2660                 /*
2661                  * i350 VF: VLAN tags of loopback (LB) packets arrive byte-swapped.
2662                  */
2663                 if (hw->mac.type == e1000_vfadapt_i350) {
2664                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2665                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2666                 } else {
2667                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2668                 }
2669
2670                 /* Allocate buffers for descriptor rings and set up queue */
2671                 ret = igb_alloc_rx_queue_mbufs(rxq);
2672                 if (ret)
2673                         return ret;
2674
2675                 bus_addr = rxq->rx_ring_phys_addr;
2676                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2677                                 rxq->nb_rx_desc *
2678                                 sizeof(union e1000_adv_rx_desc));
2679                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2680                                 (uint32_t)(bus_addr >> 32));
2681                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2682
2683                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2684
2685                 /*
2686                  * Configure RX buffer size.
2687                  */
2688                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2689                         RTE_PKTMBUF_HEADROOM);
2690                 if (buf_size >= 1024) {
2691                         /*
2692                          * Configure the BSIZEPACKET field of the SRRCTL
2693                          * register of the queue.
2694                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2695                          * If this field is equal to 0b, then RCTL.BSIZE
2696                          * determines the RX packet buffer size.
2697                          */
2698                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2699                                    E1000_SRRCTL_BSIZEPKT_MASK);
2700                         buf_size = (uint16_t) ((srrctl &
2701                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2702                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2703
2704                         /* Add two VLAN tag lengths to support dual VLAN (QinQ) */
2705                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2706                                                 2 * VLAN_TAG_SIZE) > buf_size) {
2707                                 if (!dev->data->scattered_rx)
2708                                         PMD_INIT_LOG(DEBUG,
2709                                                      "forcing scatter mode");
2710                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2711                                 dev->data->scattered_rx = 1;
2712                         }
2713                 } else {
2714                         /*
2715                          * Use BSIZE field of the device RCTL register.
2716                          */
2717                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2718                                 rctl_bsize = buf_size;
2719                         if (!dev->data->scattered_rx)
2720                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2721                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2722                         dev->data->scattered_rx = 1;
2723                 }
2724
2725                 /* Set if packets are dropped when no descriptors available */
2726                 if (rxq->drop_en)
2727                         srrctl |= E1000_SRRCTL_DROP_EN;
2728
2729                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2730
2731                 /* Enable this RX queue. */
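                /*
                 * RXDCTL mirrors the TXDCTL layout: the 0xFFF00000 mask
                 * preserves bits 31:20 while the prefetch, host and
                 * write-back thresholds are rewritten in bits 4:0, 12:8
                 * and 20:16.
                 */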
2732                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2733                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2734                 rxdctl &= 0xFFF00000;
2735                 rxdctl |= (rxq->pthresh & 0x1F);
2736                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2737                 if (hw->mac.type == e1000_vfadapt) {
2738                         /*
2739                          * Workaround for the 82576 VF erratum:
2740                          * force WTHRESH to 1 to avoid descriptor
2741                          * write-back occasionally not being triggered.
2742                          */
2743                         rxdctl |= 0x10000;
2744                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1!");
2745                 } else {
2746                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2747                 }
2748                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2749         }
2750
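        /* Scattered RX may also be requested explicitly via the offload flag. */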
2751         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2752                 if (!dev->data->scattered_rx)
2753                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2754                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2755                 dev->data->scattered_rx = 1;
2756         }
2757
2758         /*
2759          * Setup the HW Rx Head and Tail Descriptor Pointers.
2760          * This needs to be done after enable.
2761          */
2762         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2763                 rxq = dev->data->rx_queues[i];
2764                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2765                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2766         }
2767
2768         return 0;
2769 }
2770
2771 /*********************************************************************
2772  *
2773  *  Enable VF transmit unit.
2774  *
2775  **********************************************************************/
2776 void
2777 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2778 {
2779         struct e1000_hw     *hw;
2780         struct igb_tx_queue *txq;
2781         uint32_t txdctl;
2782         uint16_t i;
2783
2784         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2785
2786         /* Setup the Base and Length of the Tx Descriptor Rings. */
2787         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2788                 uint64_t bus_addr;
2789
2790                 txq = dev->data->tx_queues[i];
2791                 bus_addr = txq->tx_ring_phys_addr;
2792                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2793                                 txq->nb_tx_desc *
2794                                 sizeof(union e1000_adv_tx_desc));
2795                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2796                                 (uint32_t)(bus_addr >> 32));
2797                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2798
2799                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2800                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2801                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2802
2803                 /* Setup Transmit threshold registers. */
2804                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2805                 txdctl |= txq->pthresh & 0x1F;
2806                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2807                 if (hw->mac.type == e1000_82576) {
2808                         /*
2809                          * Workaround for the 82576 VF erratum:
2810                          * force WTHRESH to 1 to avoid descriptor
2811                          * write-back occasionally not being triggered.
2812                          */
2813                         txdctl |= 0x10000;
2814                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1!");
2815                 } else {
2816                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2817                 }
2818                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2819                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2820         }
2821
2822 }
2823
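/* Fill rte_eth_rxq_info; backs the rxq_info_get ethdev callback. */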
2824 void
2825 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2826         struct rte_eth_rxq_info *qinfo)
2827 {
2828         struct igb_rx_queue *rxq;
2829
2830         rxq = dev->data->rx_queues[queue_id];
2831
2832         qinfo->mp = rxq->mb_pool;
2833         qinfo->scattered_rx = dev->data->scattered_rx;
2834         qinfo->nb_desc = rxq->nb_rx_desc;
2835
2836         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2837         qinfo->conf.rx_drop_en = rxq->drop_en;
2838         qinfo->conf.offloads = rxq->offloads;
2839 }
2840
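/* Fill rte_eth_txq_info; backs the txq_info_get ethdev callback. */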
2841 void
2842 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2843         struct rte_eth_txq_info *qinfo)
2844 {
2845         struct igb_tx_queue *txq;
2846
2847         txq = dev->data->tx_queues[queue_id];
2848
2849         qinfo->nb_desc = txq->nb_tx_desc;
2850
2851         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2852         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2853         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2854         qinfo->conf.offloads = txq->offloads;
2855 }
2856
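/*
 * Deep-copy an rte_flow RSS action into the driver's private storage so the
 * application's key and queue buffers need not stay valid afterwards.
 * Returns -EINVAL when the key does not fit the driver's key storage or the
 * queue count exceeds the MAC's limit (IGB_MAX_RX_QUEUE_NUM, or
 * IGB_MAX_RX_QUEUE_NUM_82576 on 82576).
 *
 * Illustrative sketch only; "rss_action" is a hypothetical caller variable:
 *
 *	struct igb_rte_flow_rss_conf rss_store;
 *
 *	if (igb_rss_conf_init(dev, &rss_store, &rss_action) != 0)
 *		return -EINVAL;
 */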
2857 int
2858 igb_rss_conf_init(struct rte_eth_dev *dev,
2859                   struct igb_rte_flow_rss_conf *out,
2860                   const struct rte_flow_action_rss *in)
2861 {
2862         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2863
2864         if (in->key_len > RTE_DIM(out->key) ||
2865             ((hw->mac.type == e1000_82576) &&
2866              (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
2867             ((hw->mac.type != e1000_82576) &&
2868              (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
2869                 return -EINVAL;
2870         out->conf = (struct rte_flow_action_rss){
2871                 .func = in->func,
2872                 .level = in->level,
2873                 .types = in->types,
2874                 .key_len = in->key_len,
2875                 .queue_num = in->queue_num,
2876                 .key = memcpy(out->key, in->key, in->key_len),
2877                 .queue = memcpy(out->queue, in->queue,
2878                                 sizeof(*in->queue) * in->queue_num),
2879         };
2880         return 0;
2881 }
2882
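/* Return non-zero when two RSS action configurations match exactly. */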
2883 int
2884 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2885                     const struct rte_flow_action_rss *with)
2886 {
2887         return (comp->func == with->func &&
2888                 comp->level == with->level &&
2889                 comp->types == with->types &&
2890                 comp->key_len == with->key_len &&
2891                 comp->queue_num == with->queue_num &&
2892                 !memcmp(comp->key, with->key, with->key_len) &&
2893                 !memcmp(comp->queue, with->queue,
2894                         sizeof(*with->queue) * with->queue_num));
2895 }
2896
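/*
 * Add or remove the rte_flow RSS filter.  Only one configuration can be
 * stored at a time: adding fails with -EINVAL while a filter already exists,
 * and removing requires the stored configuration to match, in which case RSS
 * is disabled and the stored state cleared.  Adding programs the redirection
 * table and hash key (falling back to rss_intel_key when no key is given)
 * and saves the configuration.
 */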
2897 int
2898 igb_config_rss_filter(struct rte_eth_dev *dev,
2899                 struct igb_rte_flow_rss_conf *conf, bool add)
2900 {
2901         uint32_t shift;
2902         uint16_t i, j;
2903         struct rte_eth_rss_conf rss_conf = {
2904                 .rss_key = conf->conf.key_len ?
2905                         (void *)(uintptr_t)conf->conf.key : NULL,
2906                 .rss_key_len = conf->conf.key_len,
2907                 .rss_hf = conf->conf.types,
2908         };
2909         struct e1000_filter_info *filter_info =
2910                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2911         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2912
2915         if (!add) {
2916                 if (igb_action_rss_same(&filter_info->rss_info.conf,
2917                                         &conf->conf)) {
2918                         igb_rss_disable(dev);
2919                         memset(&filter_info->rss_info, 0,
2920                                 sizeof(struct igb_rte_flow_rss_conf));
2921                         return 0;
2922                 }
2923                 return -EINVAL;
2924         }
2925
2926         if (filter_info->rss_info.conf.queue_num)
2927                 return -EINVAL;
2928
2929         /* Fill in redirection table. */
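        /*
         * The redirection table (RETA) holds 128 entries, four queue indices
         * packed per 32-bit register, filled round-robin from the configured
         * queue list (e.g. with two configured queues they alternate across
         * entries).  On the 82575 each index is stored shifted left by 6 bits.
         */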
2930         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2931         for (i = 0, j = 0; i < 128; i++, j++) {
2932                 union e1000_reta {
2933                         uint32_t dword;
2934                         uint8_t  bytes[4];
2935                 } reta;
2936                 uint8_t q_idx;
2937
2938                 if (j == conf->conf.queue_num)
2939                         j = 0;
2940                 q_idx = conf->conf.queue[j];
2941                 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2942                 if ((i & 3) == 3)
2943                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2944         }
2945
2946         /* Configure the RSS key and the RSS protocols used to compute
2947          * the RSS hash of input packets.
2948          */
2949         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2950                 igb_rss_disable(dev);
2951                 return 0;
2952         }
2953         if (rss_conf.rss_key == NULL)
2954                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2955         igb_hw_rss_hash_set(hw, &rss_conf);
2956
2957         if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
2958                 return -EINVAL;
2959
2960         return 0;
2961 }