net/e1000: convert to new Tx offloads API
drivers/net/e1000/igb_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_net.h>
40 #include <rte_string_fns.h>
41
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
45
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
48 #else
49 #define IGB_TX_IEEE1588_TMST 0
50 #endif
51 /* Bit mask to indicate which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK (                    \
53                 PKT_TX_VLAN_PKT |                \
54                 PKT_TX_IP_CKSUM |                \
55                 PKT_TX_L4_MASK |                 \
56                 PKT_TX_TCP_SEG |                 \
57                 IGB_TX_IEEE1588_TMST)
58
59 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
60                 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
61
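/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * shows how the two masks above partition an mbuf's ol_flags.  Any bit
 * outside IGB_TX_OFFLOAD_MASK falls, by construction of the XOR with
 * PKT_TX_OFFLOAD_MASK, into IGB_TX_OFFLOAD_NOTSUP_MASK and is rejected
 * by eth_igb_prep_pkts() below.
 */
static inline int
igb_tx_ol_flags_supported(uint64_t ol_flags)
{
	/* Non-zero unsupported bits mean the request cannot be offloaded. */
	return (ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) == 0;
}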
62 /**
63  * Structure associated with each descriptor of the RX ring of a RX queue.
64  */
65 struct igb_rx_entry {
66         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
67 };
68
69 /**
70  * Structure associated with each descriptor of the TX ring of a TX queue.
71  */
72 struct igb_tx_entry {
73         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
74         uint16_t next_id; /**< Index of next descriptor in ring. */
75         uint16_t last_id; /**< Index of last scattered descriptor. */
76 };
77
78 /**
79  * rx queue flags
80  */
81 enum igb_rxq_flags {
82         IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
83 };
84
85 /**
86  * Structure associated with each RX queue.
87  */
88 struct igb_rx_queue {
89         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
90         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
91         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
92         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
93         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
94         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
95         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
96         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
97         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
98         uint16_t            rx_tail;    /**< current value of RDT register. */
99         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
100         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
101         uint16_t            queue_id;   /**< RX queue index. */
102         uint16_t            reg_idx;    /**< RX queue register index. */
103         uint16_t            port_id;    /**< Device port identifier. */
104         uint8_t             pthresh;    /**< Prefetch threshold register. */
105         uint8_t             hthresh;    /**< Host threshold register. */
106         uint8_t             wthresh;    /**< Write-back threshold register. */
107         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
108         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
109         uint32_t            flags;      /**< RX flags. */
110         uint64_t            offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
111 };
112
113 /**
114  * Hardware context number
115  */
116 enum igb_advctx_num {
117         IGB_CTX_0    = 0, /**< CTX0    */
118         IGB_CTX_1    = 1, /**< CTX1    */
119         IGB_CTX_NUM  = 2, /**< CTX_NUM */
120 };
121
122 /** Offload features */
123 union igb_tx_offload {
124         uint64_t data;
125         struct {
126                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
127                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
128                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier (CPU order). */
129                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
130                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
131
132                 /* uint64_t unused:8; */
133         };
134 };
135
136 /*
137  * Compare mask for igb_tx_offload.data,
138  * should be in sync with igb_tx_offload layout.
139  */
140 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
141 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
142 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
143 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
144 /** Mac + IP + TCP + Mss mask. */
145 #define TX_TSO_CMP_MASK \
146         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
147
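/*
 * Illustrative sketch only (hypothetical helper, mirroring what
 * eth_igb_xmit_pkts() does below): fill the union from an mbuf and apply a
 * compare mask so that only the fields relevant to the requested offloads
 * take part in the context-cache comparison.
 */
static inline uint64_t
igb_tx_offload_masked(const struct rte_mbuf *m, uint64_t cmp_mask)
{
	union igb_tx_offload ol = { .data = 0 };

	ol.l2_len = m->l2_len;
	ol.l3_len = m->l3_len;
	ol.l4_len = m->l4_len;
	ol.vlan_tci = m->vlan_tci;
	ol.tso_segsz = m->tso_segsz;

	/* e.g. cmp_mask == TX_TSO_CMP_MASK keeps only MAC+IP+TCP+MSS bits. */
	return ol.data & cmp_mask;
}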
148 /**
149  * Structure used to check whether a new context descriptor needs to be built
150  */
151 struct igb_advctx_info {
152         uint64_t flags;           /**< ol_flags related to context build. */
153         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
154         union igb_tx_offload tx_offload;
155         /** compare mask for tx offload. */
156         union igb_tx_offload tx_offload_mask;
157 };
158
159 /**
160  * Structure associated with each TX queue.
161  */
162 struct igb_tx_queue {
163         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
164         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
165         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
166         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
167         uint32_t               txd_type;      /**< Device-specific TXD type */
168         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
169         uint16_t               tx_tail; /**< Current value of TDT register. */
170         uint16_t               tx_head;
171         /**< Index of first used TX descriptor. */
172         uint16_t               queue_id; /**< TX queue index. */
173         uint16_t               reg_idx;  /**< TX queue register index. */
174         uint16_t               port_id;  /**< Device port identifier. */
175         uint8_t                pthresh;  /**< Prefetch threshold register. */
176         uint8_t                hthresh;  /**< Host threshold register. */
177         uint8_t                wthresh;  /**< Write-back threshold register. */
178         uint32_t               ctx_curr;
179         /**< Currently used hardware context. */
180         uint32_t               ctx_start;
181         /**< Start context position for transmit queue. */
182         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
183         /**< Hardware context history.*/
184         uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
185 };
186
187 #if 1
188 #define RTE_PMD_USE_PREFETCH
189 #endif
190
191 #ifdef RTE_PMD_USE_PREFETCH
192 #define rte_igb_prefetch(p)     rte_prefetch0(p)
193 #else
194 #define rte_igb_prefetch(p)     do {} while(0)
195 #endif
196
197 #ifdef RTE_PMD_PACKET_PREFETCH
198 #define rte_packet_prefetch(p) rte_prefetch1(p)
199 #else
200 #define rte_packet_prefetch(p)  do {} while(0)
201 #endif
202
203 /*
204  * Macros for the VMDq feature and the TSO limits of the 1 GbE NIC.
205  */
206 #define E1000_VMOLR_SIZE                        (8)
207 #define IGB_TSO_MAX_HDRLEN                      (512)
208 #define IGB_TSO_MAX_MSS                         (9216)
209
210 /*********************************************************************
211  *
212  *  TX function
213  *
214  **********************************************************************/
215
216 /*
217  * There are some hardware limitations for TCP segmentation offload. We
218  * should check whether the parameters are valid.
219  */
220 static inline uint64_t
221 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
222 {
223         if (!(ol_req & PKT_TX_TCP_SEG))
224                 return ol_req;
225         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
226                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
227                 ol_req &= ~PKT_TX_TCP_SEG;
228                 ol_req |= PKT_TX_TCP_CKSUM;
229         }
230         return ol_req;
231 }
232
233 /*
234  * Advanced context descriptors are almost the same between igb/ixgbe.
235  * This is kept as a separate function, leaving room for optimization here.
236  * Rework is required to go with the pre-defined values.
237  */
238
239 static inline void
240 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
241                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
242                 uint64_t ol_flags, union igb_tx_offload tx_offload)
243 {
244         uint32_t type_tucmd_mlhl;
245         uint32_t mss_l4len_idx;
246         uint32_t ctx_idx, ctx_curr;
247         uint32_t vlan_macip_lens;
248         union igb_tx_offload tx_offload_mask;
249
250         ctx_curr = txq->ctx_curr;
251         ctx_idx = ctx_curr + txq->ctx_start;
252
253         tx_offload_mask.data = 0;
254         type_tucmd_mlhl = 0;
255
256         /* Specify which HW CTX to upload. */
257         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
258
259         if (ol_flags & PKT_TX_VLAN_PKT)
260                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
261
262         /* check if TCP segmentation is required for this packet */
263         if (ol_flags & PKT_TX_TCP_SEG) {
264                 /* implies IP cksum in IPv4 */
265                 if (ol_flags & PKT_TX_IP_CKSUM)
266                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
267                                 E1000_ADVTXD_TUCMD_L4T_TCP |
268                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
269                 else
270                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
271                                 E1000_ADVTXD_TUCMD_L4T_TCP |
272                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
273
274                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
275                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
276                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
277         } else { /* no TSO, check if hardware checksum is needed */
278                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
279                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
280
281                 if (ol_flags & PKT_TX_IP_CKSUM)
282                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
283
284                 switch (ol_flags & PKT_TX_L4_MASK) {
285                 case PKT_TX_UDP_CKSUM:
286                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
287                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
289                         break;
290                 case PKT_TX_TCP_CKSUM:
291                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
292                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
294                         break;
295                 case PKT_TX_SCTP_CKSUM:
296                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
297                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
299                         break;
300                 default:
301                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
302                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
303                         break;
304                 }
305         }
306
307         txq->ctx_cache[ctx_curr].flags = ol_flags;
308         txq->ctx_cache[ctx_curr].tx_offload.data =
309                 tx_offload_mask.data & tx_offload.data;
310         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
311
312         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
313         vlan_macip_lens = (uint32_t)tx_offload.data;
314         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
315         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
316         ctx_txd->seqnum_seed = 0;
317 }
318
319 /*
320  * Check which hardware context can be used. Use the existing match
321  * or create a new context descriptor.
322  */
323 static inline uint32_t
324 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
325                 union igb_tx_offload tx_offload)
326 {
327         /* If match with the current context */
328         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
329                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
330                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
331                         return txq->ctx_curr;
332         }
333
334         /* If match with the second context */
335         txq->ctx_curr ^= 1;
336         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
337                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
338                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
339                         return txq->ctx_curr;
340         }
341
342         /* Mismatch: a new context descriptor must be built */
343         return IGB_CTX_NUM;
344 }
345
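/*
 * Illustrative sketch only (hypothetical helper): IGB_CTX_NUM returned by
 * what_advctx_update() means neither of the two cached contexts matched, so
 * one extra context descriptor has to be reserved for the packet.  Note that
 * the real transmit path below calls what_advctx_update() exactly once per
 * packet and reuses the result, since the call toggles ctx_curr.
 */
static inline uint32_t
igb_need_new_ctx(struct igb_tx_queue *txq, uint64_t flags,
		union igb_tx_offload tx_offload)
{
	return what_advctx_update(txq, flags, tx_offload) == IGB_CTX_NUM;
}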
346 static inline uint32_t
347 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
348 {
349         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
350         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
351         uint32_t tmp;
352
353         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
354         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
355         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
356         return tmp;
357 }
358
359 static inline uint32_t
360 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
361 {
362         uint32_t cmdtype;
363         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
364         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
365         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
366         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
367         return cmdtype;
368 }
369
370 uint16_t
371 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
372                uint16_t nb_pkts)
373 {
374         struct igb_tx_queue *txq;
375         struct igb_tx_entry *sw_ring;
376         struct igb_tx_entry *txe, *txn;
377         volatile union e1000_adv_tx_desc *txr;
378         volatile union e1000_adv_tx_desc *txd;
379         struct rte_mbuf     *tx_pkt;
380         struct rte_mbuf     *m_seg;
381         uint64_t buf_dma_addr;
382         uint32_t olinfo_status;
383         uint32_t cmd_type_len;
384         uint32_t pkt_len;
385         uint16_t slen;
386         uint64_t ol_flags;
387         uint16_t tx_end;
388         uint16_t tx_id;
389         uint16_t tx_last;
390         uint16_t nb_tx;
391         uint64_t tx_ol_req;
392         uint32_t new_ctx = 0;
393         uint32_t ctx = 0;
394         union igb_tx_offload tx_offload = {0};
395
396         txq = tx_queue;
397         sw_ring = txq->sw_ring;
398         txr     = txq->tx_ring;
399         tx_id   = txq->tx_tail;
400         txe = &sw_ring[tx_id];
401
402         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
403                 tx_pkt = *tx_pkts++;
404                 pkt_len = tx_pkt->pkt_len;
405
406                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
407
408                 /*
409                  * The number of descriptors that must be allocated for a
410                  * packet is the number of segments of that packet, plus 1
411                  * Context Descriptor if a hardware offload context is required.
412                  * Determine the last TX descriptor to allocate in the TX ring
413                  * for the packet, starting from the current position (tx_id)
414                  * in the ring.
415                  */
416                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
417
418                 ol_flags = tx_pkt->ol_flags;
419                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
420
421                 /* Check whether a Context Descriptor needs to be built. */
422                 if (tx_ol_req) {
423                         tx_offload.l2_len = tx_pkt->l2_len;
424                         tx_offload.l3_len = tx_pkt->l3_len;
425                         tx_offload.l4_len = tx_pkt->l4_len;
426                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
427                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
428                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
429
430                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
431                         /* Only allocate a context descriptor if required. */
432                         new_ctx = (ctx == IGB_CTX_NUM);
433                         ctx = txq->ctx_curr + txq->ctx_start;
434                         tx_last = (uint16_t) (tx_last + new_ctx);
435                 }
436                 if (tx_last >= txq->nb_tx_desc)
437                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
438
439                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
440                            " tx_first=%u tx_last=%u",
441                            (unsigned) txq->port_id,
442                            (unsigned) txq->queue_id,
443                            (unsigned) pkt_len,
444                            (unsigned) tx_id,
445                            (unsigned) tx_last);
446
447                 /*
448                  * Check if there are enough free descriptors in the TX ring
449                  * to transmit the next packet.
450                  * This operation is based on the two following rules:
451                  *
452                  *   1- Only check that the last needed TX descriptor can be
453                  *      allocated (by construction, if that descriptor is free,
454                  *      all intermediate ones are also free).
455                  *
456                  *      For this purpose, the index of the last TX descriptor
457                  *      used for a packet (the "last descriptor" of a packet)
458                  *      is recorded in the TX entries (the last one included)
459                  *      that are associated with all TX descriptors allocated
460                  *      for that packet.
461                  *
462                  *   2- Avoid allocating the last free TX descriptor of the
463                  *      ring, in order to never set the TDT register with the
464                  *      same value stored in parallel by the NIC in the TDH
465                  *      register, which would make the TX engine of the NIC
466                  *      enter a deadlock situation.
467                  *
468                  *      By extension, avoid allocating a free descriptor that
469                  *      belongs to the last set of free descriptors allocated
470                  *      to the same packet previously transmitted.
471                  */
472
473                 /*
474                  * The "last descriptor" of the packet, if any, that previously
475                  * used the entry we want to allocate as our last descriptor.
476                  */
477                 tx_end = sw_ring[tx_last].last_id;
478
479                 /*
480                  * The next descriptor following that "last descriptor" in the
481                  * ring.
482                  */
483                 tx_end = sw_ring[tx_end].next_id;
484
485                 /*
486                  * The "last descriptor" associated with that next descriptor.
487                  */
488                 tx_end = sw_ring[tx_end].last_id;
489
490                 /*
491                  * Check that this descriptor is free.
492                  */
493                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
494                         if (nb_tx == 0)
495                                 return 0;
496                         goto end_of_tx;
497                 }
498
499                 /*
500                  * Set common flags of all TX Data Descriptors.
501                  *
502                  * The following bits must be set in all Data Descriptors:
503                  *   - E1000_ADVTXD_DTYP_DATA
504                  *   - E1000_ADVTXD_DCMD_DEXT
505                  *
506                  * The following bits must be set in the first Data Descriptor
507                  * and are ignored in the other ones:
508                  *   - E1000_ADVTXD_DCMD_IFCS
509                  *   - E1000_ADVTXD_MAC_1588
510                  *   - E1000_ADVTXD_DCMD_VLE
511                  *
512                  * The following bits must only be set in the last Data
513                  * Descriptor:
514                  *   - E1000_TXD_CMD_EOP
515                  *
516                  * The following bits can be set in any Data Descriptor, but
517                  * are only set in the last Data Descriptor:
518                  *   - E1000_TXD_CMD_RS
519                  */
520                 cmd_type_len = txq->txd_type |
521                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
522                 if (tx_ol_req & PKT_TX_TCP_SEG)
523                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
524                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
525 #if defined(RTE_LIBRTE_IEEE1588)
526                 if (ol_flags & PKT_TX_IEEE1588_TMST)
527                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
528 #endif
529                 if (tx_ol_req) {
530                         /* Setup TX Advanced context descriptor if required */
531                         if (new_ctx) {
532                                 volatile struct e1000_adv_tx_context_desc *
533                                     ctx_txd;
534
535                                 ctx_txd = (volatile struct
536                                     e1000_adv_tx_context_desc *)
537                                     &txr[tx_id];
538
539                                 txn = &sw_ring[txe->next_id];
540                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
541
542                                 if (txe->mbuf != NULL) {
543                                         rte_pktmbuf_free_seg(txe->mbuf);
544                                         txe->mbuf = NULL;
545                                 }
546
547                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
548
549                                 txe->last_id = tx_last;
550                                 tx_id = txe->next_id;
551                                 txe = txn;
552                         }
553
554                         /* Setup the TX Advanced Data Descriptor */
555                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
556                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
557                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
558                 }
559
560                 m_seg = tx_pkt;
561                 do {
562                         txn = &sw_ring[txe->next_id];
563                         txd = &txr[tx_id];
564
565                         if (txe->mbuf != NULL)
566                                 rte_pktmbuf_free_seg(txe->mbuf);
567                         txe->mbuf = m_seg;
568
569                         /*
570                          * Set up transmit descriptor.
571                          */
572                         slen = (uint16_t) m_seg->data_len;
573                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
574                         txd->read.buffer_addr =
575                                 rte_cpu_to_le_64(buf_dma_addr);
576                         txd->read.cmd_type_len =
577                                 rte_cpu_to_le_32(cmd_type_len | slen);
578                         txd->read.olinfo_status =
579                                 rte_cpu_to_le_32(olinfo_status);
580                         txe->last_id = tx_last;
581                         tx_id = txe->next_id;
582                         txe = txn;
583                         m_seg = m_seg->next;
584                 } while (m_seg != NULL);
585
586                 /*
587                  * The last packet data descriptor needs End Of Packet (EOP)
588                  * and Report Status (RS).
589                  */
590                 txd->read.cmd_type_len |=
591                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
592         }
593  end_of_tx:
594         rte_wmb();
595
596         /*
597          * Set the Transmit Descriptor Tail (TDT).
598          */
599         E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
600         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
601                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
602                    (unsigned) tx_id, (unsigned) nb_tx);
603         txq->tx_tail = tx_id;
604
605         return nb_tx;
606 }
607
608 /*********************************************************************
609  *
610  *  TX prep functions
611  *
612  **********************************************************************/
613 uint16_t
614 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
615                 uint16_t nb_pkts)
616 {
617         int i, ret;
618         struct rte_mbuf *m;
619
620         for (i = 0; i < nb_pkts; i++) {
621                 m = tx_pkts[i];
622
623                 /* Check some limitations for TSO in hardware */
624                 if (m->ol_flags & PKT_TX_TCP_SEG)
625                         if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
626                                         (m->l2_len + m->l3_len + m->l4_len >
627                                         IGB_TSO_MAX_HDRLEN)) {
628                                 rte_errno = EINVAL;
629                                 return i;
630                         }
631
632                 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
633                         rte_errno = ENOTSUP;
634                         return i;
635                 }
636
637 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
638                 ret = rte_validate_tx_offload(m);
639                 if (ret != 0) {
640                         rte_errno = -ret;
641                         return i;
642                 }
643 #endif
644                 ret = rte_net_intel_cksum_prepare(m);
645                 if (ret != 0) {
646                         rte_errno = -ret;
647                         return i;
648                 }
649         }
650
651         return i;
652 }
653
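/*
 * Illustrative usage sketch only (application side, not part of the PMD):
 * eth_igb_prep_pkts() is the tx_pkt_prepare callback behind
 * rte_eth_tx_prepare() for this driver, so a typical transmit path validates
 * a burst before handing it to rte_eth_tx_burst().  port_id/queue_id are
 * whatever the application configured; nothing here is e1000 specific.
 */
static inline uint16_t
igb_example_xmit_burst(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/* Check offload requests; rte_errno reports why pkts[nb_prep] failed. */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* Hand only the validated packets to the hardware queue. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}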
654 /*********************************************************************
655  *
656  *  RX functions
657  *
658  **********************************************************************/
659 #define IGB_PACKET_TYPE_IPV4              0X01
660 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
661 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
662 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
663 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
664 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
665 #define IGB_PACKET_TYPE_IPV6              0X04
666 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
667 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
668 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
669 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
670 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
671 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
672 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
673 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
674 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
675 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
676 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
677 #define IGB_PACKET_TYPE_MAX               0X80
678 #define IGB_PACKET_TYPE_MASK              0X7F
679 #define IGB_PACKET_TYPE_SHIFT             0X04
680 static inline uint32_t
681 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
682 {
683         static const uint32_t
684                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
685                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
686                         RTE_PTYPE_L3_IPV4,
687                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
688                         RTE_PTYPE_L3_IPV4_EXT,
689                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
690                         RTE_PTYPE_L3_IPV6,
691                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
692                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
693                         RTE_PTYPE_INNER_L3_IPV6,
694                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
695                         RTE_PTYPE_L3_IPV6_EXT,
696                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
697                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
698                         RTE_PTYPE_INNER_L3_IPV6_EXT,
699                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
700                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
701                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
702                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
703                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
704                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
705                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
706                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
707                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
708                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
709                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
710                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
711                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
712                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
713                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
714                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
715                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
716                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
718                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
719                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
720                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
721                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
722                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
723                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
724                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
725                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
726                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
727         };
728         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
729                 return RTE_PTYPE_UNKNOWN;
730
731         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
732
733         return ptype_table[pkt_info];
734 }
735
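/*
 * Illustrative example only (hypothetical helper): a plain IPv4/TCP packet
 * yields IGB_PACKET_TYPE_IPV4_TCP (0x11) after the shift/mask above, which
 * the table translates to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 * RTE_PTYPE_L4_TCP.  An application can then test the resulting
 * mbuf->packet_type like this.
 */
static inline int
igb_pkt_type_is_ipv4_tcp(uint32_t packet_type)
{
	return (packet_type & (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK)) ==
		(RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP);
}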
736 static inline uint64_t
737 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
738 {
739         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
740
741 #if defined(RTE_LIBRTE_IEEE1588)
742         static uint32_t ip_pkt_etqf_map[8] = {
743                 0, 0, 0, PKT_RX_IEEE1588_PTP,
744                 0, 0, 0, 0,
745         };
746
747         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
748         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
749
750         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
751         if (hw->mac.type == e1000_i210)
752                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
753         else
754                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
755 #else
756         RTE_SET_USED(rxq);
757 #endif
758
759         return pkt_flags;
760 }
761
762 static inline uint64_t
763 rx_desc_status_to_pkt_flags(uint32_t rx_status)
764 {
765         uint64_t pkt_flags;
766
767         /* Check if VLAN present */
768         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
769                 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
770
771 #if defined(RTE_LIBRTE_IEEE1588)
772         if (rx_status & E1000_RXD_STAT_TMST)
773                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
774 #endif
775         return pkt_flags;
776 }
777
778 static inline uint64_t
779 rx_desc_error_to_pkt_flags(uint32_t rx_status)
780 {
781         /*
782          * Bit 30: IPE, IPv4 checksum error
783          * Bit 29: L4I, L4I integrity error
784          */
785
786         static uint64_t error_to_pkt_flags_map[4] = {
787                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
788                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
789                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
790                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
791         };
792         return error_to_pkt_flags_map[(rx_status >>
793                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
794 }
795
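/*
 * Illustrative sketch only (hypothetical helper): a descriptor reporting an
 * IPv4 checksum error (IPE, bit 30) with a good L4 checksum indexes entry 2
 * of the table above and therefore carries PKT_RX_IP_CKSUM_BAD |
 * PKT_RX_L4_CKSUM_GOOD.
 */
static inline int
igb_rx_ip_cksum_is_bad(uint32_t rx_status)
{
	return (rx_desc_error_to_pkt_flags(rx_status) & PKT_RX_IP_CKSUM_BAD) != 0;
}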
796 uint16_t
797 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
798                uint16_t nb_pkts)
799 {
800         struct igb_rx_queue *rxq;
801         volatile union e1000_adv_rx_desc *rx_ring;
802         volatile union e1000_adv_rx_desc *rxdp;
803         struct igb_rx_entry *sw_ring;
804         struct igb_rx_entry *rxe;
805         struct rte_mbuf *rxm;
806         struct rte_mbuf *nmb;
807         union e1000_adv_rx_desc rxd;
808         uint64_t dma_addr;
809         uint32_t staterr;
810         uint32_t hlen_type_rss;
811         uint16_t pkt_len;
812         uint16_t rx_id;
813         uint16_t nb_rx;
814         uint16_t nb_hold;
815         uint64_t pkt_flags;
816
817         nb_rx = 0;
818         nb_hold = 0;
819         rxq = rx_queue;
820         rx_id = rxq->rx_tail;
821         rx_ring = rxq->rx_ring;
822         sw_ring = rxq->sw_ring;
823         while (nb_rx < nb_pkts) {
824                 /*
825                  * The order of operations here is important as the DD status
826                  * bit must not be read after any other descriptor fields.
827                  * rx_ring and rxdp are pointing to volatile data so the order
828                  * of accesses cannot be reordered by the compiler. If they were
829                  * not volatile, they could be reordered which could lead to
830                  * using invalid descriptor fields when read from rxd.
831                  */
832                 rxdp = &rx_ring[rx_id];
833                 staterr = rxdp->wb.upper.status_error;
834                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
835                         break;
836                 rxd = *rxdp;
837
838                 /*
839                  * End of packet.
840                  *
841                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
842                  * likely to be invalid and to be dropped by the various
843                  * validation checks performed by the network stack.
844                  *
845                  * Allocate a new mbuf to replenish the RX ring descriptor.
846                  * If the allocation fails:
847                  *    - arrange for that RX descriptor to be the first one
848                  *      being parsed the next time the receive function is
849                  *      invoked [on the same queue].
850                  *
851                  *    - Stop parsing the RX ring and return immediately.
852                  *
853                  * This policy does not drop the packet received in the RX
854                  * descriptor for which the allocation of a new mbuf failed.
855                  * Thus, it allows that packet to be later retrieved if
856                  * mbufs have been freed in the meantime.
857                  * As a side effect, holding RX descriptors instead of
858                  * systematically giving them back to the NIC may lead to
859                  * RX ring exhaustion situations.
860                  * However, the NIC can gracefully prevent such situations
861                  * from happening by sending specific "back-pressure" flow control
862                  * frames to its peer(s).
863                  */
864                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
865                            "staterr=0x%x pkt_len=%u",
866                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
867                            (unsigned) rx_id, (unsigned) staterr,
868                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
869
870                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
871                 if (nmb == NULL) {
872                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
873                                    "queue_id=%u", (unsigned) rxq->port_id,
874                                    (unsigned) rxq->queue_id);
875                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
876                         break;
877                 }
878
879                 nb_hold++;
880                 rxe = &sw_ring[rx_id];
881                 rx_id++;
882                 if (rx_id == rxq->nb_rx_desc)
883                         rx_id = 0;
884
885                 /* Prefetch next mbuf while processing current one. */
886                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
887
888                 /*
889                  * When next RX descriptor is on a cache-line boundary,
890                  * prefetch the next 4 RX descriptors and the next 8 pointers
891                  * to mbufs.
892                  */
893                 if ((rx_id & 0x3) == 0) {
894                         rte_igb_prefetch(&rx_ring[rx_id]);
895                         rte_igb_prefetch(&sw_ring[rx_id]);
896                 }
897
898                 rxm = rxe->mbuf;
899                 rxe->mbuf = nmb;
900                 dma_addr =
901                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
902                 rxdp->read.hdr_addr = 0;
903                 rxdp->read.pkt_addr = dma_addr;
904
905                 /*
906                  * Initialize the returned mbuf.
907                  * 1) setup generic mbuf fields:
908                  *    - number of segments,
909                  *    - next segment,
910                  *    - packet length,
911                  *    - RX port identifier.
912                  * 2) integrate hardware offload data, if any:
913                  *    - RSS flag & hash,
914                  *    - IP checksum flag,
915                  *    - VLAN TCI, if any,
916                  *    - error flags.
917                  */
918                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
919                                       rxq->crc_len);
920                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
921                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
922                 rxm->nb_segs = 1;
923                 rxm->next = NULL;
924                 rxm->pkt_len = pkt_len;
925                 rxm->data_len = pkt_len;
926                 rxm->port = rxq->port_id;
927
928                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
929                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
930
931                 /*
932                  * The vlan_tci field is only valid when PKT_RX_VLAN is
933                  * set in the pkt_flags field and must be in CPU byte order.
934                  */
935                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
936                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
937                         rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
938                 } else {
939                         rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
940                 }
941                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
942                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
943                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
944                 rxm->ol_flags = pkt_flags;
945                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
946                                                 lo_dword.hs_rss.pkt_info);
947
948                 /*
949                  * Store the mbuf address into the next entry of the array
950                  * of returned packets.
951                  */
952                 rx_pkts[nb_rx++] = rxm;
953         }
954         rxq->rx_tail = rx_id;
955
956         /*
957          * If the number of free RX descriptors is greater than the RX free
958          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
959          * register.
960          * Update the RDT with the value of the last processed RX descriptor
961          * minus 1, to guarantee that the RDT register is never equal to the
962          * RDH register, which creates a "full" ring situation from the
963          * hardware point of view...
964          */
965         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
966         if (nb_hold > rxq->rx_free_thresh) {
967                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
968                            "nb_hold=%u nb_rx=%u",
969                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
970                            (unsigned) rx_id, (unsigned) nb_hold,
971                            (unsigned) nb_rx);
972                 rx_id = (uint16_t) ((rx_id == 0) ?
973                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
974                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
975                 nb_hold = 0;
976         }
977         rxq->nb_rx_hold = nb_hold;
978         return nb_rx;
979 }
980
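/*
 * Illustrative usage sketch only (application side, not part of the PMD):
 * eth_igb_recv_pkts() is invoked through rte_eth_rx_burst(); the ol_flags
 * filled in above are then consumed by the application, e.g. to count
 * packets with bad checksums.  port_id/queue_id are whatever the
 * application configured.
 */
static inline uint16_t
igb_example_count_bad_cksum(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, nb_pkts);
	uint16_t i, bad = 0;

	for (i = 0; i < nb_rx; i++)
		if (pkts[i]->ol_flags &
				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
			bad++;
	return bad;
}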
981 uint16_t
982 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
983                          uint16_t nb_pkts)
984 {
985         struct igb_rx_queue *rxq;
986         volatile union e1000_adv_rx_desc *rx_ring;
987         volatile union e1000_adv_rx_desc *rxdp;
988         struct igb_rx_entry *sw_ring;
989         struct igb_rx_entry *rxe;
990         struct rte_mbuf *first_seg;
991         struct rte_mbuf *last_seg;
992         struct rte_mbuf *rxm;
993         struct rte_mbuf *nmb;
994         union e1000_adv_rx_desc rxd;
995         uint64_t dma; /* Physical address of mbuf data buffer */
996         uint32_t staterr;
997         uint32_t hlen_type_rss;
998         uint16_t rx_id;
999         uint16_t nb_rx;
1000         uint16_t nb_hold;
1001         uint16_t data_len;
1002         uint64_t pkt_flags;
1003
1004         nb_rx = 0;
1005         nb_hold = 0;
1006         rxq = rx_queue;
1007         rx_id = rxq->rx_tail;
1008         rx_ring = rxq->rx_ring;
1009         sw_ring = rxq->sw_ring;
1010
1011         /*
1012          * Retrieve RX context of current packet, if any.
1013          */
1014         first_seg = rxq->pkt_first_seg;
1015         last_seg = rxq->pkt_last_seg;
1016
1017         while (nb_rx < nb_pkts) {
1018         next_desc:
1019                 /*
1020                  * The order of operations here is important as the DD status
1021                  * bit must not be read after any other descriptor fields.
1022                  * rx_ring and rxdp are pointing to volatile data so the order
1023                  * of accesses cannot be reordered by the compiler. If they were
1024                  * not volatile, they could be reordered which could lead to
1025                  * using invalid descriptor fields when read from rxd.
1026                  */
1027                 rxdp = &rx_ring[rx_id];
1028                 staterr = rxdp->wb.upper.status_error;
1029                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1030                         break;
1031                 rxd = *rxdp;
1032
1033                 /*
1034                  * Descriptor done.
1035                  *
1036                  * Allocate a new mbuf to replenish the RX ring descriptor.
1037                  * If the allocation fails:
1038                  *    - arrange for that RX descriptor to be the first one
1039                  *      being parsed the next time the receive function is
1040                  *      invoked [on the same queue].
1041                  *
1042                  *    - Stop parsing the RX ring and return immediately.
1043                  *
1044                  * This policy does not drop the packet received in the RX
1045                  * descriptor for which the allocation of a new mbuf failed.
1046                  * Thus, it allows that packet to be later retrieved if
1047                  * mbufs have been freed in the meantime.
1048                  * As a side effect, holding RX descriptors instead of
1049                  * systematically giving them back to the NIC may lead to
1050                  * RX ring exhaustion situations.
1051                  * However, the NIC can gracefully prevent such situations
1052                  * from happening by sending specific "back-pressure" flow control
1053                  * frames to its peer(s).
1054                  */
1055                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1056                            "staterr=0x%x data_len=%u",
1057                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1058                            (unsigned) rx_id, (unsigned) staterr,
1059                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1060
1061                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1062                 if (nmb == NULL) {
1063                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1064                                    "queue_id=%u", (unsigned) rxq->port_id,
1065                                    (unsigned) rxq->queue_id);
1066                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1067                         break;
1068                 }
1069
1070                 nb_hold++;
1071                 rxe = &sw_ring[rx_id];
1072                 rx_id++;
1073                 if (rx_id == rxq->nb_rx_desc)
1074                         rx_id = 0;
1075
1076                 /* Prefetch next mbuf while processing current one. */
1077                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1078
1079                 /*
1080                  * When next RX descriptor is on a cache-line boundary,
1081                  * prefetch the next 4 RX descriptors and the next 8 pointers
1082                  * to mbufs.
1083                  */
1084                 if ((rx_id & 0x3) == 0) {
1085                         rte_igb_prefetch(&rx_ring[rx_id]);
1086                         rte_igb_prefetch(&sw_ring[rx_id]);
1087                 }
1088
1089                 /*
1090                  * Update RX descriptor with the physical address of the new
1091                  * data buffer of the new allocated mbuf.
1092                  */
1093                 rxm = rxe->mbuf;
1094                 rxe->mbuf = nmb;
1095                 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1096                 rxdp->read.pkt_addr = dma;
1097                 rxdp->read.hdr_addr = 0;
1098
1099                 /*
1100                  * Set data length & data buffer address of mbuf.
1101                  */
1102                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1103                 rxm->data_len = data_len;
1104                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1105
1106                 /*
1107                  * If this is the first buffer of the received packet,
1108                  * set the pointer to the first mbuf of the packet and
1109                  * initialize its context.
1110                  * Otherwise, update the total length and the number of segments
1111                  * of the current scattered packet, and update the pointer to
1112                  * the last mbuf of the current packet.
1113                  */
1114                 if (first_seg == NULL) {
1115                         first_seg = rxm;
1116                         first_seg->pkt_len = data_len;
1117                         first_seg->nb_segs = 1;
1118                 } else {
1119                         first_seg->pkt_len += data_len;
1120                         first_seg->nb_segs++;
1121                         last_seg->next = rxm;
1122                 }
1123
1124                 /*
1125                  * If this is not the last buffer of the received packet,
1126                  * update the pointer to the last mbuf of the current scattered
1127                  * packet and continue to parse the RX ring.
1128                  */
1129                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1130                         last_seg = rxm;
1131                         goto next_desc;
1132                 }
1133
1134                 /*
1135                  * This is the last buffer of the received packet.
1136                  * If the CRC is not stripped by the hardware:
1137                  *   - Subtract the CRC length from the total packet length.
1138                  *   - If the last buffer only contains the whole CRC or a part
1139                  *     of it, free the mbuf associated to the last buffer.
1140                  *     If part of the CRC is also contained in the previous
1141                  *     mbuf, subtract the length of that CRC part from the
1142                  *     data length of the previous mbuf.
1143                  */
1144                 rxm->next = NULL;
1145                 if (unlikely(rxq->crc_len > 0)) {
1146                         first_seg->pkt_len -= ETHER_CRC_LEN;
1147                         if (data_len <= ETHER_CRC_LEN) {
1148                                 rte_pktmbuf_free_seg(rxm);
1149                                 first_seg->nb_segs--;
1150                                 last_seg->data_len = (uint16_t)
1151                                         (last_seg->data_len -
1152                                          (ETHER_CRC_LEN - data_len));
1153                                 last_seg->next = NULL;
1154                         } else
1155                                 rxm->data_len =
1156                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1157                 }
1158
1159                 /*
1160                  * Initialize the first mbuf of the returned packet:
1161                  *    - RX port identifier,
1162                  *    - hardware offload data, if any:
1163                  *      - RSS flag & hash,
1164                  *      - IP checksum flag,
1165                  *      - VLAN TCI, if any,
1166                  *      - error flags.
1167                  */
1168                 first_seg->port = rxq->port_id;
1169                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1170
1171                 /*
1172                  * The vlan_tci field is only valid when PKT_RX_VLAN is
1173                  * set in the pkt_flags field and must be in CPU byte order.
1174                  */
1175                 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1176                                 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1177                         first_seg->vlan_tci =
1178                                 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1179                 } else {
1180                         first_seg->vlan_tci =
1181                                 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1182                 }
1183                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1184                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1185                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1186                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1187                 first_seg->ol_flags = pkt_flags;
1188                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1189                                         lower.lo_dword.hs_rss.pkt_info);
1190
1191                 /* Prefetch data of first segment, if configured to do so. */
1192                 rte_packet_prefetch((char *)first_seg->buf_addr +
1193                         first_seg->data_off);
1194
1195                 /*
1196                  * Store the mbuf address into the next entry of the array
1197                  * of returned packets.
1198                  */
1199                 rx_pkts[nb_rx++] = first_seg;
1200
1201                 /*
1202                  * Set up the receive context for a new packet.
1203                  */
1204                 first_seg = NULL;
1205         }
1206
1207         /*
1208          * Record index of the next RX descriptor to probe.
1209          */
1210         rxq->rx_tail = rx_id;
1211
1212         /*
1213          * Save receive context.
1214          */
1215         rxq->pkt_first_seg = first_seg;
1216         rxq->pkt_last_seg = last_seg;
1217
1218         /*
1219          * If the number of free RX descriptors is greater than the RX free
1220          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1221          * register.
1222          * Update the RDT with the value of the last processed RX descriptor
1223          * minus 1, to guarantee that the RDT register is never equal to the
1224          * RDH register, which creates a "full" ring situation from the
1225          * hardware point of view...
1226          */
1227         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1228         if (nb_hold > rxq->rx_free_thresh) {
1229                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1230                            "nb_hold=%u nb_rx=%u",
1231                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1232                            (unsigned) rx_id, (unsigned) nb_hold,
1233                            (unsigned) nb_rx);
1234                 rx_id = (uint16_t) ((rx_id == 0) ?
1235                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1236                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1237                 nb_hold = 0;
1238         }
1239         rxq->nb_rx_hold = nb_hold;
1240         return nb_rx;
1241 }
1242
1243 /*
1244  * Maximum number of Ring Descriptors.
1245  *
1246  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1247  * descriptors should meet the following condition:
1248  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1249  */
1250
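/*
 * Illustrative sketch only (hypothetical helper): an advanced descriptor is
 * 16 bytes, so the 128-byte rule above simply means the ring size must be a
 * multiple of 8 descriptors.
 */
static inline int
igb_ring_size_is_valid(uint16_t nb_desc)
{
	return (nb_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0;
}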
1251 static void
1252 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1253 {
1254         unsigned i;
1255
1256         if (txq->sw_ring != NULL) {
1257                 for (i = 0; i < txq->nb_tx_desc; i++) {
1258                         if (txq->sw_ring[i].mbuf != NULL) {
1259                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1260                                 txq->sw_ring[i].mbuf = NULL;
1261                         }
1262                 }
1263         }
1264 }
1265
1266 static void
1267 igb_tx_queue_release(struct igb_tx_queue *txq)
1268 {
1269         if (txq != NULL) {
1270                 igb_tx_queue_release_mbufs(txq);
1271                 rte_free(txq->sw_ring);
1272                 rte_free(txq);
1273         }
1274 }
1275
1276 void
1277 eth_igb_tx_queue_release(void *txq)
1278 {
1279         igb_tx_queue_release(txq);
1280 }
1281
1282 static int
1283 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1284 {
1285         struct igb_tx_entry *sw_ring;
1286         volatile union e1000_adv_tx_desc *txr;
1287         uint16_t tx_first; /* First segment analyzed. */
1288         uint16_t tx_id;    /* Current segment being processed. */
1289         uint16_t tx_last;  /* Last segment in the current packet. */
1290         uint16_t tx_next;  /* First segment of the next packet. */
1291         int count;
1292
1293         if (txq != NULL) {
1294                 count = 0;
1295                 sw_ring = txq->sw_ring;
1296                 txr = txq->tx_ring;
1297
1298                 /*
1299                  * tx_tail is the last sent packet on the sw_ring. Go to the end
1300                  * of that packet (the last segment in the packet chain); the
1301                  * segment after it is the start of the oldest packet still on
1302                  * the sw_ring. That packet is the first one this function
1303                  * attempts to free.
1304                  */
1305
1306                 /* Get last segment in most recently added packet. */
1307                 tx_first = sw_ring[txq->tx_tail].last_id;
1308
1309                 /* Get the next segment, which is the oldest segment in the ring. */
1310                 tx_first = sw_ring[tx_first].next_id;
1311
1312                 /* Set the current index to the first. */
1313                 tx_id = tx_first;
1314
1315                 /*
1316                  * Loop through each packet. For each packet, verify that an
1317                  * mbuf exists and that the last segment is free. If so, free
1318                  * it and move on.
1319                  */
1320                 while (1) {
1321                         tx_last = sw_ring[tx_id].last_id;
1322
1323                         if (sw_ring[tx_last].mbuf) {
1324                                 if (txr[tx_last].wb.status &
1325                                                 E1000_TXD_STAT_DD) {
1326                                         /*
1327                                          * Increment the number of packets
1328                                          * freed.
1329                                          */
1330                                         count++;
1331
1332                                         /* Get the start of the next packet. */
1333                                         tx_next = sw_ring[tx_last].next_id;
1334
1335                                         /*
1336                                          * Loop through all segments in a
1337                                          * packet.
1338                                          */
1339                                         do {
1340                                                 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1341                                                 sw_ring[tx_id].mbuf = NULL;
1342                                                 sw_ring[tx_id].last_id = tx_id;
1343
1344                                                 /* Move to next segment. */
1345                                                 tx_id = sw_ring[tx_id].next_id;
1346
1347                                         } while (tx_id != tx_next);
1348
1349                                         if (unlikely(count == (int)free_cnt))
1350                                                 break;
1351                                 } else
1352                                         /*
1353                                          * mbuf still in use, nothing left to
1354                                          * free.
1355                                          */
1356                                         break;
1357                         } else {
1358                                 /*
1359                                  * There are multiple reasons to be here:
1360                                  * 1) All the packets on the ring have been
1361                                  *    freed - tx_id is equal to tx_first
1362                                  *    and some packets have been freed.
1363                                  *    - Done, exit
1364                                  * 2) The interface has not sent a ring's worth
1365                                  *    of packets yet, so the segment after tail
1366                                  *    is still empty. Or a previous call to this
1367                                  *    function freed some of the segments but
1368                                  *    not all, so there is a hole in the list.
1369                                  *    Hopefully this is a rare case.
1370                                  *    - Walk the list and find the next mbuf. If
1371                                  *      there isn't one, then done.
1372                                  */
1373                                 if (likely((tx_id == tx_first) && (count != 0)))
1374                                         break;
1375
1376                                 /*
1377                                  * Walk the list and find the next mbuf, if any.
1378                                  */
1379                                 do {
1380                                         /* Move to next segment. */
1381                                         tx_id = sw_ring[tx_id].next_id;
1382
1383                                         if (sw_ring[tx_id].mbuf)
1384                                                 break;
1385
1386                                 } while (tx_id != tx_first);
1387
1388                                 /*
1389                                  * Determine why previous loop bailed. If there
1390                                  * is not an mbuf, done.
1391                                  */
1392                                 if (sw_ring[tx_id].mbuf == NULL)
1393                                         break;
1394                         }
1395                 }
1396         } else
1397                 count = -ENODEV;
1398
1399         return count;
1400 }
1401
1402 int
1403 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1404 {
1405         return igb_tx_done_cleanup(txq, free_cnt);
1406 }
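
/*
 * Illustrative usage sketch (not part of the driver): an application can
 * reclaim already-transmitted mbufs ahead of time through the generic
 * ethdev call below, which is serviced by eth_igb_tx_done_cleanup() above.
 * The wrapper name and the free count of 64 are assumptions made for this
 * example.
 */
#if 0
static int
example_reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
        /* Free up to 64 completed packets on this queue; a negative
         * return value reports an error such as -ENODEV.
         */
        return rte_eth_tx_done_cleanup(port_id, queue_id, 64);
}
#endif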
1407
1408 static void
1409 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1410 {
1411         txq->tx_head = 0;
1412         txq->tx_tail = 0;
1413         txq->ctx_curr = 0;
1414         memset((void *)&txq->ctx_cache, 0,
1415                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1416 }
1417
1418 static void
1419 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1420 {
1421         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1422         struct igb_tx_entry *txe = txq->sw_ring;
1423         uint16_t i, prev;
1424         struct e1000_hw *hw;
1425
1426         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427         /* Zero out HW ring memory */
1428         for (i = 0; i < txq->nb_tx_desc; i++) {
1429                 txq->tx_ring[i] = zeroed_desc;
1430         }
1431
1432         /* Initialize ring entries */
1433         prev = (uint16_t)(txq->nb_tx_desc - 1);
1434         for (i = 0; i < txq->nb_tx_desc; i++) {
1435                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1436
1437                 txd->wb.status = E1000_TXD_STAT_DD;
1438                 txe[i].mbuf = NULL;
1439                 txe[i].last_id = i;
1440                 txe[prev].next_id = i;
1441                 prev = i;
1442         }
1443
1444         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1445         /* 82575 specific, each tx queue will use 2 hw contexts */
1446         if (hw->mac.type == e1000_82575)
1447                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1448
1449         igb_reset_tx_queue_stat(txq);
1450 }
1451
1452 uint64_t
1453 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1454 {
1455         uint64_t tx_offload_capa;
1456
1457         RTE_SET_USED(dev);
1458         tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1459                           DEV_TX_OFFLOAD_IPV4_CKSUM  |
1460                           DEV_TX_OFFLOAD_UDP_CKSUM   |
1461                           DEV_TX_OFFLOAD_TCP_CKSUM   |
1462                           DEV_TX_OFFLOAD_SCTP_CKSUM  |
1463                           DEV_TX_OFFLOAD_TCP_TSO;
1464
1465         return tx_offload_capa;
1466 }
1467
1468 uint64_t
1469 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1470 {
1471         uint64_t tx_queue_offload_capa;
1472
1473         tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1474
1475         return tx_queue_offload_capa;
1476 }
1477
1478 static int
1479 igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
1480 {
1481         uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
1482         uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
1483         uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
1484
1485         if ((requested & (queue_supported | port_supported)) != requested)
1486                 return 0;
1487
1488         if ((port_offloads ^ requested) & port_supported)
1489                 return 0;
1490
1491         return 1;
1492 }
1493
1494 int
1495 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1496                          uint16_t queue_idx,
1497                          uint16_t nb_desc,
1498                          unsigned int socket_id,
1499                          const struct rte_eth_txconf *tx_conf)
1500 {
1501         const struct rte_memzone *tz;
1502         struct igb_tx_queue *txq;
1503         struct e1000_hw     *hw;
1504         uint32_t size;
1505
1506         if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
1507                 PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
1508                         " don't match port offloads 0x%" PRIx64
1509                         " or supported port offloads 0x%" PRIx64
1510                         " or supported queue offloads 0x%" PRIx64,
1511                         (void *)dev,
1512                         tx_conf->offloads,
1513                         dev->data->dev_conf.txmode.offloads,
1514                         igb_get_tx_port_offloads_capa(dev),
1515                         igb_get_tx_queue_offloads_capa(dev));
1516                 return -ENOTSUP;
1517         }
1518
1519         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1520
1521         /*
1522          * Validate number of transmit descriptors.
1523          * It must not exceed the hardware maximum and must be a multiple
1524          * of IGB_TXD_ALIGN.
1525          */
1526         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1527                         (nb_desc > E1000_MAX_RING_DESC) ||
1528                         (nb_desc < E1000_MIN_RING_DESC)) {
1529                 return -EINVAL;
1530         }
1531
1532         /*
1533          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1534          * driver.
1535          */
1536         if (tx_conf->tx_free_thresh != 0)
1537                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1538                              "used for the 1G driver.");
1539         if (tx_conf->tx_rs_thresh != 0)
1540                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1541                              "used for the 1G driver.");
1542         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1543                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1544                              "consider setting the TX WTHRESH value to 4, 8, "
1545                              "or 16.");
1546
1547         /* Free memory prior to re-allocation if needed */
1548         if (dev->data->tx_queues[queue_idx] != NULL) {
1549                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1550                 dev->data->tx_queues[queue_idx] = NULL;
1551         }
1552
1553         /* First allocate the tx queue data structure */
1554         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1555                                                         RTE_CACHE_LINE_SIZE);
1556         if (txq == NULL)
1557                 return -ENOMEM;
1558
1559         /*
1560          * Allocate TX ring hardware descriptors. A memzone large enough to
1561          * handle the maximum ring size is allocated in order to allow for
1562          * resizing in later calls to the queue setup function.
1563          */
1564         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1565         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1566                                       E1000_ALIGN, socket_id);
1567         if (tz == NULL) {
1568                 igb_tx_queue_release(txq);
1569                 return -ENOMEM;
1570         }
1571
1572         txq->nb_tx_desc = nb_desc;
1573         txq->pthresh = tx_conf->tx_thresh.pthresh;
1574         txq->hthresh = tx_conf->tx_thresh.hthresh;
1575         txq->wthresh = tx_conf->tx_thresh.wthresh;
1576         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1577                 txq->wthresh = 1;
1578         txq->queue_id = queue_idx;
1579         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1580                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1581         txq->port_id = dev->data->port_id;
1582
1583         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1584         txq->tx_ring_phys_addr = tz->iova;
1585
1586         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1587         /* Allocate software ring */
1588         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1589                                    sizeof(struct igb_tx_entry) * nb_desc,
1590                                    RTE_CACHE_LINE_SIZE);
1591         if (txq->sw_ring == NULL) {
1592                 igb_tx_queue_release(txq);
1593                 return -ENOMEM;
1594         }
1595         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1596                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1597
1598         igb_reset_tx_queue(txq, dev);
1599         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1600         dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1601         dev->data->tx_queues[queue_idx] = txq;
1602         txq->offloads = tx_conf->offloads;
1603
1604         return 0;
1605 }
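
/*
 * Illustrative usage sketch (not part of the driver): configuring a port and
 * one Tx queue under the per-queue offloads API that is validated above.
 * The wrapper name, queue index, descriptor count and example offload set
 * are assumptions; because every Tx offload here is a port-level capability,
 * the per-queue request simply reuses the port-level offloads so that
 * igb_check_tx_queue_offloads() accepts it.
 */
#if 0
static int
example_setup_tx_queue(uint16_t port_id, uint64_t tx_offloads)
{
        /* e.g. tx_offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
         *                    DEV_TX_OFFLOAD_TCP_CKSUM
         */
        struct rte_eth_conf port_conf = {
                .txmode = { .offloads = tx_offloads },
        };
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
                return ret;

        rte_eth_dev_info_get(port_id, &dev_info);
        txconf = dev_info.default_txconf;
        /* Use the new offloads field rather than the legacy txq_flags
         * during the API transition, and mirror the port-level offloads.
         */
        txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
        txconf.offloads = tx_offloads;

        return rte_eth_tx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), &txconf);
}
#endif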
1606
1607 static void
1608 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1609 {
1610         unsigned i;
1611
1612         if (rxq->sw_ring != NULL) {
1613                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1614                         if (rxq->sw_ring[i].mbuf != NULL) {
1615                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1616                                 rxq->sw_ring[i].mbuf = NULL;
1617                         }
1618                 }
1619         }
1620 }
1621
1622 static void
1623 igb_rx_queue_release(struct igb_rx_queue *rxq)
1624 {
1625         if (rxq != NULL) {
1626                 igb_rx_queue_release_mbufs(rxq);
1627                 rte_free(rxq->sw_ring);
1628                 rte_free(rxq);
1629         }
1630 }
1631
1632 void
1633 eth_igb_rx_queue_release(void *rxq)
1634 {
1635         igb_rx_queue_release(rxq);
1636 }
1637
1638 static void
1639 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1640 {
1641         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1642         unsigned i;
1643
1644         /* Zero out HW ring memory */
1645         for (i = 0; i < rxq->nb_rx_desc; i++) {
1646                 rxq->rx_ring[i] = zeroed_desc;
1647         }
1648
1649         rxq->rx_tail = 0;
1650         rxq->pkt_first_seg = NULL;
1651         rxq->pkt_last_seg = NULL;
1652 }
1653
1654 uint64_t
1655 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1656 {
1657         uint64_t rx_offload_capa;
1658
1659         RTE_SET_USED(dev);
1660         rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
1661                           DEV_RX_OFFLOAD_VLAN_FILTER |
1662                           DEV_RX_OFFLOAD_IPV4_CKSUM  |
1663                           DEV_RX_OFFLOAD_UDP_CKSUM   |
1664                           DEV_RX_OFFLOAD_TCP_CKSUM   |
1665                           DEV_RX_OFFLOAD_JUMBO_FRAME |
1666                           DEV_RX_OFFLOAD_CRC_STRIP   |
1667                           DEV_RX_OFFLOAD_SCATTER;
1668
1669         return rx_offload_capa;
1670 }
1671
1672 uint64_t
1673 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1674 {
1675         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1676         uint64_t rx_queue_offload_capa;
1677
1678         switch (hw->mac.type) {
1679         case e1000_vfadapt_i350:
1680                 /*
1681                  * As only one Rx queue can be used, let the per-queue offloading
1682                  * capability be the same as the per-port offloading capability
1683                  * for convenience.
1684                  */
1685                 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1686                 break;
1687         default:
1688                 rx_queue_offload_capa = 0;
1689         }
1690         return rx_queue_offload_capa;
1691 }
1692
1693 static int
1694 igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
1695 {
1696         uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
1697         uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
1698         uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
1699
1700         if ((requested & (queue_supported | port_supported)) != requested)
1701                 return 0;
1702
1703         if ((port_offloads ^ requested) & port_supported)
1704                 return 0;
1705
1706         return 1;
1707 }
1708
1709 int
1710 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1711                          uint16_t queue_idx,
1712                          uint16_t nb_desc,
1713                          unsigned int socket_id,
1714                          const struct rte_eth_rxconf *rx_conf,
1715                          struct rte_mempool *mp)
1716 {
1717         const struct rte_memzone *rz;
1718         struct igb_rx_queue *rxq;
1719         struct e1000_hw     *hw;
1720         unsigned int size;
1721
1722         if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
1723                 PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
1724                         " don't match port offloads 0x%" PRIx64
1725                         " or supported port offloads 0x%" PRIx64
1726                         " or supported queue offloads 0x%" PRIx64,
1727                         (void *)dev,
1728                         rx_conf->offloads,
1729                         dev->data->dev_conf.rxmode.offloads,
1730                         igb_get_rx_port_offloads_capa(dev),
1731                         igb_get_rx_queue_offloads_capa(dev));
1732                 return -ENOTSUP;
1733         }
1734
1735         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1736
1737         /*
1738          * Validate number of receive descriptors.
1739          * It must not exceed hardware maximum, and must be multiple
1740          * It must not exceed the hardware maximum and must be a multiple
1741          * of IGB_RXD_ALIGN.
1742         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1743                         (nb_desc > E1000_MAX_RING_DESC) ||
1744                         (nb_desc < E1000_MIN_RING_DESC)) {
1745                 return -EINVAL;
1746         }
1747
1748         /* Free memory prior to re-allocation if needed */
1749         if (dev->data->rx_queues[queue_idx] != NULL) {
1750                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1751                 dev->data->rx_queues[queue_idx] = NULL;
1752         }
1753
1754         /* First allocate the RX queue data structure. */
1755         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1756                           RTE_CACHE_LINE_SIZE);
1757         if (rxq == NULL)
1758                 return -ENOMEM;
1759         rxq->offloads = rx_conf->offloads;
1760         rxq->mb_pool = mp;
1761         rxq->nb_rx_desc = nb_desc;
1762         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1763         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1764         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1765         if (rxq->wthresh > 0 &&
1766             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1767                 rxq->wthresh = 1;
1768         rxq->drop_en = rx_conf->rx_drop_en;
1769         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1770         rxq->queue_id = queue_idx;
1771         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1772                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1773         rxq->port_id = dev->data->port_id;
1774         rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
1775                         DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
1776
1777         /*
1778          *  Allocate RX ring hardware descriptors. A memzone large enough to
1779          *  handle the maximum ring size is allocated in order to allow for
1780          *  resizing in later calls to the queue setup function.
1781          */
1782         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1783         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1784                                       E1000_ALIGN, socket_id);
1785         if (rz == NULL) {
1786                 igb_rx_queue_release(rxq);
1787                 return -ENOMEM;
1788         }
1789         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1790         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1791         rxq->rx_ring_phys_addr = rz->iova;
1792         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1793
1794         /* Allocate software ring. */
1795         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1796                                    sizeof(struct igb_rx_entry) * nb_desc,
1797                                    RTE_CACHE_LINE_SIZE);
1798         if (rxq->sw_ring == NULL) {
1799                 igb_rx_queue_release(rxq);
1800                 return -ENOMEM;
1801         }
1802         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1803                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1804
1805         dev->data->rx_queues[queue_idx] = rxq;
1806         igb_reset_rx_queue(rxq);
1807
1808         return 0;
1809 }
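
/*
 * Illustrative usage sketch (not part of the driver): pairing this queue
 * setup routine with an mbuf pool.  The wrapper name, pool name and sizes
 * are assumptions made for this example, and the port is assumed to have
 * been configured through the new offloads API; the per-queue request
 * reuses the port-level Rx offloads so igb_check_rx_queue_offloads()
 * accepts it.
 */
#if 0
static int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id,
                const struct rte_eth_conf *port_conf)
{
        struct rte_eth_rxconf rxconf = {
                .offloads = port_conf->rxmode.offloads,
        };
        struct rte_mempool *mp;

        /* 4096 mbufs with the default data room comfortably back a
         * 512-descriptor ring plus packets in flight.
         */
        mp = rte_pktmbuf_pool_create("example_rx_pool", 4096, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mp == NULL)
                return -ENOMEM;

        return rte_eth_rx_queue_setup(port_id, queue_id, 512,
                        rte_eth_dev_socket_id(port_id), &rxconf, mp);
}
#endif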
1810
1811 uint32_t
1812 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1813 {
1814 #define IGB_RXQ_SCAN_INTERVAL 4
1815         volatile union e1000_adv_rx_desc *rxdp;
1816         struct igb_rx_queue *rxq;
1817         uint32_t desc = 0;
1818
1819         rxq = dev->data->rx_queues[rx_queue_id];
1820         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1821
1822         while ((desc < rxq->nb_rx_desc) &&
1823                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1824                 desc += IGB_RXQ_SCAN_INTERVAL;
1825                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1826                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1827                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1828                                 desc - rxq->nb_rx_desc]);
1829         }
1830
1831         return desc;
1832 }
1833
1834 int
1835 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1836 {
1837         volatile union e1000_adv_rx_desc *rxdp;
1838         struct igb_rx_queue *rxq = rx_queue;
1839         uint32_t desc;
1840
1841         if (unlikely(offset >= rxq->nb_rx_desc))
1842                 return 0;
1843         desc = rxq->rx_tail + offset;
1844         if (desc >= rxq->nb_rx_desc)
1845                 desc -= rxq->nb_rx_desc;
1846
1847         rxdp = &rxq->rx_ring[desc];
1848         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1849 }
1850
1851 int
1852 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1853 {
1854         struct igb_rx_queue *rxq = rx_queue;
1855         volatile uint32_t *status;
1856         uint32_t desc;
1857
1858         if (unlikely(offset >= rxq->nb_rx_desc))
1859                 return -EINVAL;
1860
1861         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1862                 return RTE_ETH_RX_DESC_UNAVAIL;
1863
1864         desc = rxq->rx_tail + offset;
1865         if (desc >= rxq->nb_rx_desc)
1866                 desc -= rxq->nb_rx_desc;
1867
1868         status = &rxq->rx_ring[desc].wb.upper.status_error;
1869         if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1870                 return RTE_ETH_RX_DESC_DONE;
1871
1872         return RTE_ETH_RX_DESC_AVAIL;
1873 }
1874
1875 int
1876 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1877 {
1878         struct igb_tx_queue *txq = tx_queue;
1879         volatile uint32_t *status;
1880         uint32_t desc;
1881
1882         if (unlikely(offset >= txq->nb_tx_desc))
1883                 return -EINVAL;
1884
1885         desc = txq->tx_tail + offset;
1886         if (desc >= txq->nb_tx_desc)
1887                 desc -= txq->nb_tx_desc;
1888
1889         status = &txq->tx_ring[desc].wb.status;
1890         if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1891                 return RTE_ETH_TX_DESC_DONE;
1892
1893         return RTE_ETH_TX_DESC_FULL;
1894 }
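
/*
 * Illustrative usage sketch (not part of the driver): polling the per-
 * descriptor status exported above through the generic ethdev helpers.
 * The wrapper name is an assumption made for this example;
 * rte_eth_tx_descriptor_status() is the Tx counterpart of the call shown.
 */
#if 0
static int
example_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
        uint16_t offset;

        /* Count how many descriptors past the current tail have already
         * been filled by hardware and await rte_eth_rx_burst(); the loop
         * stops on AVAIL/UNAVAIL or on an error such as -EINVAL.
         */
        for (offset = 0; ; offset++) {
                if (rte_eth_rx_descriptor_status(port_id, queue_id,
                                offset) != RTE_ETH_RX_DESC_DONE)
                        break;
        }
        return offset;
}
#endif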
1895
1896 void
1897 igb_dev_clear_queues(struct rte_eth_dev *dev)
1898 {
1899         uint16_t i;
1900         struct igb_tx_queue *txq;
1901         struct igb_rx_queue *rxq;
1902
1903         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1904                 txq = dev->data->tx_queues[i];
1905                 if (txq != NULL) {
1906                         igb_tx_queue_release_mbufs(txq);
1907                         igb_reset_tx_queue(txq, dev);
1908                 }
1909         }
1910
1911         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1912                 rxq = dev->data->rx_queues[i];
1913                 if (rxq != NULL) {
1914                         igb_rx_queue_release_mbufs(rxq);
1915                         igb_reset_rx_queue(rxq);
1916                 }
1917         }
1918 }
1919
1920 void
1921 igb_dev_free_queues(struct rte_eth_dev *dev)
1922 {
1923         uint16_t i;
1924
1925         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1926                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1927                 dev->data->rx_queues[i] = NULL;
1928         }
1929         dev->data->nb_rx_queues = 0;
1930
1931         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1932                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1933                 dev->data->tx_queues[i] = NULL;
1934         }
1935         dev->data->nb_tx_queues = 0;
1936 }
1937
1938 /**
1939  * Receive Side Scaling (RSS).
1940  * See section 7.1.1.7 in the following document:
1941  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1942  *
1943  * Principles:
1944  * The source and destination IP addresses of the IP header and the source and
1945  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1946  * against a configurable random key to compute a 32-bit RSS hash result.
1947  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1948  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1949  * RSS output index which is used as the RX queue index where to store the
1950  * received packets.
1951  * The following output is supplied in the RX write-back descriptor:
1952  *     - 32-bit result of the Microsoft RSS hash function,
1953  *     - 4-bit RSS type field.
1954  */
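
/*
 * Minimal sketch of the RETA lookup described above (illustrative only):
 * given the 32-bit hash reported in the write-back descriptor and a copy
 * of the 128-entry redirection table, the target Rx queue follows from
 * the seven LSBs of the hash.  The helper name is an assumption made for
 * this example.
 */
#if 0
static uint8_t
example_rss_queue_lookup(uint32_t rss_hash, const uint8_t reta[128])
{
        /* Seven LSBs select one of 128 RETA entries; each entry holds a
         * 3-bit Rx queue index.
         */
        return reta[rss_hash & 0x7F] & 0x07;
}
#endif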
1955
1956 /*
1957  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1958  * Used as the default key.
1959  */
1960 static uint8_t rss_intel_key[40] = {
1961         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1962         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1963         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1964         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1965         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1966 };
1967
1968 static void
1969 igb_rss_disable(struct rte_eth_dev *dev)
1970 {
1971         struct e1000_hw *hw;
1972         uint32_t mrqc;
1973
1974         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1975         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1976         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1977         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1978 }
1979
1980 static void
1981 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1982 {
1983         uint8_t  *hash_key;
1984         uint32_t rss_key;
1985         uint32_t mrqc;
1986         uint64_t rss_hf;
1987         uint16_t i;
1988
1989         hash_key = rss_conf->rss_key;
1990         if (hash_key != NULL) {
1991                 /* Fill in RSS hash key */
1992                 for (i = 0; i < 10; i++) {
1993                         rss_key  = hash_key[(i * 4)];
1994                         rss_key |= hash_key[(i * 4) + 1] << 8;
1995                         rss_key |= hash_key[(i * 4) + 2] << 16;
1996                         rss_key |= hash_key[(i * 4) + 3] << 24;
1997                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1998                 }
1999         }
2000
2001         /* Set configured hashing protocols in MRQC register */
2002         rss_hf = rss_conf->rss_hf;
2003         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
2004         if (rss_hf & ETH_RSS_IPV4)
2005                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
2006         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2007                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
2008         if (rss_hf & ETH_RSS_IPV6)
2009                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
2010         if (rss_hf & ETH_RSS_IPV6_EX)
2011                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
2012         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2013                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
2014         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2015                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2016         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2017                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
2018         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2019                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
2020         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2021                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
2022         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2023 }
2024
2025 int
2026 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
2027                         struct rte_eth_rss_conf *rss_conf)
2028 {
2029         struct e1000_hw *hw;
2030         uint32_t mrqc;
2031         uint64_t rss_hf;
2032
2033         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2034
2035         /*
2036          * Before changing anything, first check that the update RSS operation
2037          * does not attempt to disable RSS, if RSS was enabled at
2038          * initialization time, or does not attempt to enable RSS, if RSS was
2039          * disabled at initialization time.
2040          */
2041         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
2042         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2043         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
2044                 if (rss_hf != 0) /* Enable RSS */
2045                         return -(EINVAL);
2046                 return 0; /* Nothing to do */
2047         }
2048         /* RSS enabled */
2049         if (rss_hf == 0) /* Disable RSS */
2050                 return -(EINVAL);
2051         igb_hw_rss_hash_set(hw, rss_conf);
2052         return 0;
2053 }
2054
2055 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2056                               struct rte_eth_rss_conf *rss_conf)
2057 {
2058         struct e1000_hw *hw;
2059         uint8_t *hash_key;
2060         uint32_t rss_key;
2061         uint32_t mrqc;
2062         uint64_t rss_hf;
2063         uint16_t i;
2064
2065         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2066         hash_key = rss_conf->rss_key;
2067         if (hash_key != NULL) {
2068                 /* Return RSS hash key */
2069                 for (i = 0; i < 10; i++) {
2070                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2071                         hash_key[(i * 4)] = rss_key & 0x000000FF;
2072                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2073                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2074                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2075                 }
2076         }
2077
2078         /* Get RSS functions configured in MRQC register */
2079         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2080         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2081                 rss_conf->rss_hf = 0;
2082                 return 0;
2083         }
2084         rss_hf = 0;
2085         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2086                 rss_hf |= ETH_RSS_IPV4;
2087         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2088                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2089         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2090                 rss_hf |= ETH_RSS_IPV6;
2091         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2092                 rss_hf |= ETH_RSS_IPV6_EX;
2093         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2094                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2095         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2096                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2097         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2098                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2099         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2100                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2101         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2102                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2103         rss_conf->rss_hf = rss_hf;
2104         return 0;
2105 }
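
/*
 * Illustrative usage sketch (not part of the driver): updating the RSS hash
 * configuration at runtime through the generic ethdev call, which is
 * serviced by eth_igb_rss_hash_update() above.  The wrapper name and the
 * chosen hash fields are assumptions made for this example.
 */
#if 0
static int
example_update_rss(uint16_t port_id)
{
        /* Keep the current key (rss_key == NULL) and restrict hashing to
         * IPv4 TCP/UDP flows; the call is rejected if it would toggle RSS
         * on or off relative to the initial configuration.
         */
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL,
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_NONFRAG_IPV4_UDP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif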
2106
2107 static void
2108 igb_rss_configure(struct rte_eth_dev *dev)
2109 {
2110         struct rte_eth_rss_conf rss_conf;
2111         struct e1000_hw *hw;
2112         uint32_t shift;
2113         uint16_t i;
2114
2115         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2116
2117         /* Fill in redirection table. */
2118         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2119         for (i = 0; i < 128; i++) {
2120                 union e1000_reta {
2121                         uint32_t dword;
2122                         uint8_t  bytes[4];
2123                 } reta;
2124                 uint8_t q_idx;
2125
2126                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2127                                    i % dev->data->nb_rx_queues : 0);
2128                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2129                 if ((i & 3) == 3)
2130                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2131         }
2132
2133         /*
2134          * Configure the RSS key and the RSS protocols used to compute
2135          * the RSS hash of input packets.
2136          */
2137         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2138         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2139                 igb_rss_disable(dev);
2140                 return;
2141         }
2142         if (rss_conf.rss_key == NULL)
2143                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2144         igb_hw_rss_hash_set(hw, &rss_conf);
2145 }
2146
2147 /*
2148  * Check whether the MAC type supports VMDq or not.
2149  * Return 1 if it does, otherwise return 0.
2150  */
2151 static int
2152 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2153 {
2154         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155
2156         switch (hw->mac.type) {
2157         case e1000_82576:
2158         case e1000_82580:
2159         case e1000_i350:
2160                 return 1;
2161         case e1000_82540:
2162         case e1000_82541:
2163         case e1000_82542:
2164         case e1000_82543:
2165         case e1000_82544:
2166         case e1000_82545:
2167         case e1000_82546:
2168         case e1000_82547:
2169         case e1000_82571:
2170         case e1000_82572:
2171         case e1000_82573:
2172         case e1000_82574:
2173         case e1000_82583:
2174         case e1000_i210:
2175         case e1000_i211:
2176         default:
2177                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2178                 return 0;
2179         }
2180 }
2181
2182 static int
2183 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2184 {
2185         struct rte_eth_vmdq_rx_conf *cfg;
2186         struct e1000_hw *hw;
2187         uint32_t mrqc, vt_ctl, vmolr, rctl;
2188         int i;
2189
2190         PMD_INIT_FUNC_TRACE();
2191
2192         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2193         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2194
2195         /* Check if the MAC type supports VMDq; a return value of 0 means it does not */
2196         if (igb_is_vmdq_supported(dev) == 0)
2197                 return -1;
2198
2199         igb_rss_disable(dev);
2200
2201         /* RCTL: enable VLAN filter */
2202         rctl = E1000_READ_REG(hw, E1000_RCTL);
2203         rctl |= E1000_RCTL_VFE;
2204         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2205
2206         /* MRQC: enable vmdq */
2207         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2208         mrqc |= E1000_MRQC_ENABLE_VMDQ;
2209         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2210
2211         /* VTCTL:  pool selection according to VLAN tag */
2212         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2213         if (cfg->enable_default_pool)
2214                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2215         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2216         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2217
2218         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2219                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2220                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2221                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2222                         E1000_VMOLR_MPME);
2223
2224                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2225                         vmolr |= E1000_VMOLR_AUPE;
2226                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2227                         vmolr |= E1000_VMOLR_ROMPE;
2228                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2229                         vmolr |= E1000_VMOLR_ROPE;
2230                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2231                         vmolr |= E1000_VMOLR_BAM;
2232                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2233                         vmolr |= E1000_VMOLR_MPME;
2234
2235                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2236         }
2237
2238         /*
2239          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2240          * Both 82576 and 82580 support it.
2241          */
2242         if (hw->mac.type != e1000_i350) {
2243                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2244                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2245                         vmolr |= E1000_VMOLR_STRVLAN;
2246                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2247                 }
2248         }
2249
2250         /* VFTA - enable all vlan filters */
2251         for (i = 0; i < IGB_VFTA_SIZE; i++)
2252                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2253
2254         /* VFRE: enable 8 pools for Rx; both 82576 and i350 support it */
2255         if (hw->mac.type != e1000_82580)
2256                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2257
2258         /*
2259          * RAH/RAL - allow pools to read specific mac addresses
2260          * In this case, all pools should be able to read from mac addr 0
2261          */
2262         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2263         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2264
2265         /* VLVF: set up filters for vlan tags as configured */
2266         for (i = 0; i < cfg->nb_pool_maps; i++) {
2267                 /* set vlan id in VF register and set the valid bit */
2268                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2269                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2270                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2271                         E1000_VLVF_POOLSEL_MASK)));
2272         }
2273
2274         E1000_WRITE_FLUSH(hw);
2275
2276         return 0;
2277 }
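
/*
 * Illustrative sketch (not part of the driver) of the port configuration
 * that drives igb_vmdq_rx_hw_configure() above.  The wrapper name, the
 * VLAN id of 100 and the pool selection are assumptions made for this
 * example.
 */
#if 0
static void
example_vmdq_port_conf(struct rte_eth_conf *port_conf)
{
        struct rte_eth_vmdq_rx_conf *cfg =
                &port_conf->rx_adv_conf.vmdq_rx_conf;

        /* Select the VMDq-only Rx path handled above. */
        port_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;

        cfg->nb_queue_pools = ETH_8_POOLS;
        cfg->enable_default_pool = 0;
        cfg->nb_pool_maps = 1;
        /* Steer VLAN 100 into pool 0 through the VLVF filters above. */
        cfg->pool_map[0].vlan_id = 100;
        cfg->pool_map[0].pools = 1ULL << 0;
        cfg->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
}
#endif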
2278
2279
2280 /*********************************************************************
2281  *
2282  *  Enable receive unit.
2283  *
2284  **********************************************************************/
2285
2286 static int
2287 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2288 {
2289         struct igb_rx_entry *rxe = rxq->sw_ring;
2290         uint64_t dma_addr;
2291         unsigned i;
2292
2293         /* Initialize software ring entries. */
2294         for (i = 0; i < rxq->nb_rx_desc; i++) {
2295                 volatile union e1000_adv_rx_desc *rxd;
2296                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2297
2298                 if (mbuf == NULL) {
2299                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2300                                      "queue_id=%hu", rxq->queue_id);
2301                         return -ENOMEM;
2302                 }
2303                 dma_addr =
2304                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2305                 rxd = &rxq->rx_ring[i];
2306                 rxd->read.hdr_addr = 0;
2307                 rxd->read.pkt_addr = dma_addr;
2308                 rxe[i].mbuf = mbuf;
2309         }
2310
2311         return 0;
2312 }
2313
2314 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2315 static int
2316 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2317 {
2318         struct e1000_hw *hw =
2319                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2320         uint32_t mrqc;
2321
2322         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2323                 /*
2324                  * SRIOV active scheme
2325                  * FIXME if support RSS together with VMDq & SRIOV
2326                  * FIXME: add support for RSS together with VMDq & SRIOV
2327                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2328                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2329                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2330                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2331         } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2332                 /*
2333                  * SRIOV inactive scheme
2334                  */
2335                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2336                         case ETH_MQ_RX_RSS:
2337                                 igb_rss_configure(dev);
2338                                 break;
2339                         case ETH_MQ_RX_VMDQ_ONLY:
2340                                 /* Configure general VMDq-only RX parameters */
2341                                 igb_vmdq_rx_hw_configure(dev);
2342                                 break;
2343                         case ETH_MQ_RX_NONE:
2344                                 /* if mq_mode is none, disable RSS mode. */
2345                         default:
2346                                 igb_rss_disable(dev);
2347                                 break;
2348                 }
2349         }
2350
2351         return 0;
2352 }
2353
2354 int
2355 eth_igb_rx_init(struct rte_eth_dev *dev)
2356 {
2357         struct rte_eth_rxmode *rxmode;
2358         struct e1000_hw     *hw;
2359         struct igb_rx_queue *rxq;
2360         uint32_t rctl;
2361         uint32_t rxcsum;
2362         uint32_t srrctl;
2363         uint16_t buf_size;
2364         uint16_t rctl_bsize;
2365         uint16_t i;
2366         int ret;
2367
2368         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2369         srrctl = 0;
2370
2371         /*
2372          * Make sure receives are disabled while setting
2373          * up the descriptor ring.
2374          */
2375         rctl = E1000_READ_REG(hw, E1000_RCTL);
2376         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2377
2378         rxmode = &dev->data->dev_conf.rxmode;
2379
2380         /*
2381          * Configure support of jumbo frames, if any.
2382          */
2383         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2384                 rctl |= E1000_RCTL_LPE;
2385
2386                 /*
2387                  * Set the maximum packet length by default; it may be updated
2388                  * later when dual VLAN is enabled or disabled.
2389                  */
2390                 E1000_WRITE_REG(hw, E1000_RLPML,
2391                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2392                                                 VLAN_TAG_SIZE);
2393         } else
2394                 rctl &= ~E1000_RCTL_LPE;
2395
2396         /* Configure and enable each RX queue. */
2397         rctl_bsize = 0;
2398         dev->rx_pkt_burst = eth_igb_recv_pkts;
2399         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2400                 uint64_t bus_addr;
2401                 uint32_t rxdctl;
2402
2403                 rxq = dev->data->rx_queues[i];
2404
2405                 rxq->flags = 0;
2406                 /*
2407                  * i350 and i354 vlan packets have vlan tags byte swapped.
2408                  */
2409                 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2410                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2411                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2412                 } else {
2413                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2414                 }
2415
2416                 /* Allocate buffers for descriptor rings and set up queue */
2417                 ret = igb_alloc_rx_queue_mbufs(rxq);
2418                 if (ret)
2419                         return ret;
2420
2421                 /*
2422                  * Reset crc_len in case it was changed after queue setup by a
2423                  *  call to configure
2424                  */
2425                 rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
2426                                 DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
2427
2428                 bus_addr = rxq->rx_ring_phys_addr;
2429                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2430                                 rxq->nb_rx_desc *
2431                                 sizeof(union e1000_adv_rx_desc));
2432                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2433                                 (uint32_t)(bus_addr >> 32));
2434                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2435
2436                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2437
2438                 /*
2439                  * Configure RX buffer size.
2440                  */
2441                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2442                         RTE_PKTMBUF_HEADROOM);
2443                 if (buf_size >= 1024) {
2444                         /*
2445                          * Configure the BSIZEPACKET field of the SRRCTL
2446                          * register of the queue.
2447                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2448                          * If this field is equal to 0b, then RCTL.BSIZE
2449                          * determines the RX packet buffer size.
2450                          */
2451                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2452                                    E1000_SRRCTL_BSIZEPKT_MASK);
2453                         buf_size = (uint16_t) ((srrctl &
2454                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2455                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2456
2457                         /* Add dual VLAN length to support dual VLAN */
2458                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2459                                                 2 * VLAN_TAG_SIZE) > buf_size){
2460                                 if (!dev->data->scattered_rx)
2461                                         PMD_INIT_LOG(DEBUG,
2462                                                      "forcing scatter mode");
2463                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2464                                 dev->data->scattered_rx = 1;
2465                         }
2466                 } else {
2467                         /*
2468                          * Use BSIZE field of the device RCTL register.
2469                          */
2470                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2471                                 rctl_bsize = buf_size;
2472                         if (!dev->data->scattered_rx)
2473                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2474                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2475                         dev->data->scattered_rx = 1;
2476                 }
2477
2478                 /* Set if packets are dropped when no descriptors available */
2479                 if (rxq->drop_en)
2480                         srrctl |= E1000_SRRCTL_DROP_EN;
2481
2482                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2483
2484                 /* Enable this RX queue. */
2485                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2486                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2487                 rxdctl &= 0xFFF00000;
2488                 rxdctl |= (rxq->pthresh & 0x1F);
2489                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2490                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2491                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2492         }
2493
2494         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2495                 if (!dev->data->scattered_rx)
2496                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2497                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2498                 dev->data->scattered_rx = 1;
2499         }
2500
2501         /*
2502          * Setup BSIZE field of RCTL register, if needed.
2503          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2504          * register, since the code above configures the SRRCTL register of
2505          * the RX queue in such a case.
2506          * All configurable sizes are:
2507          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2508          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2509          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2510          *  2048: rctl |= E1000_RCTL_SZ_2048;
2511          *  1024: rctl |= E1000_RCTL_SZ_1024;
2512          *   512: rctl |= E1000_RCTL_SZ_512;
2513          *   256: rctl |= E1000_RCTL_SZ_256;
2514          */
2515         if (rctl_bsize > 0) {
2516                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2517                         rctl |= E1000_RCTL_SZ_512;
2518                 else /* 256 <= buf_size < 512 - use 256 */
2519                         rctl |= E1000_RCTL_SZ_256;
2520         }
2521
2522         /*
2523          * Configure RSS if device configured with multiple RX queues.
2524          */
2525         igb_dev_mq_rx_configure(dev);
2526
2527         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2528         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2529
2530         /*
2531          * Setup the Checksum Register.
2532          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2533          */
2534         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2535         rxcsum |= E1000_RXCSUM_PCSD;
2536
2537         /* Enable both L3/L4 rx checksum offload */
2538         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2539                 rxcsum |= E1000_RXCSUM_IPOFL;
2540         else
2541                 rxcsum &= ~E1000_RXCSUM_IPOFL;
2542         if (rxmode->offloads &
2543                 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2544                 rxcsum |= E1000_RXCSUM_TUOFL;
2545         else
2546                 rxcsum &= ~E1000_RXCSUM_TUOFL;
2547         if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2548                 rxcsum |= E1000_RXCSUM_CRCOFL;
2549         else
2550                 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2551
2552         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2553
2554         /* Setup the Receive Control Register. */
2555         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
2556                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2557
2558                 /* set STRCRC bit in all queues */
2559                 if (hw->mac.type == e1000_i350 ||
2560                     hw->mac.type == e1000_i210 ||
2561                     hw->mac.type == e1000_i211 ||
2562                     hw->mac.type == e1000_i354) {
2563                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2564                                 rxq = dev->data->rx_queues[i];
2565                                 uint32_t dvmolr = E1000_READ_REG(hw,
2566                                         E1000_DVMOLR(rxq->reg_idx));
2567                                 dvmolr |= E1000_DVMOLR_STRCRC;
2568                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2569                         }
2570                 }
2571         } else {
2572                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2573
2574                 /* clear STRCRC bit in all queues */
2575                 if (hw->mac.type == e1000_i350 ||
2576                     hw->mac.type == e1000_i210 ||
2577                     hw->mac.type == e1000_i211 ||
2578                     hw->mac.type == e1000_i354) {
2579                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2580                                 rxq = dev->data->rx_queues[i];
2581                                 uint32_t dvmolr = E1000_READ_REG(hw,
2582                                         E1000_DVMOLR(rxq->reg_idx));
2583                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2584                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2585                         }
2586                 }
2587         }
2588
2589         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2590         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2591                 E1000_RCTL_RDMTS_HALF |
2592                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2593
2594         /* Make sure VLAN Filters are off. */
2595         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2596                 rctl &= ~E1000_RCTL_VFE;
2597         /* Don't store bad packets. */
2598         rctl &= ~E1000_RCTL_SBP;
2599
2600         /* Enable Receives. */
2601         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2602
2603         /*
2604          * Setup the HW Rx Head and Tail Descriptor Pointers.
2605          * This needs to be done after enable.
2606          */
2607         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2608                 rxq = dev->data->rx_queues[i];
2609                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2610                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2611         }
2612
2613         return 0;
2614 }
2615
2616 /*********************************************************************
2617  *
2618  *  Enable transmit unit.
2619  *
2620  **********************************************************************/
2621 void
2622 eth_igb_tx_init(struct rte_eth_dev *dev)
2623 {
2624         struct e1000_hw     *hw;
2625         struct igb_tx_queue *txq;
2626         uint32_t tctl;
2627         uint32_t txdctl;
2628         uint16_t i;
2629
2630         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2631
2632         /* Setup the Base and Length of the Tx Descriptor Rings. */
2633         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2634                 uint64_t bus_addr;
2635                 txq = dev->data->tx_queues[i];
2636                 bus_addr = txq->tx_ring_phys_addr;
2637
2638                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2639                                 txq->nb_tx_desc *
2640                                 sizeof(union e1000_adv_tx_desc));
2641                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2642                                 (uint32_t)(bus_addr >> 32));
2643                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2644
2645                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2646                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2647                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2648
2649                 /* Setup Transmit threshold registers. */
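                /*
                 * PTHRESH sits in bits 4:0, HTHRESH in bits 12:8 and
                 * WTHRESH in bits 20:16 of TXDCTL.
                 */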
2650                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2651                 txdctl |= txq->pthresh & 0x1F;
2652                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2653                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2654                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2655                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2656         }
2657
2658         /* Program the Transmit Control Register. */
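        /*
         * Clearing CT and OR-ing the collision threshold back in reprograms
         * the collision threshold; PSP pads short packets, RTLC retransmits
         * on late collisions and EN turns the transmitter on.
         */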
2659         tctl = E1000_READ_REG(hw, E1000_TCTL);
2660         tctl &= ~E1000_TCTL_CT;
2661         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2662                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2663
2664         e1000_config_collision_dist(hw);
2665
2666         /* This write will effectively turn on the transmit unit. */
2667         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2668 }
2669
2670 /*********************************************************************
2671  *
2672  *  Enable VF receive unit.
2673  *
2674  **********************************************************************/
2675 int
2676 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2677 {
2678         struct e1000_hw     *hw;
2679         struct igb_rx_queue *rxq;
2680         uint32_t srrctl;
2681         uint16_t buf_size;
2682         uint16_t rctl_bsize;
2683         uint16_t i;
2684         int ret;
2685
2686         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2687
2688         /* setup MTU */
2689         e1000_rlpml_set_vf(hw,
2690                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2691                 VLAN_TAG_SIZE));
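        /* The VF frame-size limit set above leaves room for one VLAN tag. */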
2692
2693         /* Configure and enable each RX queue. */
2694         rctl_bsize = 0;
2695         dev->rx_pkt_burst = eth_igb_recv_pkts;
2696         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2697                 uint64_t bus_addr;
2698                 uint32_t rxdctl;
2699
2700                 rxq = dev->data->rx_queues[i];
2701
2702                 rxq->flags = 0;
2703                 /*
2704                  * i350 VF loopback VLAN packets have their VLAN tags byte swapped.
2705                  */
2706                 if (hw->mac.type == e1000_vfadapt_i350) {
2707                         rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2708                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2709                 } else {
2710                         PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2711                 }
2712
2713                 /* Allocate buffers for descriptor rings and set up queue */
2714                 ret = igb_alloc_rx_queue_mbufs(rxq);
2715                 if (ret)
2716                         return ret;
2717
2718                 bus_addr = rxq->rx_ring_phys_addr;
2719                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2720                                 rxq->nb_rx_desc *
2721                                 sizeof(union e1000_adv_rx_desc));
2722                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2723                                 (uint32_t)(bus_addr >> 32));
2724                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2725
2726                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2727
2728                 /*
2729                  * Configure RX buffer size.
2730                  */
2731                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2732                         RTE_PKTMBUF_HEADROOM);
2733                 if (buf_size >= 1024) {
2734                         /*
2735                          * Configure the BSIZEPACKET field of the SRRCTL
2736                          * register of the queue.
2737                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2738                          * If this field is equal to 0b, then RCTL.BSIZE
2739                          * determines the RX packet buffer size.
2740                          */
2741                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2742                                    E1000_SRRCTL_BSIZEPKT_MASK);
2743                         buf_size = (uint16_t) ((srrctl &
2744                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2745                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
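                        /*
                         * e.g. with the 1 KB granularity above, a 2048-byte
                         * mbuf data room programs BSIZEPACKET = 2 and leaves
                         * buf_size at 2048 bytes.
                         */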
2746
2747                         /* Add dual VLAN tag length to account for QinQ (dual VLAN) frames */
2748                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2749                                                 2 * VLAN_TAG_SIZE) > buf_size){
2750                                 if (!dev->data->scattered_rx)
2751                                         PMD_INIT_LOG(DEBUG,
2752                                                      "forcing scatter mode");
2753                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2754                                 dev->data->scattered_rx = 1;
2755                         }
2756                 } else {
2757                         /*
2758                          * Use BSIZE field of the device RCTL register.
2759                          */
2760                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2761                                 rctl_bsize = buf_size;
2762                         if (!dev->data->scattered_rx)
2763                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2764                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2765                         dev->data->scattered_rx = 1;
2766                 }
2767
2768                 /* Drop incoming packets when no RX descriptors are available, if enabled */
2769                 if (rxq->drop_en)
2770                         srrctl |= E1000_SRRCTL_DROP_EN;
2771
2772                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2773
2774                 /* Enable this RX queue. */
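                /*
                 * RXDCTL carries the queue-enable flag plus the prefetch,
                 * host and write-back thresholds; the low 20 threshold bits
                 * are cleared before being reprogrammed.
                 */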
2775                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2776                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2777                 rxdctl &= 0xFFF00000;
2778                 rxdctl |= (rxq->pthresh & 0x1F);
2779                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2780                 if (hw->mac.type == e1000_vfadapt) {
2781                         /*
2782                          * Workaround for the 82576 VF erratum:
2783                          * force WTHRESH to 1 so that descriptor
2784                          * write-back is always triggered.
2785                          */
2786                         rxdctl |= 0x10000;
2787                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2788                 } else
2790                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2791                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2792         }
2793
2794         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2795                 if (!dev->data->scattered_rx)
2796                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2797                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2798                 dev->data->scattered_rx = 1;
2799         }
2800
2801         /*
2802          * Setup the HW Rx Head and Tail Descriptor Pointers.
2803          * This needs to be done after enable.
2804          */
2805         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2806                 rxq = dev->data->rx_queues[i];
2807                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2808                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2809         }
2810
2811         return 0;
2812 }
2813
2814 /*********************************************************************
2815  *
2816  *  Enable VF transmit unit.
2817  *
2818  **********************************************************************/
2819 void
2820 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2821 {
2822         struct e1000_hw     *hw;
2823         struct igb_tx_queue *txq;
2824         uint32_t txdctl;
2825         uint16_t i;
2826
2827         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2828
2829         /* Setup the Base and Length of the Tx Descriptor Rings. */
2830         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2831                 uint64_t bus_addr;
2832
2833                 txq = dev->data->tx_queues[i];
2834                 bus_addr = txq->tx_ring_phys_addr;
2835                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2836                                 txq->nb_tx_desc *
2837                                 sizeof(union e1000_adv_tx_desc));
2838                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2839                                 (uint32_t)(bus_addr >> 32));
2840                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2841
2842                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2843                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2844                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2845
2846                 /* Setup Transmit threshold registers. */
2847                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2848                 txdctl |= txq->pthresh & 0x1F;
2849                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2850                 if (hw->mac.type == e1000_82576) {
2851                         /*
2852                          * Workaround for the 82576 VF erratum:
2853                          * force WTHRESH to 1 so that descriptor
2854                          * write-back is always triggered.
2855                          */
2856                         txdctl |= 0x10000;
2857                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2858                 } else
2860                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2861                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2862                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2863         }
2864
2865 }
2866
2867 void
2868 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2869         struct rte_eth_rxq_info *qinfo)
2870 {
2871         struct igb_rx_queue *rxq;
2872
2873         rxq = dev->data->rx_queues[queue_id];
2874
2875         qinfo->mp = rxq->mb_pool;
2876         qinfo->scattered_rx = dev->data->scattered_rx;
2877         qinfo->nb_desc = rxq->nb_rx_desc;
2878
2879         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2880         qinfo->conf.rx_drop_en = rxq->drop_en;
2881         qinfo->conf.offloads = rxq->offloads;
2882 }
2883
2884 void
2885 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2886         struct rte_eth_txq_info *qinfo)
2887 {
2888         struct igb_tx_queue *txq;
2889
2890         txq = dev->data->tx_queues[queue_id];
2891
2892         qinfo->nb_desc = txq->nb_tx_desc;
2893
2894         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2895         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2896         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2897         qinfo->conf.offloads = txq->offloads;
2898 }
2899
2900 int
2901 igb_config_rss_filter(struct rte_eth_dev *dev,
2902                 struct igb_rte_flow_rss_conf *conf, bool add)
2903 {
2904         uint32_t shift;
2905         uint16_t i, j;
2906         struct rte_eth_rss_conf rss_conf = conf->rss_conf;
2907         struct e1000_filter_info *filter_info =
2908                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2909         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2910
2913         if (!add) {
2914                 if (memcmp(conf, &filter_info->rss_info,
2915                         sizeof(struct igb_rte_flow_rss_conf)) == 0) {
2916                         igb_rss_disable(dev);
2917                         memset(&filter_info->rss_info, 0,
2918                                 sizeof(struct igb_rte_flow_rss_conf));
2919                         return 0;
2920                 }
2921                 return -EINVAL;
2922         }
2923
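        /* Only one RSS filter configuration can be stored at a time. */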
2924         if (filter_info->rss_info.num)
2925                 return -EINVAL;
2926
2927         /* Fill in redirection table. */
2928         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
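        /*
         * The 128-entry redirection table is packed four entries per 32-bit
         * RETA register, cycling through conf->queue[]; 82575 expects the
         * queue index shifted left by 6.
         */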
2929         for (i = 0, j = 0; i < 128; i++, j++) {
2930                 union e1000_reta {
2931                         uint32_t dword;
2932                         uint8_t  bytes[4];
2933                 } reta;
2934                 uint8_t q_idx;
2935
2936                 if (j == conf->num)
2937                         j = 0;
2938                 q_idx = conf->queue[j];
2939                 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2940                 if ((i & 3) == 3)
2941                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2942         }
2943
2944         /* Configure the RSS key and the RSS protocols used to compute
2945          * the RSS hash of input packets.
2946          */
2947         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2948                 igb_rss_disable(dev);
2949                 return 0;
2950         }
2951         if (rss_conf.rss_key == NULL)
2952                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2953         igb_hw_rss_hash_set(hw, &rss_conf);
2954
2955         rte_memcpy(&filter_info->rss_info,
2956                 conf, sizeof(struct igb_rte_flow_rss_conf));
2957
2958         return 0;
2959 }