4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
75 /* Bit mask to indicate which bits are required for building the TX context */
76 #define IGB_TX_OFFLOAD_MASK ( \
81 static inline struct rte_mbuf *
82 rte_rxmbuf_alloc(struct rte_mempool *mp)
86 m = __rte_mbuf_raw_alloc(mp);
87 __rte_mbuf_sanity_check_raw(m, 0);
91 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
92 (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
94 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
95 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
98 * Structure associated with each descriptor of the RX ring of a RX queue.
100 struct igb_rx_entry {
101 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
105 * Structure associated with each descriptor of the TX ring of a TX queue.
107 struct igb_tx_entry {
108 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
109 uint16_t next_id; /**< Index of next descriptor in ring. */
110 uint16_t last_id; /**< Index of last scattered descriptor. */
114 * Structure associated with each RX queue.
116 struct igb_rx_queue {
117 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
118 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
119 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
120 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
121 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
122 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
123 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
124 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
125 uint16_t nb_rx_desc; /**< number of RX descriptors. */
126 uint16_t rx_tail; /**< current value of RDT register. */
127 uint16_t nb_rx_hold; /**< number of held free RX desc. */
128 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
129 uint16_t queue_id; /**< RX queue index. */
130 uint16_t reg_idx; /**< RX queue register index. */
131 uint8_t port_id; /**< Device port identifier. */
132 uint8_t pthresh; /**< Prefetch threshold register. */
133 uint8_t hthresh; /**< Host threshold register. */
134 uint8_t wthresh; /**< Write-back threshold register. */
135 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
136 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
140 * Hardware context number
142 enum igb_advctx_num {
143 IGB_CTX_0 = 0, /**< CTX0 */
144 IGB_CTX_1 = 1, /**< CTX1 */
145 IGB_CTX_NUM = 2, /**< CTX_NUM */
148 /** Offload features */
149 union igb_vlan_macip {
152 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
154 /**< VLAN Tag Control Identifier (CPU order). */
159 * Compare mask for vlan_macip_len.data,
160 * should be in sync with igb_vlan_macip.f layout.
162 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
163 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
164 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
165 /** MAC+IP length. */
166 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
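/*
 * For example (on a little-endian host): the masks above mean .data packs the
 * VLAN TCI in bits 31:16, the 7-bit L2 length in bits 15:9 and the 9-bit L3
 * length in bits 8:0. An untagged IPv4 packet with l2_len = 14 and
 * l3_len = 20 thus yields data = (14 << 9) | 20 = 0x1C14, and
 * TX_MACIP_LEN_CMP_MASK selects exactly those length bits when comparing
 * against a cached context.
 */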
169 * Structure to check whether a new context descriptor needs to be built.
171 struct igb_advctx_info {
172 uint64_t flags; /**< ol_flags related to context build. */
173 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
174 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
178 * Structure associated with each TX queue.
180 struct igb_tx_queue {
181 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
182 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
183 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
184 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
185 uint32_t txd_type; /**< Device-specific TXD type */
186 uint16_t nb_tx_desc; /**< number of TX descriptors. */
187 uint16_t tx_tail; /**< Current value of TDT register. */
189 /**< Index of first used TX descriptor. */
190 uint16_t queue_id; /**< TX queue index. */
191 uint16_t reg_idx; /**< TX queue register index. */
192 uint8_t port_id; /**< Device port identifier. */
193 uint8_t pthresh; /**< Prefetch threshold register. */
194 uint8_t hthresh; /**< Host threshold register. */
195 uint8_t wthresh; /**< Write-back threshold register. */
197 /**< Current used hardware descriptor. */
199 /**< Start context position for transmit queue. */
200 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
201 /**< Hardware context history.*/
205 #define RTE_PMD_USE_PREFETCH
208 #ifdef RTE_PMD_USE_PREFETCH
209 #define rte_igb_prefetch(p) rte_prefetch0(p)
211 #define rte_igb_prefetch(p) do {} while(0)
214 #ifdef RTE_PMD_PACKET_PREFETCH
215 #define rte_packet_prefetch(p) rte_prefetch1(p)
217 #define rte_packet_prefetch(p) do {} while(0)
221 * Macro for VMDq feature for 1 GbE NIC.
223 #define E1000_VMOLR_SIZE (8)
225 /*********************************************************************
229 **********************************************************************/
232 * Advanced context descriptors are almost the same between igb/ixgbe.
233 * This is kept as a separate function; look for optimization opportunities here.
234 * Rework is required to go with the pre-defined values.
238 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
239 volatile struct e1000_adv_tx_context_desc *ctx_txd,
240 uint64_t ol_flags, uint32_t vlan_macip_lens)
242 uint32_t type_tucmd_mlhl;
243 uint32_t mss_l4len_idx;
244 uint32_t ctx_idx, ctx_curr;
247 ctx_curr = txq->ctx_curr;
248 ctx_idx = ctx_curr + txq->ctx_start;
253 if (ol_flags & PKT_TX_VLAN_PKT) {
254 cmp_mask |= TX_VLAN_CMP_MASK;
257 if (ol_flags & PKT_TX_IP_CKSUM) {
258 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
259 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
262 /* Specify which HW CTX to upload. */
263 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264 switch (ol_flags & PKT_TX_L4_MASK) {
265 case PKT_TX_UDP_CKSUM:
266 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
267 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
268 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
269 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
271 case PKT_TX_TCP_CKSUM:
272 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
273 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
277 case PKT_TX_SCTP_CKSUM:
278 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
279 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
284 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
285 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
289 txq->ctx_cache[ctx_curr].flags = ol_flags;
290 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
291 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
292 vlan_macip_lens & cmp_mask;
294 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
295 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
296 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
297 ctx_txd->seqnum_seed = 0;
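/*
 * Worked example: for an IPv4/TCP packet requesting PKT_TX_IP_CKSUM |
 * PKT_TX_TCP_CKSUM with l2_len = 14 and l3_len = 20, the caller passes
 * vlan_macip_lens = (14 << 9) | 20 and the code above builds
 *   type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 | E1000_ADVTXD_TUCMD_L4T_TCP |
 *                     E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT
 *   mss_l4len_idx   = (ctx_idx << E1000_ADVTXD_IDX_SHIFT) |
 *                     (sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT)
 * before caching the flags and the masked lengths for later reuse.
 */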
301 * Check which hardware context can be used. Use the existing match
302 * or create a new context descriptor.
304 static inline uint32_t
305 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
306 uint32_t vlan_macip_lens)
308 /* If it matches the current context */
309 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
310 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
311 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
312 return txq->ctx_curr;
315 /* If it matches the second context */
316 txq->ctx_curr ^= 1;
317 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320 return txq->ctx_curr;
323 /* Mismatch: a new context descriptor must be built */
324 return (IGB_CTX_NUM);
327 static inline uint32_t
328 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
330 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
331 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
334 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
335 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
339 static inline uint32_t
340 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
342 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
343 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
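/*
 * Both helpers above index a two-entry lookup table with a boolean instead
 * of taking a conditional branch. For example,
 * tx_desc_vlan_flags_to_cmdtype(PKT_TX_VLAN_PKT) returns
 * E1000_ADVTXD_DCMD_VLE, while any ol_flags value without PKT_TX_VLAN_PKT
 * set returns 0.
 */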
347 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
350 struct igb_tx_queue *txq;
351 struct igb_tx_entry *sw_ring;
352 struct igb_tx_entry *txe, *txn;
353 volatile union e1000_adv_tx_desc *txr;
354 volatile union e1000_adv_tx_desc *txd;
355 struct rte_mbuf *tx_pkt;
356 struct rte_mbuf *m_seg;
357 union igb_vlan_macip vlan_macip_lens;
365 uint64_t buf_dma_addr;
366 uint32_t olinfo_status;
367 uint32_t cmd_type_len;
376 uint32_t new_ctx = 0;
380 sw_ring = txq->sw_ring;
382 tx_id = txq->tx_tail;
383 txe = &sw_ring[tx_id];
385 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
387 pkt_len = tx_pkt->pkt_len;
389 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
392 * The number of descriptors that must be allocated for a
393 * packet is the number of segments of that packet, plus 1
394 * Context Descriptor for the VLAN Tag Identifier, if any.
395 * Determine the last TX descriptor to allocate in the TX ring
396 * for the packet, starting from the current position (tx_id)
399 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
401 ol_flags = tx_pkt->ol_flags;
402 l2_l3_len.l2_len = tx_pkt->l2_len;
403 l2_l3_len.l3_len = tx_pkt->l3_len;
404 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
405 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
406 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
408 /* If a Context Descriptor needs to be built. */
410 ctx = what_advctx_update(txq, tx_ol_req,
411 vlan_macip_lens.data);
412 /* Only allocate a context descriptor if required. */
413 new_ctx = (ctx == IGB_CTX_NUM);
415 tx_last = (uint16_t) (tx_last + new_ctx);
417 if (tx_last >= txq->nb_tx_desc)
418 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
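/*
 * Example: with nb_tx_desc = 512, tx_id = 510, a 3-segment packet and one
 * extra context descriptor, tx_last first becomes 510 + 3 - 1 + 1 = 513 and
 * then wraps to 1.
 */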
420 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
421 " tx_first=%u tx_last=%u",
422 (unsigned) txq->port_id,
423 (unsigned) txq->queue_id,
429 * Check if there are enough free descriptors in the TX ring
430 * to transmit the next packet.
431 * This operation is based on the two following rules:
433 * 1- Only check that the last needed TX descriptor can be
434 * allocated (by construction, if that descriptor is free,
435 * all intermediate ones are also free).
437 * For this purpose, the index of the last TX descriptor
438 * used for a packet (the "last descriptor" of a packet)
439 * is recorded in the TX entries (the last one included)
440 * that are associated with all TX descriptors allocated
443 * 2- Avoid allocating the last free TX descriptor of the
444 * ring, in order to never set the TDT register with the
445 * same value stored in parallel by the NIC in the TDH
446 * register, which would make the TX engine of the NIC
447 * enter a deadlock situation.
449 * By extension, avoid allocating a free descriptor that
450 * belongs to the last set of free descriptors allocated
451 * to the same packet previously transmitted.
455 * The "last descriptor" of the previously sent packet, if any,
456 * which used the last descriptor to allocate.
458 tx_end = sw_ring[tx_last].last_id;
461 * The next descriptor following that "last descriptor" in the
464 tx_end = sw_ring[tx_end].next_id;
467 * The "last descriptor" associated with that next descriptor.
469 tx_end = sw_ring[tx_end].last_id;
472 * Check that this descriptor is free.
474 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
481 * Set common flags of all TX Data Descriptors.
483 * The following bits must be set in all Data Descriptors:
484 * - E1000_ADVTXD_DTYP_DATA
485 * - E1000_ADVTXD_DCMD_DEXT
487 * The following bits must be set in the first Data Descriptor
488 * and are ignored in the other ones:
489 * - E1000_ADVTXD_DCMD_IFCS
490 * - E1000_ADVTXD_MAC_1588
491 * - E1000_ADVTXD_DCMD_VLE
493 * The following bits must only be set in the last Data
495 * - E1000_TXD_CMD_EOP
497 * The following bits can be set in any Data Descriptor, but
498 * are only set in the last Data Descriptor:
501 cmd_type_len = txq->txd_type |
502 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
503 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
504 #if defined(RTE_LIBRTE_IEEE1588)
505 if (ol_flags & PKT_TX_IEEE1588_TMST)
506 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
509 /* Setup TX Advanced context descriptor if required */
511 volatile struct e1000_adv_tx_context_desc *
514 ctx_txd = (volatile struct
515 e1000_adv_tx_context_desc *)
518 txn = &sw_ring[txe->next_id];
519 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
521 if (txe->mbuf != NULL) {
522 rte_pktmbuf_free_seg(txe->mbuf);
526 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
527 vlan_macip_lens.data);
529 txe->last_id = tx_last;
530 tx_id = txe->next_id;
534 /* Setup the TX Advanced Data Descriptor */
535 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
536 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
537 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
542 txn = &sw_ring[txe->next_id];
545 if (txe->mbuf != NULL)
546 rte_pktmbuf_free_seg(txe->mbuf);
550 * Set up transmit descriptor.
552 slen = (uint16_t) m_seg->data_len;
553 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
554 txd->read.buffer_addr =
555 rte_cpu_to_le_64(buf_dma_addr);
556 txd->read.cmd_type_len =
557 rte_cpu_to_le_32(cmd_type_len | slen);
558 txd->read.olinfo_status =
559 rte_cpu_to_le_32(olinfo_status);
560 txe->last_id = tx_last;
561 tx_id = txe->next_id;
564 } while (m_seg != NULL);
567 * The last packet data descriptor needs End Of Packet (EOP)
568 * and Report Status (RS).
570 txd->read.cmd_type_len |=
571 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
577 * Set the Transmit Descriptor Tail (TDT).
579 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
580 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
581 (unsigned) txq->port_id, (unsigned) txq->queue_id,
582 (unsigned) tx_id, (unsigned) nb_tx);
583 txq->tx_tail = tx_id;
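/*
 * Minimal usage sketch (illustrative only, not part of the driver): an
 * application reaches eth_igb_xmit_pkts() through rte_eth_tx_burst(), which
 * dispatches via dev->tx_pkt_burst. The helper below is hypothetical and
 * only shows how ol_flags/l2_len/l3_len feed the context-descriptor logic
 * above.
 */
#if 0	/* example only */
static uint16_t
xmit_one_ipv4_tcp_example(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf *m)
{
	/* Request IP and TCP checksum offload; the lengths select the HW context. */
	m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = 20;	/* IPv4 header without options */
	return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}
#endif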
588 /*********************************************************************
592 **********************************************************************/
594 #define IGB_PACKET_TYPE_IPV4 0X01
595 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
596 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
597 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
598 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
599 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
600 #define IGB_PACKET_TYPE_IPV6 0X04
601 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
602 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
603 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
604 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
605 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
606 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
607 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
608 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
609 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
610 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
611 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
612 #define IGB_PACKET_TYPE_MAX 0X80
613 #define IGB_PACKET_TYPE_MASK 0X7F
614 #define IGB_PACKET_TYPE_SHIFT 0X04
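/*
 * The 7-bit packet type reported by the hardware is extracted from the RX
 * descriptor's pkt_info field with the SHIFT/MASK values above and used as
 * an index into ptype_table below. For example, an IPv4/TCP packet yields
 * (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK == 0x11, which
 * maps to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */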
615 static inline uint32_t
616 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
618 static const uint32_t
619 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
620 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
622 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
623 RTE_PTYPE_L3_IPV4_EXT,
624 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
626 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
627 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
628 RTE_PTYPE_INNER_L3_IPV6,
629 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
630 RTE_PTYPE_L3_IPV6_EXT,
631 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
632 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
633 RTE_PTYPE_INNER_L3_IPV6_EXT,
634 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
635 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
636 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
637 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
638 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
639 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
640 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
641 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
642 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
643 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
644 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
645 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
646 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
647 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
648 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
649 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
650 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
651 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
652 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
653 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
654 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
655 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
656 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
657 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
658 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
659 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
660 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
661 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
663 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
664 return RTE_PTYPE_UNKNOWN;
666 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
668 return ptype_table[pkt_info];
671 static inline uint64_t
672 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
674 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
676 #if defined(RTE_LIBRTE_IEEE1588)
677 static uint32_t ip_pkt_etqf_map[8] = {
678 0, 0, 0, PKT_RX_IEEE1588_PTP,
682 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
687 #else /* RTE_NEXT_ABI */
688 static inline uint64_t
689 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
693 static uint64_t ip_pkt_types_map[16] = {
694 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
695 PKT_RX_IPV6_HDR, 0, 0, 0,
696 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
697 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
700 #if defined(RTE_LIBRTE_IEEE1588)
701 static uint32_t ip_pkt_etqf_map[8] = {
702 0, 0, 0, PKT_RX_IEEE1588_PTP,
706 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
707 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
708 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
710 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
711 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
713 return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
715 #endif /* RTE_NEXT_ABI */
717 static inline uint64_t
718 rx_desc_status_to_pkt_flags(uint32_t rx_status)
722 /* Check if VLAN present */
723 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
725 #if defined(RTE_LIBRTE_IEEE1588)
726 if (rx_status & E1000_RXD_STAT_TMST)
727 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
732 static inline uint64_t
733 rx_desc_error_to_pkt_flags(uint32_t rx_status)
736 * Bit 30: IPE, IPv4 checksum error
737 * Bit 29: L4I, L4 integrity error
740 static uint64_t error_to_pkt_flags_map[4] = {
741 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
742 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
744 return error_to_pkt_flags_map[(rx_status >>
745 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
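/*
 * Example: a descriptor reporting both IPE and L4I errors produces index 3
 * into error_to_pkt_flags_map above, i.e. PKT_RX_IP_CKSUM_BAD |
 * PKT_RX_L4_CKSUM_BAD, while a clean descriptor produces index 0 and no
 * error flags.
 */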
749 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
752 struct igb_rx_queue *rxq;
753 volatile union e1000_adv_rx_desc *rx_ring;
754 volatile union e1000_adv_rx_desc *rxdp;
755 struct igb_rx_entry *sw_ring;
756 struct igb_rx_entry *rxe;
757 struct rte_mbuf *rxm;
758 struct rte_mbuf *nmb;
759 union e1000_adv_rx_desc rxd;
762 uint32_t hlen_type_rss;
772 rx_id = rxq->rx_tail;
773 rx_ring = rxq->rx_ring;
774 sw_ring = rxq->sw_ring;
775 while (nb_rx < nb_pkts) {
777 * The order of operations here is important as the DD status
778 * bit must not be read after any other descriptor fields.
779 * rx_ring and rxdp are pointing to volatile data so the order
780 * of accesses cannot be reordered by the compiler. If they were
781 * not volatile, they could be reordered which could lead to
782 * using invalid descriptor fields when read from rxd.
784 rxdp = &rx_ring[rx_id];
785 staterr = rxdp->wb.upper.status_error;
786 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
793 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
794 * likely to be invalid and to be dropped by the various
795 * validation checks performed by the network stack.
797 * Allocate a new mbuf to replenish the RX ring descriptor.
798 * If the allocation fails:
799 * - arrange for that RX descriptor to be the first one
800 * being parsed the next time the receive function is
801 * invoked [on the same queue].
803 * - Stop parsing the RX ring and return immediately.
805 * This policy does not drop the packet received in the RX
806 * descriptor for which the allocation of a new mbuf failed.
807 * Thus, it allows that packet to be later retrieved if
808 * mbufs have been freed in the meantime.
809 * As a side effect, holding RX descriptors instead of
810 * systematically giving them back to the NIC may lead to
811 * RX ring exhaustion situations.
812 * However, the NIC can gracefully prevent such situations
813 * from happening by sending specific "back-pressure" flow control
814 * frames to its peer(s).
816 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
817 "staterr=0x%x pkt_len=%u",
818 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819 (unsigned) rx_id, (unsigned) staterr,
820 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
822 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
824 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
825 "queue_id=%u", (unsigned) rxq->port_id,
826 (unsigned) rxq->queue_id);
827 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
832 rxe = &sw_ring[rx_id];
834 if (rx_id == rxq->nb_rx_desc)
837 /* Prefetch next mbuf while processing current one. */
838 rte_igb_prefetch(sw_ring[rx_id].mbuf);
841 * When the next RX descriptor is on a cache-line boundary,
842 * prefetch the next 4 RX descriptors and the next 8 pointers
845 if ((rx_id & 0x3) == 0) {
846 rte_igb_prefetch(&rx_ring[rx_id]);
847 rte_igb_prefetch(&sw_ring[rx_id]);
853 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
854 rxdp->read.hdr_addr = dma_addr;
855 rxdp->read.pkt_addr = dma_addr;
858 * Initialize the returned mbuf.
859 * 1) setup generic mbuf fields:
860 * - number of segments,
863 * - RX port identifier.
864 * 2) integrate hardware offload data, if any:
866 * - IP checksum flag,
867 * - VLAN TCI, if any,
870 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
872 rxm->data_off = RTE_PKTMBUF_HEADROOM;
873 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
876 rxm->pkt_len = pkt_len;
877 rxm->data_len = pkt_len;
878 rxm->port = rxq->port_id;
880 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
881 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
882 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
883 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
885 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
886 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
887 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
888 rxm->ol_flags = pkt_flags;
890 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
891 lo_dword.hs_rss.pkt_info);
895 * Store the mbuf address into the next entry of the array
896 * of returned packets.
898 rx_pkts[nb_rx++] = rxm;
900 rxq->rx_tail = rx_id;
903 * If the number of free RX descriptors is greater than the RX free
904 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
906 * Update the RDT with the value of the last processed RX descriptor
907 * minus 1, to guarantee that the RDT register is never equal to the
908 * RDH register, which creates a "full" ring situation from the
909 * hardware point of view...
911 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
912 if (nb_hold > rxq->rx_free_thresh) {
913 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
914 "nb_hold=%u nb_rx=%u",
915 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
916 (unsigned) rx_id, (unsigned) nb_hold,
918 rx_id = (uint16_t) ((rx_id == 0) ?
919 (rxq->nb_rx_desc - 1) : (rx_id - 1));
920 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
923 rxq->nb_rx_hold = nb_hold;
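/*
 * Minimal usage sketch (illustrative only, not part of the driver):
 * applications never call eth_igb_recv_pkts() directly; rte_eth_rx_burst()
 * dispatches to it through dev->rx_pkt_burst. The polling loop below is a
 * hypothetical example.
 */
#if 0	/* example only */
static void
poll_rx_queue_example(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* replace with real processing */
}
#endif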
928 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
931 struct igb_rx_queue *rxq;
932 volatile union e1000_adv_rx_desc *rx_ring;
933 volatile union e1000_adv_rx_desc *rxdp;
934 struct igb_rx_entry *sw_ring;
935 struct igb_rx_entry *rxe;
936 struct rte_mbuf *first_seg;
937 struct rte_mbuf *last_seg;
938 struct rte_mbuf *rxm;
939 struct rte_mbuf *nmb;
940 union e1000_adv_rx_desc rxd;
941 uint64_t dma; /* Physical address of mbuf data buffer */
943 uint32_t hlen_type_rss;
953 rx_id = rxq->rx_tail;
954 rx_ring = rxq->rx_ring;
955 sw_ring = rxq->sw_ring;
958 * Retrieve RX context of current packet, if any.
960 first_seg = rxq->pkt_first_seg;
961 last_seg = rxq->pkt_last_seg;
963 while (nb_rx < nb_pkts) {
966 * The order of operations here is important as the DD status
967 * bit must not be read after any other descriptor fields.
968 * rx_ring and rxdp are pointing to volatile data so the order
969 * of accesses cannot be reordered by the compiler. If they were
970 * not volatile, they could be reordered which could lead to
971 * using invalid descriptor fields when read from rxd.
973 rxdp = &rx_ring[rx_id];
974 staterr = rxdp->wb.upper.status_error;
975 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
982 * Allocate a new mbuf to replenish the RX ring descriptor.
983 * If the allocation fails:
984 * - arrange for that RX descriptor to be the first one
985 * being parsed the next time the receive function is
986 * invoked [on the same queue].
988 * - Stop parsing the RX ring and return immediately.
990 * This policy does not drop the packet received in the RX
991 * descriptor for which the allocation of a new mbuf failed.
992 * Thus, it allows that packet to be later retrieved if
993 * mbufs have been freed in the meantime.
994 * As a side effect, holding RX descriptors instead of
995 * systematically giving them back to the NIC may lead to
996 * RX ring exhaustion situations.
997 * However, the NIC can gracefully prevent such situations
998 * from happening by sending specific "back-pressure" flow control
999 * frames to its peer(s).
1001 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1002 "staterr=0x%x data_len=%u",
1003 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1004 (unsigned) rx_id, (unsigned) staterr,
1005 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1007 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1009 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1010 "queue_id=%u", (unsigned) rxq->port_id,
1011 (unsigned) rxq->queue_id);
1012 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1017 rxe = &sw_ring[rx_id];
1019 if (rx_id == rxq->nb_rx_desc)
1022 /* Prefetch next mbuf while processing current one. */
1023 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1026 * When the next RX descriptor is on a cache-line boundary,
1027 * prefetch the next 4 RX descriptors and the next 8 pointers
1030 if ((rx_id & 0x3) == 0) {
1031 rte_igb_prefetch(&rx_ring[rx_id]);
1032 rte_igb_prefetch(&sw_ring[rx_id]);
1036 * Update RX descriptor with the physical address of the new
1037 * data buffer of the new allocated mbuf.
1041 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1042 rxdp->read.pkt_addr = dma;
1043 rxdp->read.hdr_addr = dma;
1046 * Set data length & data buffer address of mbuf.
1048 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1049 rxm->data_len = data_len;
1050 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1053 * If this is the first buffer of the received packet,
1054 * set the pointer to the first mbuf of the packet and
1055 * initialize its context.
1056 * Otherwise, update the total length and the number of segments
1057 * of the current scattered packet, and update the pointer to
1058 * the last mbuf of the current packet.
1060 if (first_seg == NULL) {
1062 first_seg->pkt_len = data_len;
1063 first_seg->nb_segs = 1;
1065 first_seg->pkt_len += data_len;
1066 first_seg->nb_segs++;
1067 last_seg->next = rxm;
1071 * If this is not the last buffer of the received packet,
1072 * update the pointer to the last mbuf of the current scattered
1073 * packet and continue to parse the RX ring.
1075 if (! (staterr & E1000_RXD_STAT_EOP)) {
1081 * This is the last buffer of the received packet.
1082 * If the CRC is not stripped by the hardware:
1083 * - Subtract the CRC length from the total packet length.
1084 * - If the last buffer only contains the whole CRC or a part
1085 * of it, free the mbuf associated with the last buffer.
1086 * If part of the CRC is also contained in the previous
1087 * mbuf, subtract the length of that CRC part from the
1088 * data length of the previous mbuf.
1091 if (unlikely(rxq->crc_len > 0)) {
1092 first_seg->pkt_len -= ETHER_CRC_LEN;
1093 if (data_len <= ETHER_CRC_LEN) {
1094 rte_pktmbuf_free_seg(rxm);
1095 first_seg->nb_segs--;
1096 last_seg->data_len = (uint16_t)
1097 (last_seg->data_len -
1098 (ETHER_CRC_LEN - data_len));
1099 last_seg->next = NULL;
1102 (uint16_t) (data_len - ETHER_CRC_LEN);
1106 * Initialize the first mbuf of the returned packet:
1107 * - RX port identifier,
1108 * - hardware offload data, if any:
1109 * - RSS flag & hash,
1110 * - IP checksum flag,
1111 * - VLAN TCI, if any,
1114 first_seg->port = rxq->port_id;
1115 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1118 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1119 * set in the pkt_flags field.
1121 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1122 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1123 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1124 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1125 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1126 first_seg->ol_flags = pkt_flags;
1128 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1129 lower.lo_dword.hs_rss.pkt_info);
1132 /* Prefetch data of first segment, if configured to do so. */
1133 rte_packet_prefetch((char *)first_seg->buf_addr +
1134 first_seg->data_off);
1137 * Store the mbuf address into the next entry of the array
1138 * of returned packets.
1140 rx_pkts[nb_rx++] = first_seg;
1143 * Set up the reception context for a new packet.
1149 * Record index of the next RX descriptor to probe.
1151 rxq->rx_tail = rx_id;
1154 * Save receive context.
1156 rxq->pkt_first_seg = first_seg;
1157 rxq->pkt_last_seg = last_seg;
1160 * If the number of free RX descriptors is greater than the RX free
1161 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1163 * Update the RDT with the value of the last processed RX descriptor
1164 * minus 1, to guarantee that the RDT register is never equal to the
1165 * RDH register, which creates a "full" ring situation from the
1166 * hardware point of view...
1168 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1169 if (nb_hold > rxq->rx_free_thresh) {
1170 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1171 "nb_hold=%u nb_rx=%u",
1172 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1173 (unsigned) rx_id, (unsigned) nb_hold,
1175 rx_id = (uint16_t) ((rx_id == 0) ?
1176 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1177 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1180 rxq->nb_rx_hold = nb_hold;
1185 * Rings setup and release.
1187 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
1188 * multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
1189 * This also optimizes the cache-line size effect;
1190 * the hardware supports cache-line sizes of up to 128 bytes.
1192 #define IGB_ALIGN 128
1195 * Maximum number of Ring Descriptors.
1197 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1198 * descriptors should meet the following condition:
1199 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1201 #define IGB_MIN_RING_DESC 32
1202 #define IGB_MAX_RING_DESC 4096
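/*
 * Since each advanced descriptor is 16 bytes, the "% 128 == 0" condition
 * above means the descriptor count must be a multiple of 8; e.g. 512
 * descriptors give an 8192-byte ring, while 100 descriptors (1600 bytes)
 * would be rejected by the queue setup checks below.
 */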
1204 static const struct rte_memzone *
1205 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1206 uint16_t queue_id, uint32_t ring_size, int socket_id)
1208 char z_name[RTE_MEMZONE_NAMESIZE];
1209 const struct rte_memzone *mz;
1211 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1212 dev->driver->pci_drv.name, ring_name,
1213 dev->data->port_id, queue_id);
1214 mz = rte_memzone_lookup(z_name);
1218 #ifdef RTE_LIBRTE_XEN_DOM0
1219 return rte_memzone_reserve_bounded(z_name, ring_size,
1220 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1222 return rte_memzone_reserve_aligned(z_name, ring_size,
1223 socket_id, 0, IGB_ALIGN);
1228 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1232 if (txq->sw_ring != NULL) {
1233 for (i = 0; i < txq->nb_tx_desc; i++) {
1234 if (txq->sw_ring[i].mbuf != NULL) {
1235 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1236 txq->sw_ring[i].mbuf = NULL;
1243 igb_tx_queue_release(struct igb_tx_queue *txq)
1246 igb_tx_queue_release_mbufs(txq);
1247 rte_free(txq->sw_ring);
1253 eth_igb_tx_queue_release(void *txq)
1255 igb_tx_queue_release(txq);
1259 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1264 memset((void*)&txq->ctx_cache, 0,
1265 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1269 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1271 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1272 struct igb_tx_entry *txe = txq->sw_ring;
1274 struct e1000_hw *hw;
1276 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1277 /* Zero out HW ring memory */
1278 for (i = 0; i < txq->nb_tx_desc; i++) {
1279 txq->tx_ring[i] = zeroed_desc;
1282 /* Initialize ring entries */
1283 prev = (uint16_t)(txq->nb_tx_desc - 1);
1284 for (i = 0; i < txq->nb_tx_desc; i++) {
1285 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1287 txd->wb.status = E1000_TXD_STAT_DD;
1290 txe[prev].next_id = i;
1294 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1295 /* 82575 specific, each tx queue will use 2 hw contexts */
1296 if (hw->mac.type == e1000_82575)
1297 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1299 igb_reset_tx_queue_stat(txq);
1303 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1306 unsigned int socket_id,
1307 const struct rte_eth_txconf *tx_conf)
1309 const struct rte_memzone *tz;
1310 struct igb_tx_queue *txq;
1311 struct e1000_hw *hw;
1314 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1317 * Validate number of transmit descriptors.
1318 * It must not exceed hardware maximum, and must be multiple
1321 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1322 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1327 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1330 if (tx_conf->tx_free_thresh != 0)
1331 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1332 "used for the 1G driver.");
1333 if (tx_conf->tx_rs_thresh != 0)
1334 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1335 "used for the 1G driver.");
1336 if (tx_conf->tx_thresh.wthresh == 0)
1337 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1338 "consider setting the TX WTHRESH value to 4, 8, "
1341 /* Free memory prior to re-allocation if needed */
1342 if (dev->data->tx_queues[queue_idx] != NULL) {
1343 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1344 dev->data->tx_queues[queue_idx] = NULL;
1347 /* First allocate the tx queue data structure */
1348 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1349 RTE_CACHE_LINE_SIZE);
1354 * Allocate TX ring hardware descriptors. A memzone large enough to
1355 * handle the maximum ring size is allocated in order to allow for
1356 * resizing in later calls to the queue setup function.
1358 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1359 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1362 igb_tx_queue_release(txq);
1366 txq->nb_tx_desc = nb_desc;
1367 txq->pthresh = tx_conf->tx_thresh.pthresh;
1368 txq->hthresh = tx_conf->tx_thresh.hthresh;
1369 txq->wthresh = tx_conf->tx_thresh.wthresh;
1370 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1371 txq->wthresh = 1;
1372 txq->queue_id = queue_idx;
1373 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1374 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1375 txq->port_id = dev->data->port_id;
1377 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1378 #ifndef RTE_LIBRTE_XEN_DOM0
1379 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1381 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1383 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1384 /* Allocate software ring */
1385 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1386 sizeof(struct igb_tx_entry) * nb_desc,
1387 RTE_CACHE_LINE_SIZE);
1388 if (txq->sw_ring == NULL) {
1389 igb_tx_queue_release(txq);
1392 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1393 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1395 igb_reset_tx_queue(txq, dev);
1396 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1397 dev->data->tx_queues[queue_idx] = txq;
1403 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1407 if (rxq->sw_ring != NULL) {
1408 for (i = 0; i < rxq->nb_rx_desc; i++) {
1409 if (rxq->sw_ring[i].mbuf != NULL) {
1410 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1411 rxq->sw_ring[i].mbuf = NULL;
1418 igb_rx_queue_release(struct igb_rx_queue *rxq)
1421 igb_rx_queue_release_mbufs(rxq);
1422 rte_free(rxq->sw_ring);
1428 eth_igb_rx_queue_release(void *rxq)
1430 igb_rx_queue_release(rxq);
1434 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1436 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1439 /* Zero out HW ring memory */
1440 for (i = 0; i < rxq->nb_rx_desc; i++) {
1441 rxq->rx_ring[i] = zeroed_desc;
1445 rxq->pkt_first_seg = NULL;
1446 rxq->pkt_last_seg = NULL;
1450 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1453 unsigned int socket_id,
1454 const struct rte_eth_rxconf *rx_conf,
1455 struct rte_mempool *mp)
1457 const struct rte_memzone *rz;
1458 struct igb_rx_queue *rxq;
1459 struct e1000_hw *hw;
1462 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1465 * Validate number of receive descriptors.
1466 * It must not exceed hardware maximum, and must be multiple
1469 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1470 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1474 /* Free memory prior to re-allocation if needed */
1475 if (dev->data->rx_queues[queue_idx] != NULL) {
1476 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1477 dev->data->rx_queues[queue_idx] = NULL;
1480 /* First allocate the RX queue data structure. */
1481 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1482 RTE_CACHE_LINE_SIZE);
1486 rxq->nb_rx_desc = nb_desc;
1487 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1488 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1489 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1490 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1491 rxq->wthresh = 1;
1492 rxq->drop_en = rx_conf->rx_drop_en;
1493 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1494 rxq->queue_id = queue_idx;
1495 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1496 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1497 rxq->port_id = dev->data->port_id;
1498 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1502 * Allocate RX ring hardware descriptors. A memzone large enough to
1503 * handle the maximum ring size is allocated in order to allow for
1504 * resizing in later calls to the queue setup function.
1506 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1507 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1509 igb_rx_queue_release(rxq);
1512 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1513 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1514 #ifndef RTE_LIBRTE_XEN_DOM0
1515 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1517 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1519 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1521 /* Allocate software ring. */
1522 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1523 sizeof(struct igb_rx_entry) * nb_desc,
1524 RTE_CACHE_LINE_SIZE);
1525 if (rxq->sw_ring == NULL) {
1526 igb_rx_queue_release(rxq);
1529 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1530 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1532 dev->data->rx_queues[queue_idx] = rxq;
1533 igb_reset_rx_queue(rxq);
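/*
 * Minimal usage sketch (illustrative only, not part of the driver): this
 * function is invoked through the generic rte_eth_rx_queue_setup() API with
 * a descriptor count that satisfies the multiple-of-8 constraint checked
 * above. The helper name and sizes below are hypothetical.
 */
#if 0	/* example only */
static int
setup_rx_queue_example(uint8_t port_id, struct rte_mempool *mb_pool)
{
	/* 512 descriptors: within [32, 4096] and a multiple of 8. */
	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			NULL, mb_pool);
}
#endif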
1539 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1541 #define IGB_RXQ_SCAN_INTERVAL 4
1542 volatile union e1000_adv_rx_desc *rxdp;
1543 struct igb_rx_queue *rxq;
1546 if (rx_queue_id >= dev->data->nb_rx_queues) {
1547 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1551 rxq = dev->data->rx_queues[rx_queue_id];
1552 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1554 while ((desc < rxq->nb_rx_desc) &&
1555 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1556 desc += IGB_RXQ_SCAN_INTERVAL;
1557 rxdp += IGB_RXQ_SCAN_INTERVAL;
1558 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1559 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1560 desc - rxq->nb_rx_desc]);
1567 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1569 volatile union e1000_adv_rx_desc *rxdp;
1570 struct igb_rx_queue *rxq = rx_queue;
1573 if (unlikely(offset >= rxq->nb_rx_desc))
1575 desc = rxq->rx_tail + offset;
1576 if (desc >= rxq->nb_rx_desc)
1577 desc -= rxq->nb_rx_desc;
1579 rxdp = &rxq->rx_ring[desc];
1580 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
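/*
 * Exposed to applications through rte_eth_rx_descriptor_done(): for
 * example, an offset of 0 reports whether the descriptor at rx_tail (the
 * next one the driver will process) has already been written back by the
 * hardware.
 */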
1584 igb_dev_clear_queues(struct rte_eth_dev *dev)
1587 struct igb_tx_queue *txq;
1588 struct igb_rx_queue *rxq;
1590 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1591 txq = dev->data->tx_queues[i];
1593 igb_tx_queue_release_mbufs(txq);
1594 igb_reset_tx_queue(txq, dev);
1598 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1599 rxq = dev->data->rx_queues[i];
1601 igb_rx_queue_release_mbufs(rxq);
1602 igb_reset_rx_queue(rxq);
1608 igb_dev_free_queues(struct rte_eth_dev *dev)
1612 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1613 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1614 dev->data->rx_queues[i] = NULL;
1616 dev->data->nb_rx_queues = 0;
1618 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1619 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1620 dev->data->tx_queues[i] = NULL;
1622 dev->data->nb_tx_queues = 0;
1626 * Receive Side Scaling (RSS).
1627 * See section 7.1.1.7 in the following document:
1628 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1631 * The source and destination IP addresses of the IP header and the source and
1632 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1633 * against a configurable random key to compute a 32-bit RSS hash result.
1634 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1635 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1636 * RSS output index which is used as the RX queue index where to store the
1638 * The following output is supplied in the RX write-back descriptor:
1639 * - 32-bit result of the Microsoft RSS hash function,
1640 * - 4-bit RSS type field.
1644 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1645 * Used as the default key.
1647 static uint8_t rss_intel_key[40] = {
1648 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1649 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1650 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1651 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1652 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1656 igb_rss_disable(struct rte_eth_dev *dev)
1658 struct e1000_hw *hw;
1661 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1662 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1663 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1664 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1668 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1676 hash_key = rss_conf->rss_key;
1677 if (hash_key != NULL) {
1678 /* Fill in RSS hash key */
1679 for (i = 0; i < 10; i++) {
1680 rss_key = hash_key[(i * 4)];
1681 rss_key |= hash_key[(i * 4) + 1] << 8;
1682 rss_key |= hash_key[(i * 4) + 2] << 16;
1683 rss_key |= hash_key[(i * 4) + 3] << 24;
1684 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1688 /* Set configured hashing protocols in MRQC register */
1689 rss_hf = rss_conf->rss_hf;
1690 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1691 if (rss_hf & ETH_RSS_IPV4)
1692 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1693 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1694 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1695 if (rss_hf & ETH_RSS_IPV6)
1696 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1697 if (rss_hf & ETH_RSS_IPV6_EX)
1698 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1699 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1700 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1701 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1702 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1703 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1704 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1705 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1706 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1707 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1708 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1709 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1713 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1714 struct rte_eth_rss_conf *rss_conf)
1716 struct e1000_hw *hw;
1720 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723 * Before changing anything, first check that the update RSS operation
1724 * does not attempt to disable RSS, if RSS was enabled at
1725 * initialization time, or does not attempt to enable RSS, if RSS was
1726 * disabled at initialization time.
1728 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1729 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1730 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1731 if (rss_hf != 0) /* Enable RSS */
1733 return 0; /* Nothing to do */
1736 if (rss_hf == 0) /* Disable RSS */
1738 igb_hw_rss_hash_set(hw, rss_conf);
1742 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1743 struct rte_eth_rss_conf *rss_conf)
1745 struct e1000_hw *hw;
1752 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1753 hash_key = rss_conf->rss_key;
1754 if (hash_key != NULL) {
1755 /* Return RSS hash key */
1756 for (i = 0; i < 10; i++) {
1757 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1758 hash_key[(i * 4)] = rss_key & 0x000000FF;
1759 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1760 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1761 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1765 /* Get RSS functions configured in MRQC register */
1766 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1767 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1768 rss_conf->rss_hf = 0;
1772 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1773 rss_hf |= ETH_RSS_IPV4;
1774 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1775 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1776 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1777 rss_hf |= ETH_RSS_IPV6;
1778 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1779 rss_hf |= ETH_RSS_IPV6_EX;
1780 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1781 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1782 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1783 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1784 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1785 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1786 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1787 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1788 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1789 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1790 rss_conf->rss_hf = rss_hf;
1795 igb_rss_configure(struct rte_eth_dev *dev)
1797 struct rte_eth_rss_conf rss_conf;
1798 struct e1000_hw *hw;
1802 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 /* Fill in redirection table. */
1805 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1806 for (i = 0; i < 128; i++) {
1813 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1814 i % dev->data->nb_rx_queues : 0);
1815 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1817 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
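/*
 * Example: with 4 RX queues and shift == 0, RETA entry i selects queue
 * i % 4, and four consecutive one-byte entries are packed into the 32-bit
 * register E1000_RETA(i >> 2) before being written.
 */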
1821 * Configure the RSS key and the RSS protocols used to compute
1822 * the RSS hash of input packets.
1824 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1825 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1826 igb_rss_disable(dev);
1829 if (rss_conf.rss_key == NULL)
1830 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1831 igb_hw_rss_hash_set(hw, &rss_conf);
1835 * Check whether the MAC type supports VMDq.
1836 * Return 1 if it does, otherwise return 0.
1839 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1841 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1843 switch (hw->mac.type) {
1864 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1870 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1872 struct rte_eth_vmdq_rx_conf *cfg;
1873 struct e1000_hw *hw;
1874 uint32_t mrqc, vt_ctl, vmolr, rctl;
1877 PMD_INIT_FUNC_TRACE();
1879 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1880 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1882 /* Check whether the MAC type supports VMDq; a return value of 0 means it is not supported. */
1883 if (igb_is_vmdq_supported(dev) == 0)
1886 igb_rss_disable(dev);
1888 /* RCTL: enable VLAN filter */
1889 rctl = E1000_READ_REG(hw, E1000_RCTL);
1890 rctl |= E1000_RCTL_VFE;
1891 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1893 /* MRQC: enable vmdq */
1894 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1895 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1896 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1898 /* VTCTL: pool selection according to VLAN tag */
1899 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1900 if (cfg->enable_default_pool)
1901 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1902 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1903 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1905 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1906 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1907 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1908 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1911 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1912 vmolr |= E1000_VMOLR_AUPE;
1913 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1914 vmolr |= E1000_VMOLR_ROMPE;
1915 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1916 vmolr |= E1000_VMOLR_ROPE;
1917 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1918 vmolr |= E1000_VMOLR_BAM;
1919 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1920 vmolr |= E1000_VMOLR_MPME;
1922 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1926 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1927 * Both 82576 and 82580 support it.
1929 if (hw->mac.type != e1000_i350) {
1930 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1931 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1932 vmolr |= E1000_VMOLR_STRVLAN;
1933 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1937 /* VFTA - enable all vlan filters */
1938 for (i = 0; i < IGB_VFTA_SIZE; i++)
1939 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1941 /* VFRE: enable all 8 pools for RX; both 82576 and i350 support it */
1942 if (hw->mac.type != e1000_82580)
1943 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1946 * RAH/RAL - allow pools to read specific mac addresses
1947 * In this case, all pools should be able to read from mac addr 0
1949 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1950 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1952 /* VLVF: set up filters for vlan tags as configured */
1953 for (i = 0; i < cfg->nb_pool_maps; i++) {
1954 /* set vlan id in VF register and set the valid bit */
1955 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1956 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1957 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1958 E1000_VLVF_POOLSEL_MASK)));
1961 E1000_WRITE_FLUSH(hw);
1967 /*********************************************************************
1969 * Enable receive unit.
1971 **********************************************************************/
1974 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1976 struct igb_rx_entry *rxe = rxq->sw_ring;
1980 /* Initialize software ring entries. */
1981 for (i = 0; i < rxq->nb_rx_desc; i++) {
1982 volatile union e1000_adv_rx_desc *rxd;
1983 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1986 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1987 "queue_id=%hu", rxq->queue_id);
1991 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1992 rxd = &rxq->rx_ring[i];
1993 rxd->read.hdr_addr = dma_addr;
1994 rxd->read.pkt_addr = dma_addr;
2001 #define E1000_MRQC_DEF_Q_SHIFT (3)
2003 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2005 struct e1000_hw *hw =
2006 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2009 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2011 * SRIOV active scheme
2012 * FIXME if support RSS together with VMDq & SRIOV
2014 mrqc = E1000_MRQC_ENABLE_VMDQ;
2015 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2016 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2017 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2018 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2020 * SRIOV inactive scheme
2022 switch (dev->data->dev_conf.rxmode.mq_mode) {
2024 igb_rss_configure(dev);
2026 case ETH_MQ_RX_VMDQ_ONLY:
2027 /* Configure general VMDq-only RX parameters */
2028 igb_vmdq_rx_hw_configure(dev);
2030 case ETH_MQ_RX_NONE:
2031 /* If mq_mode is none, disable RSS. */
2033 igb_rss_disable(dev);
2042 eth_igb_rx_init(struct rte_eth_dev *dev)
2044 struct e1000_hw *hw;
2045 struct igb_rx_queue *rxq;
2050 uint16_t rctl_bsize;
2054 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2058 * Make sure receives are disabled while setting
2059 * up the descriptor ring.
2061 rctl = E1000_READ_REG(hw, E1000_RCTL);
2062 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2065 * Configure support of jumbo frames, if any.
2067 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2068 rctl |= E1000_RCTL_LPE;
2071 * Set the maximum packet length by default; it might be updated
2072 * later when dual VLAN is enabled/disabled.
2074 E1000_WRITE_REG(hw, E1000_RLPML,
2075 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2078 rctl &= ~E1000_RCTL_LPE;
    /* Configure and enable each RX queue. */
    rctl_bsize = 0;
    dev->rx_pkt_burst = eth_igb_recv_pkts;
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        uint64_t bus_addr;
        uint32_t rxdctl;

        rxq = dev->data->rx_queues[i];

        /* Allocate buffers for descriptor rings and set up queue */
        ret = igb_alloc_rx_queue_mbufs(rxq);
        if (ret)
            return ret;

        /*
         * Reset crc_len in case it was changed after queue setup by a
         * call to configure.
         */
        rxq->crc_len =
            (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
                      0 : ETHER_CRC_LEN);

        bus_addr = rxq->rx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
                        rxq->nb_rx_desc *
                        sizeof(union e1000_adv_rx_desc));
        E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);

        srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Configure RX buffer size.
         */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                              RTE_PKTMBUF_HEADROOM);
        if (buf_size >= 1024) {
            /*
             * Configure the BSIZEPACKET field of the SRRCTL
             * register of the queue.
             * Value is in 1 KB resolution, from 1 KB to 127 KB.
             * If this field is equal to 0b, then RCTL.BSIZE
             * determines the RX packet buffer size.
             */
            srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                       E1000_SRRCTL_BSIZEPKT_MASK);
            buf_size = (uint16_t)((srrctl &
                                   E1000_SRRCTL_BSIZEPKT_MASK) <<
                                  E1000_SRRCTL_BSIZEPKT_SHIFT);
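            /*
             * Illustrative example (added, not in the original source):
             * with a 2176-byte mbuf data room and the usual 128 bytes of
             * headroom, buf_size starts at 2048; the 1 KB-granularity shift
             * stores 2 in BSIZEPACKET, and the shift back rounds buf_size
             * to the 2048 bytes the hardware actually uses. A 3000-byte
             * usable buffer would be rounded down to 2048 the same way.
             */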
            /* Add the dual VLAN length when checking if scatter is needed */
            if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                 2 * VLAN_TAG_SIZE) > buf_size) {
                if (!dev->data->scattered_rx)
                    PMD_INIT_LOG(DEBUG,
                                 "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
            }
        } else {
            /*
             * Use the BSIZE field of the device RCTL register.
             */
            if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                rctl_bsize = buf_size;
            if (!dev->data->scattered_rx)
                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
            dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
            dev->data->scattered_rx = 1;
        }

        /* Drop packets when no descriptors are available, if requested */
        if (rxq->drop_en)
            srrctl |= E1000_SRRCTL_DROP_EN;

        E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

        /* Enable this RX queue. */
        rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        rxdctl &= 0xFFF00000;
        rxdctl |= (rxq->pthresh & 0x1F);
        rxdctl |= ((rxq->hthresh & 0x1F) << 8);
        rxdctl |= ((rxq->wthresh & 0x1F) << 16);
        E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
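        /*
         * Descriptive note (added, not in the original source): the three
         * writes above place pthresh in RXDCTL bits [4:0], hthresh in
         * bits [12:8] and wthresh in bits [20:16] - the prefetch, host and
         * write-back thresholds of the queue - while the 0xFFF00000 mask
         * preserves the queue-enable bit set just before it.
         */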
    }

    if (dev->data->dev_conf.rxmode.enable_scatter) {
        if (!dev->data->scattered_rx)
            PMD_INIT_LOG(DEBUG, "forcing scatter mode");
        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
        dev->data->scattered_rx = 1;
    }

    /*
     * Setup BSIZE field of RCTL register, if needed.
     * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
     * register, since the code above configures the SRRCTL register of
     * the RX queue in such a case.
     * All configurable sizes are:
     * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
     *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
     *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
     *  2048: rctl |= E1000_RCTL_SZ_2048;
     *  1024: rctl |= E1000_RCTL_SZ_1024;
     *   512: rctl |= E1000_RCTL_SZ_512;
     *   256: rctl |= E1000_RCTL_SZ_256;
     */
    if (rctl_bsize > 0) {
        if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
            rctl |= E1000_RCTL_SZ_512;
        else /* 256 <= buf_size < 512 - use 256 */
            rctl |= E1000_RCTL_SZ_256;
    }
    /*
     * Configure RSS if the device is configured with multiple RX queues.
     */
    igb_dev_mq_rx_configure(dev);

    /* Update rctl since igb_dev_mq_rx_configure may have changed its value */
    rctl |= E1000_READ_REG(hw, E1000_RCTL);

    /*
     * Setup the Checksum Register.
     * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
     */
    rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
    rxcsum |= E1000_RXCSUM_PCSD;

    /* Enable both L3 and L4 RX checksum offload */
    if (dev->data->dev_conf.rxmode.hw_ip_checksum)
        rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
    else
        rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
    E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
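    /*
     * Descriptive note (added, not in the original source): setting
     * E1000_RXCSUM_PCSD disables the raw packet-checksum field in the RX
     * descriptor so those descriptor words can carry the RSS hash instead,
     * which is why the comment above calls full-packet checksum offload and
     * RSS mutually exclusive; the IPOFL/TUOFL bits only control the
     * per-packet IP and TCP/UDP checksum validation flags.
     */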
    /* Setup the Receive Control Register. */
    if (dev->data->dev_conf.rxmode.hw_strip_crc) {
        rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

        /* set STRCRC bit in all queues */
        if (hw->mac.type == e1000_i350 ||
            hw->mac.type == e1000_i210 ||
            hw->mac.type == e1000_i211 ||
            hw->mac.type == e1000_i354) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                uint32_t dvmolr = E1000_READ_REG(hw,
                    E1000_DVMOLR(rxq->reg_idx));
                dvmolr |= E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
            }
        }
    } else {
        rctl &= ~E1000_RCTL_SECRC; /* Do not strip Ethernet CRC. */

        /* clear STRCRC bit in all queues */
        if (hw->mac.type == e1000_i350 ||
            hw->mac.type == e1000_i210 ||
            hw->mac.type == e1000_i211 ||
            hw->mac.type == e1000_i354) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                uint32_t dvmolr = E1000_READ_REG(hw,
                    E1000_DVMOLR(rxq->reg_idx));
                dvmolr &= ~E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
            }
        }
    }
    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
    rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
        E1000_RCTL_RDMTS_HALF |
        (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

    /* Make sure VLAN Filters are off. */
    if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
        rctl &= ~E1000_RCTL_VFE;
    /* Don't store bad packets. */
    rctl &= ~E1000_RCTL_SBP;

    /* Enable Receives. */
    E1000_WRITE_REG(hw, E1000_RCTL, rctl);

    /*
     * Setup the HW Rx Head and Tail Descriptor Pointers.
     * This needs to be done after enable.
     */
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
        E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
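        /*
         * Descriptive note (added, not in the original source): writing
         * RDH = 0 and RDT = nb_rx_desc - 1 hands all but one descriptor to
         * the hardware; the one-slot gap between tail and head is the usual
         * e1000 convention that lets the driver tell a full ring apart from
         * an empty one. For example, a 512-entry ring starts with RDT = 511.
         */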
    }

    return 0;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_tx_queue *txq;
    uint32_t tctl;
    uint32_t txdctl;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Setup the Base and Length of the Tx Descriptor Rings. */
    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        uint64_t bus_addr;

        txq = dev->data->tx_queues[i];
        bus_addr = txq->tx_ring_phys_addr;

        E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
                        txq->nb_tx_desc *
                        sizeof(union e1000_adv_tx_desc));
        E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

        /* Setup the HW Tx Head and Tail descriptor pointers. */
        E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
        E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

        /* Setup Transmit threshold registers. */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
        txdctl |= txq->pthresh & 0x1F;
        txdctl |= ((txq->hthresh & 0x1F) << 8);
        txdctl |= ((txq->wthresh & 0x1F) << 16);
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
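        /*
         * Descriptive note (added, not in the original source): TDH and TDT
         * both start at zero, so each ring is empty from the adapter's point
         * of view; the transmit path advances TDT as descriptors are posted.
         * TXDCTL uses the same 5-bit threshold layout as RXDCTL above
         * (pthresh [4:0], hthresh [12:8], wthresh [20:16]).
         */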
    }

    /* Program the Transmit Control Register. */
    tctl = E1000_READ_REG(hw, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
             (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

    e1000_config_collision_dist(hw);

    /* This write will effectively turn on the transmit unit. */
    E1000_WRITE_REG(hw, E1000_TCTL, tctl);
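    /*
     * Descriptive note (added, not in the original source): the CT field
     * rewritten above is the half-duplex collision threshold, restored to
     * the base driver's E1000_COLLISION_THRESHOLD default; together with
     * e1000_config_collision_dist() it only matters on half-duplex links.
     */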
}

/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_rx_queue *rxq;
    uint32_t srrctl;
    uint16_t buf_size, rctl_bsize, i;
    int ret;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* setup MTU */
    e1000_rlpml_set_vf(hw,
        (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
        VLAN_TAG_SIZE));

    /* Configure and enable each RX queue. */
    rctl_bsize = 0;
    dev->rx_pkt_burst = eth_igb_recv_pkts;
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        uint64_t bus_addr;
        uint32_t rxdctl;

        rxq = dev->data->rx_queues[i];

        /* Allocate buffers for descriptor rings and set up queue */
        ret = igb_alloc_rx_queue_mbufs(rxq);
        if (ret)
            return ret;

        bus_addr = rxq->rx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_RDLEN(i),
                        rxq->nb_rx_desc *
                        sizeof(union e1000_adv_rx_desc));
        E1000_WRITE_REG(hw, E1000_RDBAH(i),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
        srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Configure RX buffer size.
         */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                              RTE_PKTMBUF_HEADROOM);
        if (buf_size >= 1024) {
            /*
             * Configure the BSIZEPACKET field of the SRRCTL
             * register of the queue.
             * Value is in 1 KB resolution, from 1 KB to 127 KB.
             * If this field is equal to 0b, then RCTL.BSIZE
             * determines the RX packet buffer size.
             */
            srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                       E1000_SRRCTL_BSIZEPKT_MASK);
            buf_size = (uint16_t)((srrctl &
                                   E1000_SRRCTL_BSIZEPKT_MASK) <<
                                  E1000_SRRCTL_BSIZEPKT_SHIFT);

            /* Add the dual VLAN length when checking if scatter is needed */
            if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                 2 * VLAN_TAG_SIZE) > buf_size) {
                if (!dev->data->scattered_rx)
                    PMD_INIT_LOG(DEBUG,
                                 "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
            }
        } else {
            /*
             * Use the BSIZE field of the device RCTL register.
             */
            if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                rctl_bsize = buf_size;
            if (!dev->data->scattered_rx)
                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
            dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
            dev->data->scattered_rx = 1;
        }

        /* Drop packets when no descriptors are available, if requested */
        if (rxq->drop_en)
            srrctl |= E1000_SRRCTL_DROP_EN;

        E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
        /* Enable this RX queue. */
        rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        rxdctl &= 0xFFF00000;
        rxdctl |= (rxq->pthresh & 0x1F);
        rxdctl |= ((rxq->hthresh & 0x1F) << 8);
        if (hw->mac.type == e1000_vfadapt) {
            /*
             * Workaround for 82576 VF Erratum:
             * force WTHRESH to 1 to avoid descriptor write-back
             * not being triggered in some cases.
             */
            rxdctl |= 0x10000;
            PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
        } else
            rxdctl |= ((rxq->wthresh & 0x1F) << 16);
        E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
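        /*
         * Descriptive note (added, not in the original source): 0x10000 is
         * bit 16, the least-significant bit of the WTHRESH field, so the
         * erratum workaround above simply programs WTHRESH = 1 instead of
         * the user-supplied value.
         */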
    }

    if (dev->data->dev_conf.rxmode.enable_scatter) {
        if (!dev->data->scattered_rx)
            PMD_INIT_LOG(DEBUG, "forcing scatter mode");
        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
        dev->data->scattered_rx = 1;
    }

    /*
     * Setup the HW Rx Head and Tail Descriptor Pointers.
     * This needs to be done after enable.
     */
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        E1000_WRITE_REG(hw, E1000_RDH(i), 0);
        E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
    }

    return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_tx_queue *txq;
    uint32_t txdctl;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Setup the Base and Length of the Tx Descriptor Rings. */
    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        uint64_t bus_addr;

        txq = dev->data->tx_queues[i];
        bus_addr = txq->tx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_TDLEN(i),
                        txq->nb_tx_desc *
                        sizeof(union e1000_adv_tx_desc));
        E1000_WRITE_REG(hw, E1000_TDBAH(i),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

        /* Setup the HW Tx Head and Tail descriptor pointers. */
        E1000_WRITE_REG(hw, E1000_TDT(i), 0);
        E1000_WRITE_REG(hw, E1000_TDH(i), 0);

        /* Setup Transmit threshold registers. */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
        txdctl |= txq->pthresh & 0x1F;
        txdctl |= ((txq->hthresh & 0x1F) << 8);
        if (hw->mac.type == e1000_82576) {
            /*
             * Workaround for 82576 VF Erratum:
             * force WTHRESH to 1 to avoid descriptor write-back
             * not being triggered in some cases.
             */
            txdctl |= 0x10000;
            PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
        } else
            txdctl |= ((txq->wthresh & 0x1F) << 16);
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
    }
}