4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
75 /* Bit mask of the offload flags that require building a TX context descriptor */
76 #define IGB_TX_OFFLOAD_MASK ( \
81 static inline struct rte_mbuf *
82 rte_rxmbuf_alloc(struct rte_mempool *mp)
86 m = __rte_mbuf_raw_alloc(mp);
87 __rte_mbuf_sanity_check_raw(m, 0);
91 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
92 (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
94 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
95 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
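/*
 * RTE_MBUF_DATA_DMA_ADDR() yields the DMA address of the mbuf's current
 * packet data (buf_physaddr plus data_off), while the _DEFAULT variant
 * assumes the data starts right after the standard headroom
 * (RTE_PKTMBUF_HEADROOM, typically 128 bytes); the RX path uses the
 * latter when programming descriptors for freshly allocated mbufs.
 */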
98 * Structure associated with each descriptor of the RX ring of a RX queue.
100 struct igb_rx_entry {
101 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
105 * Structure associated with each descriptor of the TX ring of a TX queue.
107 struct igb_tx_entry {
108 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
109 uint16_t next_id; /**< Index of next descriptor in ring. */
110 uint16_t last_id; /**< Index of last scattered descriptor. */
114 * Structure associated with each RX queue.
116 struct igb_rx_queue {
117 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
118 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
119 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
120 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
121 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
122 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
123 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
124 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
125 uint16_t nb_rx_desc; /**< number of RX descriptors. */
126 uint16_t rx_tail; /**< current value of RDT register. */
127 uint16_t nb_rx_hold; /**< number of held free RX desc. */
128 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
129 uint16_t queue_id; /**< RX queue index. */
130 uint16_t reg_idx; /**< RX queue register index. */
131 uint8_t port_id; /**< Device port identifier. */
132 uint8_t pthresh; /**< Prefetch threshold register. */
133 uint8_t hthresh; /**< Host threshold register. */
134 uint8_t wthresh; /**< Write-back threshold register. */
135 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
136 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
140 * Hardware context number
142 enum igb_advctx_num {
143 IGB_CTX_0 = 0, /**< CTX0 */
144 IGB_CTX_1 = 1, /**< CTX1 */
145 IGB_CTX_NUM = 2, /**< CTX_NUM */
148 /** Offload features */
149 union igb_vlan_macip {
152 uint16_t l2_l3_len; /**< Combined 7-bit L2 and 9-bit L3 lengths. */
154 /**< VLAN Tag Control Identifier (CPU order). */
159 * Compare masks for vlan_macip_lens.data;
160 * must be kept in sync with the igb_vlan_macip.f layout.
162 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
163 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
164 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
165 /** MAC+IP length. */
166 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
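/*
 * Worked example of the layout implied by the masks above: for a plain
 * Ethernet + IPv4 header (l2_len = 14, l3_len = 20) the 16-bit
 * l2_l3_len field holds (14 << 9) | 20 = 0x1C14, and the VLAN TCI
 * occupies the upper 16 bits of vlan_macip_lens.data.
 */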
169 * Structure used to check whether a new context descriptor needs to be built
171 struct igb_advctx_info {
172 uint64_t flags; /**< ol_flags related to context build. */
173 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
174 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
178 * Structure associated with each TX queue.
180 struct igb_tx_queue {
181 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
182 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
183 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
184 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
185 uint32_t txd_type; /**< Device-specific TXD type */
186 uint16_t nb_tx_desc; /**< number of TX descriptors. */
187 uint16_t tx_tail; /**< Current value of TDT register. */
189 /**< Index of first used TX descriptor. */
190 uint16_t queue_id; /**< TX queue index. */
191 uint16_t reg_idx; /**< TX queue register index. */
192 uint8_t port_id; /**< Device port identifier. */
193 uint8_t pthresh; /**< Prefetch threshold register. */
194 uint8_t hthresh; /**< Host threshold register. */
195 uint8_t wthresh; /**< Write-back threshold register. */
197 /**< Current used hardware descriptor. */
199 /**< Start context position for transmit queue. */
200 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
201 /**< Hardware context history.*/
205 #define RTE_PMD_USE_PREFETCH
208 #ifdef RTE_PMD_USE_PREFETCH
209 #define rte_igb_prefetch(p) rte_prefetch0(p)
211 #define rte_igb_prefetch(p) do {} while(0)
214 #ifdef RTE_PMD_PACKET_PREFETCH
215 #define rte_packet_prefetch(p) rte_prefetch1(p)
217 #define rte_packet_prefetch(p) do {} while(0)
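/*
 * Descriptor and mbuf pointers are prefetched with rte_prefetch0()
 * (all cache levels), while packet payload uses rte_prefetch1(), which
 * skips the L1 cache, presumably because the payload is touched later
 * and would otherwise evict hotter data.
 */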
221 * Macro for the VMDq feature of the 1 GbE NIC.
223 #define E1000_VMOLR_SIZE (8)
225 /*********************************************************************
229 **********************************************************************/
232 * Advanced context descriptors are almost identical between igb and ixgbe.
233 * This is kept as a separate function, leaving room for optimization here.
234 * Rework is required to use the pre-defined values.
238 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
239 volatile struct e1000_adv_tx_context_desc *ctx_txd,
240 uint64_t ol_flags, uint32_t vlan_macip_lens)
242 uint32_t type_tucmd_mlhl;
243 uint32_t mss_l4len_idx;
244 uint32_t ctx_idx, ctx_curr;
247 ctx_curr = txq->ctx_curr;
248 ctx_idx = ctx_curr + txq->ctx_start;
253 if (ol_flags & PKT_TX_VLAN_PKT) {
254 cmp_mask |= TX_VLAN_CMP_MASK;
257 if (ol_flags & PKT_TX_IP_CKSUM) {
258 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
259 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
262 /* Specify which HW CTX to upload. */
263 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264 switch (ol_flags & PKT_TX_L4_MASK) {
265 case PKT_TX_UDP_CKSUM:
266 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
267 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
268 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
269 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
271 case PKT_TX_TCP_CKSUM:
272 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
273 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
275 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
277 case PKT_TX_SCTP_CKSUM:
278 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
279 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
284 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
285 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
289 txq->ctx_cache[ctx_curr].flags = ol_flags;
290 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
291 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
292 vlan_macip_lens & cmp_mask;
294 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
295 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
296 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
297 ctx_txd->seqnum_seed = 0;
301 * Check which hardware context can be used. Use the existing match
302 * or create a new context descriptor.
304 static inline uint32_t
305 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
306 uint32_t vlan_macip_lens)
308 /* If match with the current context */
309 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
310 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
311 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
312 return txq->ctx_curr;
315 /* If match with the second context */
317 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320 return txq->ctx_curr;
323 /* Neither cached context matches: a new one must be built */
324 return (IGB_CTX_NUM);
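/*
 * Note: each TX queue caches the offload parameters last written to the
 * two hardware contexts (IGB_CTX_0/IGB_CTX_1). A return value of
 * IGB_CTX_NUM means neither cached slot matches, so the transmit path
 * must emit a fresh context descriptor ahead of the data descriptors.
 */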
327 static inline uint32_t
328 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
330 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
331 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
334 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
335 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
339 static inline uint32_t
340 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
342 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
343 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
347 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
350 struct igb_tx_queue *txq;
351 struct igb_tx_entry *sw_ring;
352 struct igb_tx_entry *txe, *txn;
353 volatile union e1000_adv_tx_desc *txr;
354 volatile union e1000_adv_tx_desc *txd;
355 struct rte_mbuf *tx_pkt;
356 struct rte_mbuf *m_seg;
357 union igb_vlan_macip vlan_macip_lens;
365 uint64_t buf_dma_addr;
366 uint32_t olinfo_status;
367 uint32_t cmd_type_len;
376 uint32_t new_ctx = 0;
380 sw_ring = txq->sw_ring;
382 tx_id = txq->tx_tail;
383 txe = &sw_ring[tx_id];
385 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
387 pkt_len = tx_pkt->pkt_len;
389 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
392 * The number of descriptors that must be allocated for a
393 * packet is the number of segments of that packet, plus 1
394 * Context Descriptor for the VLAN Tag Identifier, if any.
395 * Determine the last TX descriptor to allocate in the TX ring
396 * for the packet, starting from the current position (tx_id)
399 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
401 ol_flags = tx_pkt->ol_flags;
402 l2_l3_len.l2_len = tx_pkt->l2_len;
403 l2_l3_len.l3_len = tx_pkt->l3_len;
404 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
405 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
406 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
408 /* If a context descriptor needs to be built. */
410 ctx = what_advctx_update(txq, tx_ol_req,
411 vlan_macip_lens.data);
412 /* Only allocate a context descriptor if required. */
413 new_ctx = (ctx == IGB_CTX_NUM);
415 tx_last = (uint16_t) (tx_last + new_ctx);
417 if (tx_last >= txq->nb_tx_desc)
418 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
420 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
421 " tx_first=%u tx_last=%u",
422 (unsigned) txq->port_id,
423 (unsigned) txq->queue_id,
429 * Check if there are enough free descriptors in the TX ring
430 * to transmit the next packet.
431 * This operation is based on the two following rules:
433 * 1- Only check that the last needed TX descriptor can be
434 * allocated (by construction, if that descriptor is free,
435 * all intermediate ones are also free).
437 * For this purpose, the index of the last TX descriptor
438 * used for a packet (the "last descriptor" of a packet)
439 * is recorded in the TX entries (the last one included)
440 * that are associated with all TX descriptors allocated
443 * 2- Avoid to allocate the last free TX descriptor of the
444 * ring, in order to never set the TDT register with the
445 * same value stored in parallel by the NIC in the TDH
446 * register, which makes the TX engine of the NIC enter
447 * in a deadlock situation.
449 * By extension, avoid to allocate a free descriptor that
450 * belongs to the last set of free descriptors allocated
451 * to the same packet previously transmitted.
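* 
* Example of rule 1: a 3-segment packet that also needs a context
* descriptor and starts at tx_id 10 occupies descriptors 10..13; if
* the slot of descriptor 13 is free, slots 10..12 are necessarily
* free as well, so only the last one has to be checked.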
455 * The "last descriptor" of the packet, if any, that previously
456 * used the descriptor slot indexed by "tx_last".
458 tx_end = sw_ring[tx_last].last_id;
461 * The next descriptor following that "last descriptor" in the
464 tx_end = sw_ring[tx_end].next_id;
467 * The "last descriptor" associated with that next descriptor.
469 tx_end = sw_ring[tx_end].last_id;
472 * Check that this descriptor is free.
474 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
481 * Set common flags of all TX Data Descriptors.
483 * The following bits must be set in all Data Descriptors:
484 * - E1000_ADVTXD_DTYP_DATA
485 * - E1000_ADVTXD_DCMD_DEXT
487 * The following bits must be set in the first Data Descriptor
488 * and are ignored in the other ones:
489 * - E1000_ADVTXD_DCMD_IFCS
490 * - E1000_ADVTXD_MAC_1588
491 * - E1000_ADVTXD_DCMD_VLE
493 * The following bits must only be set in the last Data
495 * - E1000_TXD_CMD_EOP
497 * The following bits can be set in any Data Descriptor, but
498 * are only set in the last Data Descriptor:
501 cmd_type_len = txq->txd_type |
502 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
503 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
504 #if defined(RTE_LIBRTE_IEEE1588)
505 if (ol_flags & PKT_TX_IEEE1588_TMST)
506 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
509 /* Setup TX Advanced context descriptor if required */
511 volatile struct e1000_adv_tx_context_desc *
514 ctx_txd = (volatile struct
515 e1000_adv_tx_context_desc *)
518 txn = &sw_ring[txe->next_id];
519 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
521 if (txe->mbuf != NULL) {
522 rte_pktmbuf_free_seg(txe->mbuf);
526 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
527 vlan_macip_lens.data);
529 txe->last_id = tx_last;
530 tx_id = txe->next_id;
534 /* Setup the TX Advanced Data Descriptor */
535 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
536 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
537 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
542 txn = &sw_ring[txe->next_id];
545 if (txe->mbuf != NULL)
546 rte_pktmbuf_free_seg(txe->mbuf);
550 * Set up transmit descriptor.
552 slen = (uint16_t) m_seg->data_len;
553 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
554 txd->read.buffer_addr =
555 rte_cpu_to_le_64(buf_dma_addr);
556 txd->read.cmd_type_len =
557 rte_cpu_to_le_32(cmd_type_len | slen);
558 txd->read.olinfo_status =
559 rte_cpu_to_le_32(olinfo_status);
560 txe->last_id = tx_last;
561 tx_id = txe->next_id;
564 } while (m_seg != NULL);
567 * The last packet data descriptor needs End Of Packet (EOP)
568 * and Report Status (RS).
570 txd->read.cmd_type_len |=
571 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
577 * Set the Transmit Descriptor Tail (TDT).
579 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
580 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
581 (unsigned) txq->port_id, (unsigned) txq->queue_id,
582 (unsigned) tx_id, (unsigned) nb_tx);
583 txq->tx_tail = tx_id;
588 /*********************************************************************
592 **********************************************************************/
593 static inline uint64_t
594 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
598 static uint64_t ip_pkt_types_map[16] = {
599 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
600 PKT_RX_IPV6_HDR, 0, 0, 0,
601 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
602 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
605 #if defined(RTE_LIBRTE_IEEE1588)
606 static uint32_t ip_pkt_etqf_map[8] = {
607 0, 0, 0, PKT_RX_IEEE1588_PTP,
611 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
612 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
613 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
615 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
616 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
618 return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
621 static inline uint64_t
622 rx_desc_status_to_pkt_flags(uint32_t rx_status)
626 /* Check if VLAN present */
627 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
629 #if defined(RTE_LIBRTE_IEEE1588)
630 if (rx_status & E1000_RXD_STAT_TMST)
631 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
636 static inline uint64_t
637 rx_desc_error_to_pkt_flags(uint32_t rx_status)
640 * Bit 30: IPE, IPv4 checksum error
641 * Bit 29: L4I, L4I integrity error
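* 
* The two bits are shifted down and used as an index into
* error_to_pkt_flags_map[] below: e.g. index 2 (IPE set, L4 error
* clear) maps to PKT_RX_IP_CKSUM_BAD only.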
644 static uint64_t error_to_pkt_flags_map[4] = {
645 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
646 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
648 return error_to_pkt_flags_map[(rx_status >>
649 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
653 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
656 struct igb_rx_queue *rxq;
657 volatile union e1000_adv_rx_desc *rx_ring;
658 volatile union e1000_adv_rx_desc *rxdp;
659 struct igb_rx_entry *sw_ring;
660 struct igb_rx_entry *rxe;
661 struct rte_mbuf *rxm;
662 struct rte_mbuf *nmb;
663 union e1000_adv_rx_desc rxd;
666 uint32_t hlen_type_rss;
676 rx_id = rxq->rx_tail;
677 rx_ring = rxq->rx_ring;
678 sw_ring = rxq->sw_ring;
679 while (nb_rx < nb_pkts) {
681 * The order of operations here is important as the DD status
682 * bit must not be read after any other descriptor fields.
683 * rx_ring and rxdp are pointing to volatile data so the order
684 * of accesses cannot be reordered by the compiler. If they were
685 * not volatile, they could be reordered which could lead to
686 * using invalid descriptor fields when read from rxd.
688 rxdp = &rx_ring[rx_id];
689 staterr = rxdp->wb.upper.status_error;
690 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
697 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
698 * likely to be invalid and to be dropped by the various
699 * validation checks performed by the network stack.
701 * Allocate a new mbuf to replenish the RX ring descriptor.
702 * If the allocation fails:
703 * - arrange for that RX descriptor to be the first one
704 * being parsed the next time the receive function is
705 * invoked [on the same queue].
707 * - Stop parsing the RX ring and return immediately.
709 * This policy does not drop the packet received in the RX
710 * descriptor for which the allocation of a new mbuf failed.
711 * Thus, it allows that packet to be retrieved later once
712 * mbufs have been freed in the meantime.
713 * As a side effect, holding RX descriptors instead of
714 * systematically giving them back to the NIC may lead to
715 * RX ring exhaustion situations.
716 * However, the NIC can gracefully prevent such situations
717 * from happening by sending specific "back-pressure" flow control
718 * frames to its peer(s).
720 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
721 "staterr=0x%x pkt_len=%u",
722 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
723 (unsigned) rx_id, (unsigned) staterr,
724 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
726 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
728 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
729 "queue_id=%u", (unsigned) rxq->port_id,
730 (unsigned) rxq->queue_id);
731 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
736 rxe = &sw_ring[rx_id];
738 if (rx_id == rxq->nb_rx_desc)
741 /* Prefetch next mbuf while processing current one. */
742 rte_igb_prefetch(sw_ring[rx_id].mbuf);
745 * When next RX descriptor is on a cache-line boundary,
746 * prefetch the next 4 RX descriptors and the next 8 pointers
749 if ((rx_id & 0x3) == 0) {
750 rte_igb_prefetch(&rx_ring[rx_id]);
751 rte_igb_prefetch(&sw_ring[rx_id]);
757 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
758 rxdp->read.hdr_addr = dma_addr;
759 rxdp->read.pkt_addr = dma_addr;
762 * Initialize the returned mbuf.
763 * 1) setup generic mbuf fields:
764 * - number of segments,
767 * - RX port identifier.
768 * 2) integrate hardware offload data, if any:
770 * - IP checksum flag,
771 * - VLAN TCI, if any,
774 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
776 rxm->data_off = RTE_PKTMBUF_HEADROOM;
777 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
780 rxm->pkt_len = pkt_len;
781 rxm->data_len = pkt_len;
782 rxm->port = rxq->port_id;
784 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
785 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
786 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
787 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
789 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
790 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
791 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
792 rxm->ol_flags = pkt_flags;
795 * Store the mbuf address into the next entry of the array
796 * of returned packets.
798 rx_pkts[nb_rx++] = rxm;
800 rxq->rx_tail = rx_id;
803 * If the number of free RX descriptors is greater than the RX free
804 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
806 * Update the RDT with the value of the last processed RX descriptor
807 * minus 1, to guarantee that the RDT register is never equal to the
808 * RDH register, which creates a "full" ring situation from the
809 * hardware point of view...
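* 
* For example, when rx_id has wrapped around to 0, the RDT write below
* uses nb_rx_desc - 1 (511 on a 512-descriptor ring) instead of a value
* that could collide with RDH.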
811 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
812 if (nb_hold > rxq->rx_free_thresh) {
813 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
814 "nb_hold=%u nb_rx=%u",
815 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
816 (unsigned) rx_id, (unsigned) nb_hold,
818 rx_id = (uint16_t) ((rx_id == 0) ?
819 (rxq->nb_rx_desc - 1) : (rx_id - 1));
820 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
823 rxq->nb_rx_hold = nb_hold;
828 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
831 struct igb_rx_queue *rxq;
832 volatile union e1000_adv_rx_desc *rx_ring;
833 volatile union e1000_adv_rx_desc *rxdp;
834 struct igb_rx_entry *sw_ring;
835 struct igb_rx_entry *rxe;
836 struct rte_mbuf *first_seg;
837 struct rte_mbuf *last_seg;
838 struct rte_mbuf *rxm;
839 struct rte_mbuf *nmb;
840 union e1000_adv_rx_desc rxd;
841 uint64_t dma; /* Physical address of mbuf data buffer */
843 uint32_t hlen_type_rss;
853 rx_id = rxq->rx_tail;
854 rx_ring = rxq->rx_ring;
855 sw_ring = rxq->sw_ring;
858 * Retrieve RX context of current packet, if any.
860 first_seg = rxq->pkt_first_seg;
861 last_seg = rxq->pkt_last_seg;
863 while (nb_rx < nb_pkts) {
866 * The order of operations here is important as the DD status
867 * bit must not be read after any other descriptor fields.
868 * rx_ring and rxdp are pointing to volatile data so the order
869 * of accesses cannot be reordered by the compiler. If they were
870 * not volatile, they could be reordered which could lead to
871 * using invalid descriptor fields when read from rxd.
873 rxdp = &rx_ring[rx_id];
874 staterr = rxdp->wb.upper.status_error;
875 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
882 * Allocate a new mbuf to replenish the RX ring descriptor.
883 * If the allocation fails:
884 * - arrange for that RX descriptor to be the first one
885 * being parsed the next time the receive function is
886 * invoked [on the same queue].
888 * - Stop parsing the RX ring and return immediately.
890 * This policy does not drop the packet received in the RX
891 * descriptor for which the allocation of a new mbuf failed.
892 * Thus, it allows that packet to be retrieved later once
893 * mbufs have been freed in the meantime.
894 * As a side effect, holding RX descriptors instead of
895 * systematically giving them back to the NIC may lead to
896 * RX ring exhaustion situations.
897 * However, the NIC can gracefully prevent such situations
898 * from happening by sending specific "back-pressure" flow control
899 * frames to its peer(s).
901 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
902 "staterr=0x%x data_len=%u",
903 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
904 (unsigned) rx_id, (unsigned) staterr,
905 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
907 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
909 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
910 "queue_id=%u", (unsigned) rxq->port_id,
911 (unsigned) rxq->queue_id);
912 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
917 rxe = &sw_ring[rx_id];
919 if (rx_id == rxq->nb_rx_desc)
922 /* Prefetch next mbuf while processing current one. */
923 rte_igb_prefetch(sw_ring[rx_id].mbuf);
926 * When next RX descriptor is on a cache-line boundary,
927 * prefetch the next 4 RX descriptors and the next 8 pointers
930 if ((rx_id & 0x3) == 0) {
931 rte_igb_prefetch(&rx_ring[rx_id]);
932 rte_igb_prefetch(&sw_ring[rx_id]);
936 * Update RX descriptor with the physical address of the new
937 * data buffer of the new allocated mbuf.
941 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
942 rxdp->read.pkt_addr = dma;
943 rxdp->read.hdr_addr = dma;
946 * Set data length & data buffer address of mbuf.
948 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
949 rxm->data_len = data_len;
950 rxm->data_off = RTE_PKTMBUF_HEADROOM;
953 * If this is the first buffer of the received packet,
954 * set the pointer to the first mbuf of the packet and
955 * initialize its context.
956 * Otherwise, update the total length and the number of segments
957 * of the current scattered packet, and update the pointer to
958 * the last mbuf of the current packet.
960 if (first_seg == NULL) {
962 first_seg->pkt_len = data_len;
963 first_seg->nb_segs = 1;
965 first_seg->pkt_len += data_len;
966 first_seg->nb_segs++;
967 last_seg->next = rxm;
971 * If this is not the last buffer of the received packet,
972 * update the pointer to the last mbuf of the current scattered
973 * packet and continue to parse the RX ring.
975 if (! (staterr & E1000_RXD_STAT_EOP)) {
981 * This is the last buffer of the received packet.
982 * If the CRC is not stripped by the hardware:
983 * - Subtract the CRC length from the total packet length.
984 * - If the last buffer only contains the whole CRC or a part
985 * of it, free the mbuf associated with the last buffer.
986 * If part of the CRC is also contained in the previous
987 * mbuf, subtract the length of that CRC part from the
988 * data length of the previous mbuf.
991 if (unlikely(rxq->crc_len > 0)) {
992 first_seg->pkt_len -= ETHER_CRC_LEN;
993 if (data_len <= ETHER_CRC_LEN) {
994 rte_pktmbuf_free_seg(rxm);
995 first_seg->nb_segs--;
996 last_seg->data_len = (uint16_t)
997 (last_seg->data_len -
998 (ETHER_CRC_LEN - data_len));
999 last_seg->next = NULL;
1002 (uint16_t) (data_len - ETHER_CRC_LEN);
1006 * Initialize the first mbuf of the returned packet:
1007 * - RX port identifier,
1008 * - hardware offload data, if any:
1009 * - RSS flag & hash,
1010 * - IP checksum flag,
1011 * - VLAN TCI, if any,
1014 first_seg->port = rxq->port_id;
1015 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1018 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1019 * set in the pkt_flags field.
1021 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1022 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1023 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1024 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1025 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1026 first_seg->ol_flags = pkt_flags;
1028 /* Prefetch data of first segment, if configured to do so. */
1029 rte_packet_prefetch((char *)first_seg->buf_addr +
1030 first_seg->data_off);
1033 * Store the mbuf address into the next entry of the array
1034 * of returned packets.
1036 rx_pkts[nb_rx++] = first_seg;
1039 * Setup receipt context for a new packet.
1045 * Record index of the next RX descriptor to probe.
1047 rxq->rx_tail = rx_id;
1050 * Save receive context.
1052 rxq->pkt_first_seg = first_seg;
1053 rxq->pkt_last_seg = last_seg;
1056 * If the number of free RX descriptors is greater than the RX free
1057 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1059 * Update the RDT with the value of the last processed RX descriptor
1060 * minus 1, to guarantee that the RDT register is never equal to the
1061 * RDH register, which creates a "full" ring situation from the
1062 * hardware point of view...
1064 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1065 if (nb_hold > rxq->rx_free_thresh) {
1066 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1067 "nb_hold=%u nb_rx=%u",
1068 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1069 (unsigned) rx_id, (unsigned) nb_hold,
1071 rx_id = (uint16_t) ((rx_id == 0) ?
1072 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1073 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1076 rxq->nb_rx_hold = nb_hold;
1081 * Rings setup and release.
1083 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1084 * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary instead.
1085 * This also optimizes cache line usage;
1086 * the hardware supports cache line sizes of up to 128 bytes.
1088 #define IGB_ALIGN 128
1091 * Maximum number of Ring Descriptors.
1093 * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1094 * descriptors must meet the following condition:
1095 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1097 #define IGB_MIN_RING_DESC 32
1098 #define IGB_MAX_RING_DESC 4096
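/*
 * Both limits satisfy the 128-byte rule above: an advanced descriptor is
 * 16 bytes, so 32 descriptors occupy 512 bytes and 4096 descriptors
 * occupy 64 KB, each a multiple of 128.
 */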
1100 static const struct rte_memzone *
1101 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1102 uint16_t queue_id, uint32_t ring_size, int socket_id)
1104 char z_name[RTE_MEMZONE_NAMESIZE];
1105 const struct rte_memzone *mz;
1107 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1108 dev->driver->pci_drv.name, ring_name,
1109 dev->data->port_id, queue_id);
1110 mz = rte_memzone_lookup(z_name);
1114 #ifdef RTE_LIBRTE_XEN_DOM0
1115 return rte_memzone_reserve_bounded(z_name, ring_size,
1116 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1118 return rte_memzone_reserve_aligned(z_name, ring_size,
1119 socket_id, 0, IGB_ALIGN);
1124 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1128 if (txq->sw_ring != NULL) {
1129 for (i = 0; i < txq->nb_tx_desc; i++) {
1130 if (txq->sw_ring[i].mbuf != NULL) {
1131 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1132 txq->sw_ring[i].mbuf = NULL;
1139 igb_tx_queue_release(struct igb_tx_queue *txq)
1142 igb_tx_queue_release_mbufs(txq);
1143 rte_free(txq->sw_ring);
1149 eth_igb_tx_queue_release(void *txq)
1151 igb_tx_queue_release(txq);
1155 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1160 memset((void*)&txq->ctx_cache, 0,
1161 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1165 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1167 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1168 struct igb_tx_entry *txe = txq->sw_ring;
1170 struct e1000_hw *hw;
1172 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1173 /* Zero out HW ring memory */
1174 for (i = 0; i < txq->nb_tx_desc; i++) {
1175 txq->tx_ring[i] = zeroed_desc;
1178 /* Initialize ring entries */
1179 prev = (uint16_t)(txq->nb_tx_desc - 1);
1180 for (i = 0; i < txq->nb_tx_desc; i++) {
1181 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1183 txd->wb.status = E1000_TXD_STAT_DD;
1186 txe[prev].next_id = i;
1190 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1191 /* 82575 specific, each tx queue will use 2 hw contexts */
1192 if (hw->mac.type == e1000_82575)
1193 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1195 igb_reset_tx_queue_stat(txq);
1199 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1202 unsigned int socket_id,
1203 const struct rte_eth_txconf *tx_conf)
1205 const struct rte_memzone *tz;
1206 struct igb_tx_queue *txq;
1207 struct e1000_hw *hw;
1210 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1213 * Validate number of transmit descriptors.
1214 * It must not exceed hardware maximum, and must be multiple
1217 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1218 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1223 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1226 if (tx_conf->tx_free_thresh != 0)
1227 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1228 "used for the 1G driver.");
1229 if (tx_conf->tx_rs_thresh != 0)
1230 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1231 "used for the 1G driver.");
1232 if (tx_conf->tx_thresh.wthresh == 0)
1233 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1234 "consider setting the TX WTHRESH value to 4, 8, "
1237 /* Free memory prior to re-allocation if needed */
1238 if (dev->data->tx_queues[queue_idx] != NULL) {
1239 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1240 dev->data->tx_queues[queue_idx] = NULL;
1243 /* First allocate the tx queue data structure */
1244 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1245 RTE_CACHE_LINE_SIZE);
1250 * Allocate TX ring hardware descriptors. A memzone large enough to
1251 * handle the maximum ring size is allocated in order to allow for
1252 * resizing in later calls to the queue setup function.
1254 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1255 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1258 igb_tx_queue_release(txq);
1262 txq->nb_tx_desc = nb_desc;
1263 txq->pthresh = tx_conf->tx_thresh.pthresh;
1264 txq->hthresh = tx_conf->tx_thresh.hthresh;
1265 txq->wthresh = tx_conf->tx_thresh.wthresh;
1266 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1268 txq->queue_id = queue_idx;
1269 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1270 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1271 txq->port_id = dev->data->port_id;
1273 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1274 #ifndef RTE_LIBRTE_XEN_DOM0
1275 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1277 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1279 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1280 /* Allocate software ring */
1281 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1282 sizeof(struct igb_tx_entry) * nb_desc,
1283 RTE_CACHE_LINE_SIZE);
1284 if (txq->sw_ring == NULL) {
1285 igb_tx_queue_release(txq);
1288 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1289 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1291 igb_reset_tx_queue(txq, dev);
1292 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1293 dev->data->tx_queues[queue_idx] = txq;
1299 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1303 if (rxq->sw_ring != NULL) {
1304 for (i = 0; i < rxq->nb_rx_desc; i++) {
1305 if (rxq->sw_ring[i].mbuf != NULL) {
1306 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1307 rxq->sw_ring[i].mbuf = NULL;
1314 igb_rx_queue_release(struct igb_rx_queue *rxq)
1317 igb_rx_queue_release_mbufs(rxq);
1318 rte_free(rxq->sw_ring);
1324 eth_igb_rx_queue_release(void *rxq)
1326 igb_rx_queue_release(rxq);
1330 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1332 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1335 /* Zero out HW ring memory */
1336 for (i = 0; i < rxq->nb_rx_desc; i++) {
1337 rxq->rx_ring[i] = zeroed_desc;
1341 rxq->pkt_first_seg = NULL;
1342 rxq->pkt_last_seg = NULL;
1346 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1349 unsigned int socket_id,
1350 const struct rte_eth_rxconf *rx_conf,
1351 struct rte_mempool *mp)
1353 const struct rte_memzone *rz;
1354 struct igb_rx_queue *rxq;
1355 struct e1000_hw *hw;
1358 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1361 * Validate number of receive descriptors.
1362 * It must not exceed hardware maximum, and must be multiple
1365 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1366 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1370 /* Free memory prior to re-allocation if needed */
1371 if (dev->data->rx_queues[queue_idx] != NULL) {
1372 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1373 dev->data->rx_queues[queue_idx] = NULL;
1376 /* First allocate the RX queue data structure. */
1377 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1378 RTE_CACHE_LINE_SIZE);
1382 rxq->nb_rx_desc = nb_desc;
1383 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1384 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1385 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1386 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1388 rxq->drop_en = rx_conf->rx_drop_en;
1389 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1390 rxq->queue_id = queue_idx;
1391 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1392 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1393 rxq->port_id = dev->data->port_id;
1394 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1398 * Allocate RX ring hardware descriptors. A memzone large enough to
1399 * handle the maximum ring size is allocated in order to allow for
1400 * resizing in later calls to the queue setup function.
1402 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1403 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1405 igb_rx_queue_release(rxq);
1408 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1409 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1410 #ifndef RTE_LIBRTE_XEN_DOM0
1411 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1413 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1415 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1417 /* Allocate software ring. */
1418 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1419 sizeof(struct igb_rx_entry) * nb_desc,
1420 RTE_CACHE_LINE_SIZE);
1421 if (rxq->sw_ring == NULL) {
1422 igb_rx_queue_release(rxq);
1425 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1426 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1428 dev->data->rx_queues[queue_idx] = rxq;
1429 igb_reset_rx_queue(rxq);
1435 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1437 #define IGB_RXQ_SCAN_INTERVAL 4
1438 volatile union e1000_adv_rx_desc *rxdp;
1439 struct igb_rx_queue *rxq;
1442 if (rx_queue_id >= dev->data->nb_rx_queues) {
1443 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1447 rxq = dev->data->rx_queues[rx_queue_id];
1448 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1450 while ((desc < rxq->nb_rx_desc) &&
1451 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1452 desc += IGB_RXQ_SCAN_INTERVAL;
1453 rxdp += IGB_RXQ_SCAN_INTERVAL;
1454 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1455 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1456 desc - rxq->nb_rx_desc]);
1463 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1465 volatile union e1000_adv_rx_desc *rxdp;
1466 struct igb_rx_queue *rxq = rx_queue;
1469 if (unlikely(offset >= rxq->nb_rx_desc))
1471 desc = rxq->rx_tail + offset;
1472 if (desc >= rxq->nb_rx_desc)
1473 desc -= rxq->nb_rx_desc;
1475 rxdp = &rxq->rx_ring[desc];
1476 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1480 igb_dev_clear_queues(struct rte_eth_dev *dev)
1483 struct igb_tx_queue *txq;
1484 struct igb_rx_queue *rxq;
1486 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1487 txq = dev->data->tx_queues[i];
1489 igb_tx_queue_release_mbufs(txq);
1490 igb_reset_tx_queue(txq, dev);
1494 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1495 rxq = dev->data->rx_queues[i];
1497 igb_rx_queue_release_mbufs(rxq);
1498 igb_reset_rx_queue(rxq);
1504 * Receive Side Scaling (RSS).
1505 * See section 7.1.1.7 in the following document:
1506 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1509 * The source and destination IP addresses of the IP header and the source and
1510 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1511 * against a configurable random key to compute a 32-bit RSS hash result.
1512 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1513 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1514 * RSS output index which is used as the RX queue index where to store the
1516 * The following output is supplied in the RX write-back descriptor:
1517 * - 32-bit result of the Microsoft RSS hash function,
1518 * - 4-bit RSS type field.
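* 
* For example, a packet whose 32-bit RSS hash is 0x1234ABCD has
* 0x4D (77) as its 7 LSBs (0xCD & 0x7F), so RETA entry 77 selects the
* destination RX queue.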
1522 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1523 * Used as the default key.
1525 static uint8_t rss_intel_key[40] = {
1526 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1527 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1528 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1529 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1530 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1534 igb_rss_disable(struct rte_eth_dev *dev)
1536 struct e1000_hw *hw;
1539 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1540 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1541 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1542 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1546 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1554 hash_key = rss_conf->rss_key;
1555 if (hash_key != NULL) {
1556 /* Fill in RSS hash key */
1557 for (i = 0; i < 10; i++) {
1558 rss_key = hash_key[(i * 4)];
1559 rss_key |= hash_key[(i * 4) + 1] << 8;
1560 rss_key |= hash_key[(i * 4) + 2] << 16;
1561 rss_key |= hash_key[(i * 4) + 3] << 24;
1562 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1566 /* Set configured hashing protocols in MRQC register */
1567 rss_hf = rss_conf->rss_hf;
1568 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1569 if (rss_hf & ETH_RSS_IPV4)
1570 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1571 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1572 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1573 if (rss_hf & ETH_RSS_IPV6)
1574 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1575 if (rss_hf & ETH_RSS_IPV6_EX)
1576 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1577 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1578 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1579 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1580 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1581 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1582 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1583 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1584 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1585 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1586 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1587 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1591 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1592 struct rte_eth_rss_conf *rss_conf)
1594 struct e1000_hw *hw;
1598 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1601 * Before changing anything, first check that the update RSS operation
1602 * does not attempt to disable RSS, if RSS was enabled at
1603 * initialization time, or does not attempt to enable RSS, if RSS was
1604 * disabled at initialization time.
1606 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1607 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1608 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1609 if (rss_hf != 0) /* Enable RSS */
1611 return 0; /* Nothing to do */
1614 if (rss_hf == 0) /* Disable RSS */
1616 igb_hw_rss_hash_set(hw, rss_conf);
1620 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1621 struct rte_eth_rss_conf *rss_conf)
1623 struct e1000_hw *hw;
1630 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1631 hash_key = rss_conf->rss_key;
1632 if (hash_key != NULL) {
1633 /* Return RSS hash key */
1634 for (i = 0; i < 10; i++) {
1635 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1636 hash_key[(i * 4)] = rss_key & 0x000000FF;
1637 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1638 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1639 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1643 /* Get RSS functions configured in MRQC register */
1644 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1645 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1646 rss_conf->rss_hf = 0;
1650 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1651 rss_hf |= ETH_RSS_IPV4;
1652 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1653 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1654 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1655 rss_hf |= ETH_RSS_IPV6;
1656 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1657 rss_hf |= ETH_RSS_IPV6_EX;
1658 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1659 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1660 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1661 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1662 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1663 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1664 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1665 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1666 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1667 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1668 rss_conf->rss_hf = rss_hf;
1673 igb_rss_configure(struct rte_eth_dev *dev)
1675 struct rte_eth_rss_conf rss_conf;
1676 struct e1000_hw *hw;
1680 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1682 /* Fill in redirection table. */
1683 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1684 for (i = 0; i < 128; i++) {
1691 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1692 i % dev->data->nb_rx_queues : 0);
1693 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1695 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
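/*
 * Example: with 4 RX queues the RETA entries cycle through queue
 * indexes 0,1,2,3,0,1,...; on 82575 the value is additionally shifted
 * left by 6 (see "shift" above) to match that device's RETA entry format.
 */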
1699 * Configure the RSS key and the RSS protocols used to compute
1700 * the RSS hash of input packets.
1702 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1703 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1704 igb_rss_disable(dev);
1707 if (rss_conf.rss_key == NULL)
1708 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1709 igb_hw_rss_hash_set(hw, &rss_conf);
1713 * Check whether the MAC type supports VMDq.
1714 * Return 1 if it does, 0 otherwise.
1717 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1719 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1721 switch (hw->mac.type) {
1742 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1748 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1750 struct rte_eth_vmdq_rx_conf *cfg;
1751 struct e1000_hw *hw;
1752 uint32_t mrqc, vt_ctl, vmolr, rctl;
1755 PMD_INIT_FUNC_TRACE();
1757 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1758 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1760 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1761 if (igb_is_vmdq_supported(dev) == 0)
1764 igb_rss_disable(dev);
1766 /* RCTL: enable VLAN filter */
1767 rctl = E1000_READ_REG(hw, E1000_RCTL);
1768 rctl |= E1000_RCTL_VFE;
1769 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1771 /* MRQC: enable vmdq */
1772 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1773 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1774 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1776 /* VTCTL: pool selection according to VLAN tag */
1777 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1778 if (cfg->enable_default_pool)
1779 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1780 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1781 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1783 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1784 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1785 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1786 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1789 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1790 vmolr |= E1000_VMOLR_AUPE;
1791 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1792 vmolr |= E1000_VMOLR_ROMPE;
1793 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1794 vmolr |= E1000_VMOLR_ROPE;
1795 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1796 vmolr |= E1000_VMOLR_BAM;
1797 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1798 vmolr |= E1000_VMOLR_MPME;
1800 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1804 * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1805 * Both 82576 and 82580 support it.
1807 if (hw->mac.type != e1000_i350) {
1808 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1809 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1810 vmolr |= E1000_VMOLR_STRVLAN;
1811 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1815 /* VFTA - enable all vlan filters */
1816 for (i = 0; i < IGB_VFTA_SIZE; i++)
1817 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1819 /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
1820 if (hw->mac.type != e1000_82580)
1821 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1824 * RAH/RAL - allow pools to receive on specific MAC addresses.
1825 * In this case, all pools should be able to receive on MAC address 0.
1827 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1828 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1830 /* VLVF: set up filters for vlan tags as configured */
1831 for (i = 0; i < cfg->nb_pool_maps; i++) {
1832 /* set vlan id in VF register and set the valid bit */
1833 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1834 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1835 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1836 E1000_VLVF_POOLSEL_MASK)));
1839 E1000_WRITE_FLUSH(hw);
1845 /*********************************************************************
1847 * Enable receive unit.
1849 **********************************************************************/
1852 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1854 struct igb_rx_entry *rxe = rxq->sw_ring;
1858 /* Initialize software ring entries. */
1859 for (i = 0; i < rxq->nb_rx_desc; i++) {
1860 volatile union e1000_adv_rx_desc *rxd;
1861 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1864 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1865 "queue_id=%hu", rxq->queue_id);
1869 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1870 rxd = &rxq->rx_ring[i];
1871 rxd->read.hdr_addr = dma_addr;
1872 rxd->read.pkt_addr = dma_addr;
1879 #define E1000_MRQC_DEF_Q_SHIFT (3)
1881 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1883 struct e1000_hw *hw =
1884 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1887 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1889 * SRIOV active scheme
1890 * FIXME: add support for RSS together with VMDq & SR-IOV
1892 mrqc = E1000_MRQC_ENABLE_VMDQ;
1893 /* 011b: ignore Def_Q; the default pool is taken from VT_CTL.DEF_PL */
1894 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1895 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1896 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1898 * SRIOV inactive scheme
1900 switch (dev->data->dev_conf.rxmode.mq_mode) {
1902 igb_rss_configure(dev);
1904 case ETH_MQ_RX_VMDQ_ONLY:
1905 /*Configure general VMDQ only RX parameters*/
1906 igb_vmdq_rx_hw_configure(dev);
1908 case ETH_MQ_RX_NONE:
1909 /* If mq_mode is none, disable RSS. */
1911 igb_rss_disable(dev);
1920 eth_igb_rx_init(struct rte_eth_dev *dev)
1922 struct e1000_hw *hw;
1923 struct igb_rx_queue *rxq;
1928 uint16_t rctl_bsize;
1932 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1936 * Make sure receives are disabled while setting
1937 * up the descriptor ring.
1939 rctl = E1000_READ_REG(hw, E1000_RCTL);
1940 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1943 * Configure support of jumbo frames, if any.
1945 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1946 rctl |= E1000_RCTL_LPE;
1949 * Set the maximum packet length by default; it may be updated
1950 * later when dual VLAN is enabled or disabled.
1952 E1000_WRITE_REG(hw, E1000_RLPML,
1953 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1956 rctl &= ~E1000_RCTL_LPE;
1958 /* Configure and enable each RX queue. */
1960 dev->rx_pkt_burst = eth_igb_recv_pkts;
1961 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1965 rxq = dev->data->rx_queues[i];
1967 /* Allocate buffers for descriptor rings and set up queue */
1968 ret = igb_alloc_rx_queue_mbufs(rxq);
1973 * Reset crc_len in case it was changed after queue setup by a
1977 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1980 bus_addr = rxq->rx_ring_phys_addr;
1981 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1983 sizeof(union e1000_adv_rx_desc));
1984 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1985 (uint32_t)(bus_addr >> 32));
1986 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1988 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1991 * Configure RX buffer size.
1993 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1994 RTE_PKTMBUF_HEADROOM);
1995 if (buf_size >= 1024) {
1997 * Configure the BSIZEPACKET field of the SRRCTL
1998 * register of the queue.
1999 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2000 * If this field is equal to 0b, then RCTL.BSIZE
2001 * determines the RX packet buffer size.
2003 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2004 E1000_SRRCTL_BSIZEPKT_MASK);
2005 buf_size = (uint16_t) ((srrctl &
2006 E1000_SRRCTL_BSIZEPKT_MASK) <<
2007 E1000_SRRCTL_BSIZEPKT_SHIFT);
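/*
 * Example: a 2048-byte buffer programs BSIZEPACKET = 2 (1 KB units);
 * reading the field back gives 2 << E1000_SRRCTL_BSIZEPKT_SHIFT = 2048,
 * i.e. buf_size is rounded down to the 1 KB granularity the NIC uses.
 */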
2009 /* Add the dual VLAN tag length to support dual VLAN (QinQ) */
2010 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2011 2 * VLAN_TAG_SIZE) > buf_size){
2012 if (!dev->data->scattered_rx)
2014 "forcing scatter mode");
2015 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2016 dev->data->scattered_rx = 1;
2020 * Use BSIZE field of the device RCTL register.
2022 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2023 rctl_bsize = buf_size;
2024 if (!dev->data->scattered_rx)
2025 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2026 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2027 dev->data->scattered_rx = 1;
2030 /* Set whether packets are dropped when no descriptors are available */
2032 srrctl |= E1000_SRRCTL_DROP_EN;
2034 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2036 /* Enable this RX queue. */
2037 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2038 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2039 rxdctl &= 0xFFF00000;
2040 rxdctl |= (rxq->pthresh & 0x1F);
2041 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2042 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2043 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2046 if (dev->data->dev_conf.rxmode.enable_scatter) {
2047 if (!dev->data->scattered_rx)
2048 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2049 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2050 dev->data->scattered_rx = 1;
2054 * Setup BSIZE field of RCTL register, if needed.
2055 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2056 * register, since the code above configures the SRRCTL register of
2057 * the RX queue in such a case.
2058 * All configurable sizes are:
2059 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2060 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2061 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2062 * 2048: rctl |= E1000_RCTL_SZ_2048;
2063 * 1024: rctl |= E1000_RCTL_SZ_1024;
2064 * 512: rctl |= E1000_RCTL_SZ_512;
2065 * 256: rctl |= E1000_RCTL_SZ_256;
2067 if (rctl_bsize > 0) {
2068 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2069 rctl |= E1000_RCTL_SZ_512;
2070 else /* 256 <= buf_size < 512 - use 256 */
2071 rctl |= E1000_RCTL_SZ_256;
2075 * Configure RSS if the device is configured with multiple RX queues.
2077 igb_dev_mq_rx_configure(dev);
2079 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2080 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2083 * Setup the Checksum Register.
2084 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2086 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2087 rxcsum |= E1000_RXCSUM_PCSD;
2089 /* Enable both L3/L4 rx checksum offload */
2090 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2091 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2093 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2094 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2096 /* Setup the Receive Control Register. */
2097 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2098 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
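	/*
	 * At this point RCTL enables the receiver (EN), accepts broadcasts
	 * (BAM), disables loopback (LBM_NO), sets the descriptor minimum
	 * threshold to half the ring (RDMTS_HALF) and takes the multicast
	 * filter offset from mc_filter_type.
	 */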

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
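		/*
		 * Tail at nb_rx_desc - 1 hands all but one descriptor to the
		 * hardware; head and tail are only programmed after the
		 * receiver has been enabled, as the comment above requires.
		 */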
	}

	return 0;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
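		/*
		 * TXDCTL uses the same threshold layout as RXDCTL above
		 * (pthresh in bits 0-4, hthresh in 8-12, wthresh in 16-20);
		 * the queue-enable bit is set in the same write.
		 */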
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
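	/*
	 * TCTL now requests short-packet padding (PSP), retransmission on
	 * late collision (RTLC), the default collision threshold and the
	 * transmit enable bit; the collision distance is set by the
	 * base-driver helper below.
	 */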

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_rx_queue *rxq;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));
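	/*
	 * A VF cannot program the long-packet limit directly;
	 * e1000_rlpml_set_vf() presumably forwards the requested maximum
	 * (max_rx_pkt_len plus one VLAN tag) to the PF over the mailbox.
	 */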

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE) > buf_size){
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}

/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}