4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
76 /* Bit mask of the offload flags that require building a TX context descriptor. */
77 #define IGB_TX_OFFLOAD_MASK ( \
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
87 m = __rte_mbuf_raw_alloc(mp);
88 __rte_mbuf_sanity_check_raw(m, 0);
92 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
93 (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
95 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
96 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
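/*
 * Editor's note: the first macro uses the mbuf's current data offset and is
 * meant for the TX path, where data_off reflects the actual start of packet
 * data; the second assumes the default headroom of a freshly allocated mbuf
 * and is used when replenishing RX descriptors.
 */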
99 * Structure associated with each descriptor of the RX ring of a RX queue.
101 struct igb_rx_entry {
102 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
106 * Structure associated with each descriptor of the TX ring of a TX queue.
108 struct igb_tx_entry {
109 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
110 uint16_t next_id; /**< Index of next descriptor in ring. */
111 uint16_t last_id; /**< Index of last scattered descriptor. */
115 * Structure associated with each RX queue.
117 struct igb_rx_queue {
118 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
119 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
120 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
121 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
122 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
123 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
124 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
125 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
126 uint16_t nb_rx_desc; /**< number of RX descriptors. */
127 uint16_t rx_tail; /**< current value of RDT register. */
128 uint16_t nb_rx_hold; /**< number of held free RX desc. */
129 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
130 uint16_t queue_id; /**< RX queue index. */
131 uint16_t reg_idx; /**< RX queue register index. */
132 uint8_t port_id; /**< Device port identifier. */
133 uint8_t pthresh; /**< Prefetch threshold register. */
134 uint8_t hthresh; /**< Host threshold register. */
135 uint8_t wthresh; /**< Write-back threshold register. */
136 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
137 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
141 * Hardware context number
143 enum igb_advctx_num {
144 IGB_CTX_0 = 0, /**< CTX0 */
145 IGB_CTX_1 = 1, /**< CTX1 */
146 IGB_CTX_NUM = 2, /**< CTX_NUM */
149 /** Offload features */
150 union igb_vlan_macip {
153 uint16_t l2_l3_len; /**< Combined 7-bit L2 and 9-bit L3 lengths. */
155 /**< VLAN Tag Control Identifier (CPU order). */
160 * Compare mask for vlan_macip_lens.data,
161 * must be kept in sync with the igb_vlan_macip.f layout.
163 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
164 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
165 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
166 /** MAC+IP length. */
167 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
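/*
 * Illustrative sketch (editor's addition, not driver code): how L2/L3 header
 * lengths are packed into the 16-bit l2_l3_len field checked against the
 * masks above. The 7-bit L2 length occupies bits 15:9 and the 9-bit L3
 * length occupies bits 8:0, so e.g. a 14-byte Ethernet header and a 20-byte
 * IPv4 header pack to (14 << 9) | 20 = 0x1C14.
 */
static inline uint16_t
example_pack_l2_l3_len(uint8_t l2_len, uint16_t l3_len)
{
	return (uint16_t)(((uint16_t)(l2_len & 0x7F) << 9) | (l3_len & 0x1FF));
}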
170 * Structure used to check whether a new context descriptor needs to be built.
172 struct igb_advctx_info {
173 uint64_t flags; /**< ol_flags related to context build. */
174 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
175 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
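/*
 * The driver caches the last IGB_CTX_NUM contexts programmed into the
 * hardware, so that packets with identical offload requirements can reuse an
 * existing context descriptor instead of building a new one (see
 * what_advctx_update()).
 */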
179 * Structure associated with each TX queue.
181 struct igb_tx_queue {
182 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
183 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
184 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
185 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
186 uint32_t txd_type; /**< Device-specific TXD type */
187 uint16_t nb_tx_desc; /**< number of TX descriptors. */
188 uint16_t tx_tail; /**< Current value of TDT register. */
190 /**< Index of first used TX descriptor. */
191 uint16_t queue_id; /**< TX queue index. */
192 uint16_t reg_idx; /**< TX queue register index. */
193 uint8_t port_id; /**< Device port identifier. */
194 uint8_t pthresh; /**< Prefetch threshold register. */
195 uint8_t hthresh; /**< Host threshold register. */
196 uint8_t wthresh; /**< Write-back threshold register. */
198 /**< Current used hardware descriptor. */
200 /**< Start context position for transmit queue. */
201 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
202 /**< Hardware context history.*/
206 #define RTE_PMD_USE_PREFETCH
209 #ifdef RTE_PMD_USE_PREFETCH
210 #define rte_igb_prefetch(p) rte_prefetch0(p)
212 #define rte_igb_prefetch(p) do {} while(0)
215 #ifdef RTE_PMD_PACKET_PREFETCH
216 #define rte_packet_prefetch(p) rte_prefetch1(p)
218 #define rte_packet_prefetch(p) do {} while(0)
222 * Macro for the VMDq feature on 1 GbE NICs.
224 #define E1000_VMOLR_SIZE (8)
226 /*********************************************************************
230 **********************************************************************/
233 * Advanced context descriptors are almost the same between igb and ixgbe.
234 * This is kept as a separate function; there may be an optimization opportunity here.
235 * Rework is required to switch to the pre-defined values.
239 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
240 volatile struct e1000_adv_tx_context_desc *ctx_txd,
241 uint64_t ol_flags, uint32_t vlan_macip_lens)
243 uint32_t type_tucmd_mlhl;
244 uint32_t mss_l4len_idx;
245 uint32_t ctx_idx, ctx_curr;
248 ctx_curr = txq->ctx_curr;
249 ctx_idx = ctx_curr + txq->ctx_start;
254 if (ol_flags & PKT_TX_VLAN_PKT) {
255 cmp_mask |= TX_VLAN_CMP_MASK;
258 if (ol_flags & PKT_TX_IP_CKSUM) {
259 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
260 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
263 /* Specify which HW CTX to upload. */
264 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
265 switch (ol_flags & PKT_TX_L4_MASK) {
266 case PKT_TX_UDP_CKSUM:
267 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
268 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
269 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
270 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
272 case PKT_TX_TCP_CKSUM:
273 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
274 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
275 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
276 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
278 case PKT_TX_SCTP_CKSUM:
279 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
280 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
281 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
282 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
285 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
286 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
290 txq->ctx_cache[ctx_curr].flags = ol_flags;
291 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
292 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
293 vlan_macip_lens & cmp_mask;
295 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
296 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
297 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
298 ctx_txd->seqnum_seed = 0;
302 * Check which hardware context can be used. Use the existing match
303 * or create a new context descriptor.
305 static inline uint32_t
306 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
307 uint32_t vlan_macip_lens)
309 /* If match with the current context */
310 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
311 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
312 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
313 return txq->ctx_curr;
316 /* If match with the second context */
317 txq->ctx_curr ^= 1;
318 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
319 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
320 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
321 return txq->ctx_curr;
324 /* Mismatch with both cached contexts: a new context descriptor must be built */
325 return (IGB_CTX_NUM);
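/*
 * Editor's note: the two helpers below translate offload flags into
 * descriptor bits using small constant lookup tables indexed by a boolean
 * expression, e.g. l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0], which
 * avoids hard-to-predict branches in the hot transmit path.
 */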
328 static inline uint32_t
329 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
331 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
332 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
335 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
336 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
340 static inline uint32_t
341 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
343 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
344 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
348 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
351 struct igb_tx_queue *txq;
352 struct igb_tx_entry *sw_ring;
353 struct igb_tx_entry *txe, *txn;
354 volatile union e1000_adv_tx_desc *txr;
355 volatile union e1000_adv_tx_desc *txd;
356 struct rte_mbuf *tx_pkt;
357 struct rte_mbuf *m_seg;
358 union igb_vlan_macip vlan_macip_lens;
366 uint64_t buf_dma_addr;
367 uint32_t olinfo_status;
368 uint32_t cmd_type_len;
377 uint32_t new_ctx = 0;
381 sw_ring = txq->sw_ring;
383 tx_id = txq->tx_tail;
384 txe = &sw_ring[tx_id];
386 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
388 pkt_len = tx_pkt->pkt_len;
390 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
393 * The number of descriptors that must be allocated for a
394 * packet is the number of segments of that packet, plus 1
395 * Context Descriptor for the VLAN Tag Identifier, if any.
396 * Determine the last TX descriptor to allocate in the TX ring
397 * for the packet, starting from the current position (tx_id)
400 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
402 ol_flags = tx_pkt->ol_flags;
403 l2_l3_len.l2_len = tx_pkt->l2_len;
404 l2_l3_len.l3_len = tx_pkt->l3_len;
405 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
406 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
407 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
409 /* If a context descriptor needs to be built. */
411 ctx = what_advctx_update(txq, tx_ol_req,
412 vlan_macip_lens.data);
413 /* Only allocate a context descriptor if required. */
414 new_ctx = (ctx == IGB_CTX_NUM);
416 tx_last = (uint16_t) (tx_last + new_ctx);
418 if (tx_last >= txq->nb_tx_desc)
419 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
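/*
 * Editor's note: for example, with nb_tx_desc = 512, tx_id = 510 and a
 * 3-segment packet that also needs a new context descriptor,
 * tx_last = 510 + 3 - 1 + 1 = 513, which wraps to descriptor 1.
 */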
421 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
422 " tx_first=%u tx_last=%u",
423 (unsigned) txq->port_id,
424 (unsigned) txq->queue_id,
430 * Check if there are enough free descriptors in the TX ring
431 * to transmit the next packet.
432 * This operation is based on the following two rules:
434 * 1- Only check that the last needed TX descriptor can be
435 * allocated (by construction, if that descriptor is free,
436 * all intermediate ones are also free).
438 * For this purpose, the index of the last TX descriptor
439 * used for a packet (the "last descriptor" of a packet)
440 * is recorded in the TX entries (the last one included)
441 * that are associated with all TX descriptors allocated
444 * 2- Avoid allocating the last free TX descriptor of the
445 * ring, in order to never set the TDT register with the
446 * same value stored in parallel by the NIC in the TDH
447 * register, which makes the TX engine of the NIC enter
448 * a deadlock situation.
450 * By extension, avoid allocating a free descriptor that
451 * belongs to the last set of free descriptors allocated
452 * to the same packet previously transmitted.
456 * The "last descriptor" of the previously sent packet, if any,
457 * which used the last descriptor to allocate.
459 tx_end = sw_ring[tx_last].last_id;
462 * The next descriptor following that "last descriptor" in the
465 tx_end = sw_ring[tx_end].next_id;
468 * The "last descriptor" associated with that next descriptor.
470 tx_end = sw_ring[tx_end].last_id;
473 * Check that this descriptor is free.
475 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
482 * Set common flags of all TX Data Descriptors.
484 * The following bits must be set in all Data Descriptors:
485 * - E1000_ADVTXD_DTYP_DATA
486 * - E1000_ADVTXD_DCMD_DEXT
488 * The following bits must be set in the first Data Descriptor
489 * and are ignored in the other ones:
490 * - E1000_ADVTXD_DCMD_IFCS
491 * - E1000_ADVTXD_MAC_1588
492 * - E1000_ADVTXD_DCMD_VLE
494 * The following bits must only be set in the last Data Descriptor:
496 * - E1000_TXD_CMD_EOP
498 * The following bits can be set in any Data Descriptor, but
499 * are only set in the last Data Descriptor:
502 cmd_type_len = txq->txd_type |
503 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
504 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
505 #if defined(RTE_LIBRTE_IEEE1588)
506 if (ol_flags & PKT_TX_IEEE1588_TMST)
507 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
510 /* Setup TX Advanced context descriptor if required */
512 volatile struct e1000_adv_tx_context_desc *
515 ctx_txd = (volatile struct
516 e1000_adv_tx_context_desc *)
519 txn = &sw_ring[txe->next_id];
520 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
522 if (txe->mbuf != NULL) {
523 rte_pktmbuf_free_seg(txe->mbuf);
527 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
528 vlan_macip_lens.data);
530 txe->last_id = tx_last;
531 tx_id = txe->next_id;
535 /* Setup the TX Advanced Data Descriptor */
536 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
537 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
538 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
543 txn = &sw_ring[txe->next_id];
546 if (txe->mbuf != NULL)
547 rte_pktmbuf_free_seg(txe->mbuf);
551 * Set up transmit descriptor.
553 slen = (uint16_t) m_seg->data_len;
554 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
555 txd->read.buffer_addr =
556 rte_cpu_to_le_64(buf_dma_addr);
557 txd->read.cmd_type_len =
558 rte_cpu_to_le_32(cmd_type_len | slen);
559 txd->read.olinfo_status =
560 rte_cpu_to_le_32(olinfo_status);
561 txe->last_id = tx_last;
562 tx_id = txe->next_id;
565 } while (m_seg != NULL);
568 * The last packet data descriptor needs End Of Packet (EOP)
569 * and Report Status (RS).
571 txd->read.cmd_type_len |=
572 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
578 * Set the Transmit Descriptor Tail (TDT).
580 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
581 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
582 (unsigned) txq->port_id, (unsigned) txq->queue_id,
583 (unsigned) tx_id, (unsigned) nb_tx);
584 txq->tx_tail = tx_id;
589 /*********************************************************************
593 **********************************************************************/
594 static inline uint64_t
595 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
599 static uint64_t ip_pkt_types_map[16] = {
600 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
601 PKT_RX_IPV6_HDR, 0, 0, 0,
602 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
603 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
606 #if defined(RTE_LIBRTE_IEEE1588)
607 static uint32_t ip_pkt_etqf_map[8] = {
608 0, 0, 0, PKT_RX_IEEE1588_PTP,
612 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
613 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
614 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
616 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
617 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
619 return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
622 static inline uint64_t
623 rx_desc_status_to_pkt_flags(uint32_t rx_status)
627 /* Check if VLAN present */
628 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
630 #if defined(RTE_LIBRTE_IEEE1588)
631 if (rx_status & E1000_RXD_STAT_TMST)
632 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
637 static inline uint64_t
638 rx_desc_error_to_pkt_flags(uint32_t rx_status)
641 * Bit 30: IPE, IPv4 checksum error
642 * Bit 29: L4I, L4 integrity error
645 static uint64_t error_to_pkt_flags_map[4] = {
646 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
647 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
649 return error_to_pkt_flags_map[(rx_status >>
650 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
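/*
 * Editor's note: per the bit positions above, a status word with only IPE
 * (bit 30) set yields index 2 and maps to PKT_RX_IP_CKSUM_BAD, while only
 * L4I (bit 29) set yields index 1 and maps to PKT_RX_L4_CKSUM_BAD.
 */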
654 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
657 struct igb_rx_queue *rxq;
658 volatile union e1000_adv_rx_desc *rx_ring;
659 volatile union e1000_adv_rx_desc *rxdp;
660 struct igb_rx_entry *sw_ring;
661 struct igb_rx_entry *rxe;
662 struct rte_mbuf *rxm;
663 struct rte_mbuf *nmb;
664 union e1000_adv_rx_desc rxd;
667 uint32_t hlen_type_rss;
677 rx_id = rxq->rx_tail;
678 rx_ring = rxq->rx_ring;
679 sw_ring = rxq->sw_ring;
680 while (nb_rx < nb_pkts) {
682 * The order of operations here is important as the DD status
683 * bit must not be read after any other descriptor fields.
684 * rx_ring and rxdp are pointing to volatile data so the order
685 * of accesses cannot be reordered by the compiler. If they were
686 * not volatile, they could be reordered which could lead to
687 * using invalid descriptor fields when read from rxd.
689 rxdp = &rx_ring[rx_id];
690 staterr = rxdp->wb.upper.status_error;
691 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
698 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
699 * likely to be invalid and to be dropped by the various
700 * validation checks performed by the network stack.
702 * Allocate a new mbuf to replenish the RX ring descriptor.
703 * If the allocation fails:
704 * - arrange for that RX descriptor to be the first one
705 * being parsed the next time the receive function is
706 * invoked [on the same queue].
708 * - Stop parsing the RX ring and return immediately.
710 * This policy does not drop the packet received in the RX
711 * descriptor for which the allocation of a new mbuf failed.
712 * Thus, it allows that packet to be retrieved later if
713 * mbufs have been freed in the meantime.
714 * As a side effect, holding RX descriptors instead of
715 * systematically giving them back to the NIC may lead to
716 * RX ring exhaustion situations.
717 * However, the NIC can gracefully prevent such situations
718 * from happening by sending specific "back-pressure" flow control
719 * frames to its peer(s).
721 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
722 "staterr=0x%x pkt_len=%u",
723 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
724 (unsigned) rx_id, (unsigned) staterr,
725 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
727 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
729 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
730 "queue_id=%u", (unsigned) rxq->port_id,
731 (unsigned) rxq->queue_id);
732 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
737 rxe = &sw_ring[rx_id];
739 if (rx_id == rxq->nb_rx_desc)
742 /* Prefetch next mbuf while processing current one. */
743 rte_igb_prefetch(sw_ring[rx_id].mbuf);
746 * When the next RX descriptor is on a cache-line boundary,
747 * prefetch the next 4 RX descriptors and the next 8 mbuf pointers.
750 if ((rx_id & 0x3) == 0) {
751 rte_igb_prefetch(&rx_ring[rx_id]);
752 rte_igb_prefetch(&sw_ring[rx_id]);
758 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
759 rxdp->read.hdr_addr = dma_addr;
760 rxdp->read.pkt_addr = dma_addr;
763 * Initialize the returned mbuf.
764 * 1) setup generic mbuf fields:
765 * - number of segments,
768 * - RX port identifier.
769 * 2) integrate hardware offload data, if any:
771 * - IP checksum flag,
772 * - VLAN TCI, if any,
775 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
777 rxm->data_off = RTE_PKTMBUF_HEADROOM;
778 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
781 rxm->pkt_len = pkt_len;
782 rxm->data_len = pkt_len;
783 rxm->port = rxq->port_id;
785 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
786 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
787 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
788 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
790 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
791 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
792 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
793 rxm->ol_flags = pkt_flags;
796 * Store the mbuf address into the next entry of the array
797 * of returned packets.
799 rx_pkts[nb_rx++] = rxm;
801 rxq->rx_tail = rx_id;
804 * If the number of free RX descriptors is greater than the RX free
805 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
807 * Update the RDT with the value of the last processed RX descriptor
808 * minus 1, to guarantee that the RDT register is never equal to the
809 * RDH register, which creates a "full" ring situation from the
810 * hardware point of view...
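* Editor's note: for example, with a 128-descriptor ring, if the next
* descriptor to process is rx_id = 0, the RDT register is written with
* 127, i.e. (rx_id - 1) modulo the ring size.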
812 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
813 if (nb_hold > rxq->rx_free_thresh) {
814 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
815 "nb_hold=%u nb_rx=%u",
816 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
817 (unsigned) rx_id, (unsigned) nb_hold,
819 rx_id = (uint16_t) ((rx_id == 0) ?
820 (rxq->nb_rx_desc - 1) : (rx_id - 1));
821 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
824 rxq->nb_rx_hold = nb_hold;
829 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
832 struct igb_rx_queue *rxq;
833 volatile union e1000_adv_rx_desc *rx_ring;
834 volatile union e1000_adv_rx_desc *rxdp;
835 struct igb_rx_entry *sw_ring;
836 struct igb_rx_entry *rxe;
837 struct rte_mbuf *first_seg;
838 struct rte_mbuf *last_seg;
839 struct rte_mbuf *rxm;
840 struct rte_mbuf *nmb;
841 union e1000_adv_rx_desc rxd;
842 uint64_t dma; /* Physical address of mbuf data buffer */
844 uint32_t hlen_type_rss;
854 rx_id = rxq->rx_tail;
855 rx_ring = rxq->rx_ring;
856 sw_ring = rxq->sw_ring;
859 * Retrieve RX context of current packet, if any.
861 first_seg = rxq->pkt_first_seg;
862 last_seg = rxq->pkt_last_seg;
864 while (nb_rx < nb_pkts) {
867 * The order of operations here is important as the DD status
868 * bit must not be read after any other descriptor fields.
869 * rx_ring and rxdp are pointing to volatile data so the order
870 * of accesses cannot be reordered by the compiler. If they were
871 * not volatile, they could be reordered which could lead to
872 * using invalid descriptor fields when read from rxd.
874 rxdp = &rx_ring[rx_id];
875 staterr = rxdp->wb.upper.status_error;
876 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
883 * Allocate a new mbuf to replenish the RX ring descriptor.
884 * If the allocation fails:
885 * - arrange for that RX descriptor to be the first one
886 * being parsed the next time the receive function is
887 * invoked [on the same queue].
889 * - Stop parsing the RX ring and return immediately.
891 * This policy does not drop the packet received in the RX
892 * descriptor for which the allocation of a new mbuf failed.
893 * Thus, it allows that packet to be retrieved later if
894 * mbufs have been freed in the meantime.
895 * As a side effect, holding RX descriptors instead of
896 * systematically giving them back to the NIC may lead to
897 * RX ring exhaustion situations.
898 * However, the NIC can gracefully prevent such situations
899 * from happening by sending specific "back-pressure" flow control
900 * frames to its peer(s).
902 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
903 "staterr=0x%x data_len=%u",
904 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
905 (unsigned) rx_id, (unsigned) staterr,
906 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
908 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
910 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
911 "queue_id=%u", (unsigned) rxq->port_id,
912 (unsigned) rxq->queue_id);
913 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
918 rxe = &sw_ring[rx_id];
920 if (rx_id == rxq->nb_rx_desc)
923 /* Prefetch next mbuf while processing current one. */
924 rte_igb_prefetch(sw_ring[rx_id].mbuf);
927 * When the next RX descriptor is on a cache-line boundary,
928 * prefetch the next 4 RX descriptors and the next 8 mbuf pointers.
931 if ((rx_id & 0x3) == 0) {
932 rte_igb_prefetch(&rx_ring[rx_id]);
933 rte_igb_prefetch(&sw_ring[rx_id]);
937 * Update RX descriptor with the physical address of the new
938 * data buffer of the new allocated mbuf.
942 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
943 rxdp->read.pkt_addr = dma;
944 rxdp->read.hdr_addr = dma;
947 * Set data length & data buffer address of mbuf.
949 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
950 rxm->data_len = data_len;
951 rxm->data_off = RTE_PKTMBUF_HEADROOM;
954 * If this is the first buffer of the received packet,
955 * set the pointer to the first mbuf of the packet and
956 * initialize its context.
957 * Otherwise, update the total length and the number of segments
958 * of the current scattered packet, and update the pointer to
959 * the last mbuf of the current packet.
961 if (first_seg == NULL) {
963 first_seg->pkt_len = data_len;
964 first_seg->nb_segs = 1;
966 first_seg->pkt_len += data_len;
967 first_seg->nb_segs++;
968 last_seg->next = rxm;
972 * If this is not the last buffer of the received packet,
973 * update the pointer to the last mbuf of the current scattered
974 * packet and continue to parse the RX ring.
976 if (! (staterr & E1000_RXD_STAT_EOP)) {
982 * This is the last buffer of the received packet.
983 * If the CRC is not stripped by the hardware:
984 * - Subtract the CRC length from the total packet length.
985 * - If the last buffer only contains the whole CRC or a part
986 * of it, free the mbuf associated to the last buffer.
987 * If part of the CRC is also contained in the previous
988 * mbuf, subtract the length of that CRC part from the
989 * data length of the previous mbuf.
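* For example, if the last mbuf holds 2 bytes (data_len = 2 <=
* ETHER_CRC_LEN), it is freed and the previous segment's data_len is
* trimmed by the remaining 2 CRC bytes.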
992 if (unlikely(rxq->crc_len > 0)) {
993 first_seg->pkt_len -= ETHER_CRC_LEN;
994 if (data_len <= ETHER_CRC_LEN) {
995 rte_pktmbuf_free_seg(rxm);
996 first_seg->nb_segs--;
997 last_seg->data_len = (uint16_t)
998 (last_seg->data_len -
999 (ETHER_CRC_LEN - data_len));
1000 last_seg->next = NULL;
1003 (uint16_t) (data_len - ETHER_CRC_LEN);
1007 * Initialize the first mbuf of the returned packet:
1008 * - RX port identifier,
1009 * - hardware offload data, if any:
1010 * - RSS flag & hash,
1011 * - IP checksum flag,
1012 * - VLAN TCI, if any,
1015 first_seg->port = rxq->port_id;
1016 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1019 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1020 * set in the pkt_flags field.
1022 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1023 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1024 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1025 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1026 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1027 first_seg->ol_flags = pkt_flags;
1029 /* Prefetch data of first segment, if configured to do so. */
1030 rte_packet_prefetch((char *)first_seg->buf_addr +
1031 first_seg->data_off);
1034 * Store the mbuf address into the next entry of the array
1035 * of returned packets.
1037 rx_pkts[nb_rx++] = first_seg;
1040 * Setup receipt context for a new packet.
1046 * Record index of the next RX descriptor to probe.
1048 rxq->rx_tail = rx_id;
1051 * Save receive context.
1053 rxq->pkt_first_seg = first_seg;
1054 rxq->pkt_last_seg = last_seg;
1057 * If the number of free RX descriptors is greater than the RX free
1058 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1060 * Update the RDT with the value of the last processed RX descriptor
1061 * minus 1, to guarantee that the RDT register is never equal to the
1062 * RDH register, which creates a "full" ring situation from the
1063 * hardware point of view...
1065 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1066 if (nb_hold > rxq->rx_free_thresh) {
1067 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1068 "nb_hold=%u nb_rx=%u",
1069 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1070 (unsigned) rx_id, (unsigned) nb_hold,
1072 rx_id = (uint16_t) ((rx_id == 0) ?
1073 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1074 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1077 rxq->nb_rx_hold = nb_hold;
1082 * Rings setup and release.
1084 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN should be
1085 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary instead.
1086 * This also makes better use of cache lines.
1087 * H/W supports cache line sizes of up to 128 bytes.
1089 #define IGB_ALIGN 128
1092 * Maximum number of Ring Descriptors.
1094 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1095 * descriptors should meet the following condition:
1096 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1098 #define IGB_MIN_RING_DESC 32
1099 #define IGB_MAX_RING_DESC 4096
1101 static const struct rte_memzone *
1102 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1103 uint16_t queue_id, uint32_t ring_size, int socket_id)
1105 char z_name[RTE_MEMZONE_NAMESIZE];
1106 const struct rte_memzone *mz;
1108 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1109 dev->driver->pci_drv.name, ring_name,
1110 dev->data->port_id, queue_id);
1111 mz = rte_memzone_lookup(z_name);
1115 #ifdef RTE_LIBRTE_XEN_DOM0
1116 return rte_memzone_reserve_bounded(z_name, ring_size,
1117 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1119 return rte_memzone_reserve_aligned(z_name, ring_size,
1120 socket_id, 0, IGB_ALIGN);
1125 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1129 if (txq->sw_ring != NULL) {
1130 for (i = 0; i < txq->nb_tx_desc; i++) {
1131 if (txq->sw_ring[i].mbuf != NULL) {
1132 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1133 txq->sw_ring[i].mbuf = NULL;
1140 igb_tx_queue_release(struct igb_tx_queue *txq)
1143 igb_tx_queue_release_mbufs(txq);
1144 rte_free(txq->sw_ring);
1150 eth_igb_tx_queue_release(void *txq)
1152 igb_tx_queue_release(txq);
1156 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1161 memset((void*)&txq->ctx_cache, 0,
1162 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1166 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1168 static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1170 struct igb_tx_entry *txe = txq->sw_ring;
1172 struct e1000_hw *hw;
1174 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1175 /* Zero out HW ring memory */
1176 for (i = 0; i < txq->nb_tx_desc; i++) {
1177 txq->tx_ring[i] = zeroed_desc;
1180 /* Initialize ring entries */
1181 prev = (uint16_t)(txq->nb_tx_desc - 1);
1182 for (i = 0; i < txq->nb_tx_desc; i++) {
1183 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1185 txd->wb.status = E1000_TXD_STAT_DD;
1188 txe[prev].next_id = i;
1192 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1193 /* 82575 specific, each tx queue will use 2 hw contexts */
1194 if (hw->mac.type == e1000_82575)
1195 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1197 igb_reset_tx_queue_stat(txq);
1201 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1204 unsigned int socket_id,
1205 const struct rte_eth_txconf *tx_conf)
1207 const struct rte_memzone *tz;
1208 struct igb_tx_queue *txq;
1209 struct e1000_hw *hw;
1212 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 * Validate the number of transmit descriptors.
1216 * It must not exceed the hardware maximum, and must be a multiple of IGB_ALIGN.
1219 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1220 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1225 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G driver.
1228 if (tx_conf->tx_free_thresh != 0)
1229 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1230 "used for the 1G driver.");
1231 if (tx_conf->tx_rs_thresh != 0)
1232 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1233 "used for the 1G driver.");
1234 if (tx_conf->tx_thresh.wthresh == 0)
1235 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1236 "consider setting the TX WTHRESH value to 4, 8, "
1239 /* Free memory prior to re-allocation if needed */
1240 if (dev->data->tx_queues[queue_idx] != NULL) {
1241 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1242 dev->data->tx_queues[queue_idx] = NULL;
1245 /* First allocate the tx queue data structure */
1246 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1247 RTE_CACHE_LINE_SIZE);
1252 * Allocate TX ring hardware descriptors. A memzone large enough to
1253 * handle the maximum ring size is allocated in order to allow for
1254 * resizing in later calls to the queue setup function.
1256 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1257 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1260 igb_tx_queue_release(txq);
1264 txq->nb_tx_desc = nb_desc;
1265 txq->pthresh = tx_conf->tx_thresh.pthresh;
1266 txq->hthresh = tx_conf->tx_thresh.hthresh;
1267 txq->wthresh = tx_conf->tx_thresh.wthresh;
1268 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1270 txq->queue_id = queue_idx;
1271 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1272 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1273 txq->port_id = dev->data->port_id;
1275 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1276 #ifndef RTE_LIBRTE_XEN_DOM0
1277 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1279 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1281 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1282 /* Allocate software ring */
1283 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1284 sizeof(struct igb_tx_entry) * nb_desc,
1285 RTE_CACHE_LINE_SIZE);
1286 if (txq->sw_ring == NULL) {
1287 igb_tx_queue_release(txq);
1290 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1291 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1293 igb_reset_tx_queue(txq, dev);
1294 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1295 dev->data->tx_queues[queue_idx] = txq;
1301 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1305 if (rxq->sw_ring != NULL) {
1306 for (i = 0; i < rxq->nb_rx_desc; i++) {
1307 if (rxq->sw_ring[i].mbuf != NULL) {
1308 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1309 rxq->sw_ring[i].mbuf = NULL;
1316 igb_rx_queue_release(struct igb_rx_queue *rxq)
1319 igb_rx_queue_release_mbufs(rxq);
1320 rte_free(rxq->sw_ring);
1326 eth_igb_rx_queue_release(void *rxq)
1328 igb_rx_queue_release(rxq);
1332 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1334 static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1338 /* Zero out HW ring memory */
1339 for (i = 0; i < rxq->nb_rx_desc; i++) {
1340 rxq->rx_ring[i] = zeroed_desc;
1344 rxq->pkt_first_seg = NULL;
1345 rxq->pkt_last_seg = NULL;
1349 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1352 unsigned int socket_id,
1353 const struct rte_eth_rxconf *rx_conf,
1354 struct rte_mempool *mp)
1356 const struct rte_memzone *rz;
1357 struct igb_rx_queue *rxq;
1358 struct e1000_hw *hw;
1361 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1364 * Validate the number of receive descriptors.
1365 * It must not exceed the hardware maximum, and must be a multiple of IGB_ALIGN.
1368 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1369 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1373 /* Free memory prior to re-allocation if needed */
1374 if (dev->data->rx_queues[queue_idx] != NULL) {
1375 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1376 dev->data->rx_queues[queue_idx] = NULL;
1379 /* First allocate the RX queue data structure. */
1380 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1381 RTE_CACHE_LINE_SIZE);
1385 rxq->nb_rx_desc = nb_desc;
1386 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1387 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1388 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1389 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1391 rxq->drop_en = rx_conf->rx_drop_en;
1392 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1393 rxq->queue_id = queue_idx;
1394 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1395 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1396 rxq->port_id = dev->data->port_id;
1397 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1401 * Allocate RX ring hardware descriptors. A memzone large enough to
1402 * handle the maximum ring size is allocated in order to allow for
1403 * resizing in later calls to the queue setup function.
1405 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1406 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1408 igb_rx_queue_release(rxq);
1411 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1412 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1413 #ifndef RTE_LIBRTE_XEN_DOM0
1414 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1416 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1418 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1420 /* Allocate software ring. */
1421 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1422 sizeof(struct igb_rx_entry) * nb_desc,
1423 RTE_CACHE_LINE_SIZE);
1424 if (rxq->sw_ring == NULL) {
1425 igb_rx_queue_release(rxq);
1428 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1429 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1431 dev->data->rx_queues[queue_idx] = rxq;
1432 igb_reset_rx_queue(rxq);
1438 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1440 #define IGB_RXQ_SCAN_INTERVAL 4
1441 volatile union e1000_adv_rx_desc *rxdp;
1442 struct igb_rx_queue *rxq;
1445 if (rx_queue_id >= dev->data->nb_rx_queues) {
1446 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1450 rxq = dev->data->rx_queues[rx_queue_id];
1451 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1453 while ((desc < rxq->nb_rx_desc) &&
1454 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1455 desc += IGB_RXQ_SCAN_INTERVAL;
1456 rxdp += IGB_RXQ_SCAN_INTERVAL;
1457 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1458 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1459 desc - rxq->nb_rx_desc]);
1466 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1468 volatile union e1000_adv_rx_desc *rxdp;
1469 struct igb_rx_queue *rxq = rx_queue;
1472 if (unlikely(offset >= rxq->nb_rx_desc))
1474 desc = rxq->rx_tail + offset;
1475 if (desc >= rxq->nb_rx_desc)
1476 desc -= rxq->nb_rx_desc;
1478 rxdp = &rxq->rx_ring[desc];
1479 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1483 igb_dev_clear_queues(struct rte_eth_dev *dev)
1486 struct igb_tx_queue *txq;
1487 struct igb_rx_queue *rxq;
1489 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1490 txq = dev->data->tx_queues[i];
1492 igb_tx_queue_release_mbufs(txq);
1493 igb_reset_tx_queue(txq, dev);
1497 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1498 rxq = dev->data->rx_queues[i];
1500 igb_rx_queue_release_mbufs(rxq);
1501 igb_reset_rx_queue(rxq);
1507 * Receive Side Scaling (RSS).
1508 * See section 7.1.1.7 in the following document:
1509 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1512 * The source and destination IP addresses of the IP header and the source and
1513 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1514 * against a configurable random key to compute a 32-bit RSS hash result.
1515 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1516 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1517 * RSS output index, which is used as the RX queue index where the received packet is stored.
1519 * The following output is supplied in the RX write-back descriptor:
1520 * - 32-bit result of the Microsoft RSS hash function,
1521 * - 4-bit RSS type field.
1525 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1526 * Used as the default key.
1528 static uint8_t rss_intel_key[40] = {
1529 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1530 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1531 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1532 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1533 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1537 igb_rss_disable(struct rte_eth_dev *dev)
1539 struct e1000_hw *hw;
1542 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1543 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1544 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1545 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1549 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1557 hash_key = rss_conf->rss_key;
1558 if (hash_key != NULL) {
1559 /* Fill in RSS hash key */
1560 for (i = 0; i < 10; i++) {
1561 rss_key = hash_key[(i * 4)];
1562 rss_key |= hash_key[(i * 4) + 1] << 8;
1563 rss_key |= hash_key[(i * 4) + 2] << 16;
1564 rss_key |= hash_key[(i * 4) + 3] << 24;
1565 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
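/*
 * Editor's note: each 32-bit RSSRK register thus holds 4 consecutive
 * key bytes, least significant byte first, so the 40-byte key fills
 * registers RSSRK(0) through RSSRK(9).
 */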
1569 /* Set configured hashing protocols in MRQC register */
1570 rss_hf = rss_conf->rss_hf;
1571 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1572 if (rss_hf & ETH_RSS_IPV4)
1573 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1574 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1575 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1576 if (rss_hf & ETH_RSS_IPV6)
1577 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1578 if (rss_hf & ETH_RSS_IPV6_EX)
1579 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1580 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1581 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1582 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1583 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1584 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1585 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1586 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1587 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1588 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1589 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1590 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1594 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1595 struct rte_eth_rss_conf *rss_conf)
1597 struct e1000_hw *hw;
1601 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604 * Before changing anything, first check that the RSS update operation
1605 * does not attempt to disable RSS if RSS was enabled at
1606 * initialization time, or to enable RSS if it was
1607 * disabled at initialization time.
1609 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1610 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1611 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1612 if (rss_hf != 0) /* Enable RSS */
1614 return 0; /* Nothing to do */
1617 if (rss_hf == 0) /* Disable RSS */
1619 igb_hw_rss_hash_set(hw, rss_conf);
1623 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1624 struct rte_eth_rss_conf *rss_conf)
1626 struct e1000_hw *hw;
1633 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1634 hash_key = rss_conf->rss_key;
1635 if (hash_key != NULL) {
1636 /* Return RSS hash key */
1637 for (i = 0; i < 10; i++) {
1638 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1639 hash_key[(i * 4)] = rss_key & 0x000000FF;
1640 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1641 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1642 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1646 /* Get RSS functions configured in MRQC register */
1647 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1648 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1649 rss_conf->rss_hf = 0;
1653 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1654 rss_hf |= ETH_RSS_IPV4;
1655 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1656 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1657 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1658 rss_hf |= ETH_RSS_IPV6;
1659 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1660 rss_hf |= ETH_RSS_IPV6_EX;
1661 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1662 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1663 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1664 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1665 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1666 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1667 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1668 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1669 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1670 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1671 rss_conf->rss_hf = rss_hf;
1676 igb_rss_configure(struct rte_eth_dev *dev)
1678 struct rte_eth_rss_conf rss_conf;
1679 struct e1000_hw *hw;
1683 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1685 /* Fill in redirection table. */
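/*
 * Editor's note: with the round-robin fill below, e.g. 4 RX queues
 * result in the 128 RETA entries cycling through queue indices
 * 0, 1, 2, 3, 0, 1, ...
 */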
1686 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1687 for (i = 0; i < 128; i++) {
1694 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1695 i % dev->data->nb_rx_queues : 0);
1696 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1698 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1702 * Configure the RSS key and the RSS protocols used to compute
1703 * the RSS hash of input packets.
1705 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1706 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1707 igb_rss_disable(dev);
1710 if (rss_conf.rss_key == NULL)
1711 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1712 igb_hw_rss_hash_set(hw, &rss_conf);
1716 * Check whether the MAC type supports VMDq.
1717 * Return 1 if it does; otherwise, return 0.
1720 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1722 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1724 switch (hw->mac.type) {
1745 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1751 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1753 struct rte_eth_vmdq_rx_conf *cfg;
1754 struct e1000_hw *hw;
1755 uint32_t mrqc, vt_ctl, vmolr, rctl;
1758 PMD_INIT_FUNC_TRACE();
1760 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1761 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1763 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1764 if (igb_is_vmdq_supported(dev) == 0)
1767 igb_rss_disable(dev);
1769 /* RCTL: enable VLAN filtering */
1770 rctl = E1000_READ_REG(hw, E1000_RCTL);
1771 rctl |= E1000_RCTL_VFE;
1772 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1774 /* MRQC: enable vmdq */
1775 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1776 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1777 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1779 /* VTCTL: pool selection according to VLAN tag */
1780 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1781 if (cfg->enable_default_pool)
1782 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1783 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1784 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1786 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1787 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1788 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1789 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1792 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1793 vmolr |= E1000_VMOLR_AUPE;
1794 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1795 vmolr |= E1000_VMOLR_ROMPE;
1796 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1797 vmolr |= E1000_VMOLR_ROPE;
1798 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1799 vmolr |= E1000_VMOLR_BAM;
1800 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1801 vmolr |= E1000_VMOLR_MPME;
1803 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1807 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1808 * Both the 82576 and the 82580 support it.
1810 if (hw->mac.type != e1000_i350) {
1811 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1812 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1813 vmolr |= E1000_VMOLR_STRVLAN;
1814 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1818 /* VFTA - enable all vlan filters */
1819 for (i = 0; i < IGB_VFTA_SIZE; i++)
1820 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1822 /* VFRE: enable RX for all 8 pools; both the 82576 and the i350 support it */
1823 if (hw->mac.type != e1000_82580)
1824 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1827 * RAH/RAL - allow pools to read specific MAC addresses.
1828 * In this case, all pools should be able to read from MAC address 0.
1830 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1831 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1833 /* VLVF: set up filters for VLAN tags as configured */
1834 for (i = 0; i < cfg->nb_pool_maps; i++) {
1835 /* set the VLAN ID in the VLVF register and set the valid bit */
1836 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1837 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1838 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1839 E1000_VLVF_POOLSEL_MASK)));
1842 E1000_WRITE_FLUSH(hw);
1848 /*********************************************************************
1850 * Enable receive unit.
1852 **********************************************************************/
1855 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1857 struct igb_rx_entry *rxe = rxq->sw_ring;
1861 /* Initialize software ring entries. */
1862 for (i = 0; i < rxq->nb_rx_desc; i++) {
1863 volatile union e1000_adv_rx_desc *rxd;
1864 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1867 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1868 "queue_id=%hu", rxq->queue_id);
1872 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1873 rxd = &rxq->rx_ring[i];
1874 rxd->read.hdr_addr = dma_addr;
1875 rxd->read.pkt_addr = dma_addr;
1882 #define E1000_MRQC_DEF_Q_SHIFT (3)
1884 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1886 struct e1000_hw *hw =
1887 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1890 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1892 * SR-IOV active scheme.
1893 * FIXME: handle RSS together with VMDq & SR-IOV.
1895 mrqc = E1000_MRQC_ENABLE_VMDQ;
1896 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1897 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1898 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1899 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1901 * SRIOV inactive scheme
1903 switch (dev->data->dev_conf.rxmode.mq_mode) {
1905 igb_rss_configure(dev);
1907 case ETH_MQ_RX_VMDQ_ONLY:
1908 /* Configure general VMDq-only RX parameters */
1909 igb_vmdq_rx_hw_configure(dev);
1911 case ETH_MQ_RX_NONE:
1912 /* if mq_mode is none, disable RSS mode. */
1914 igb_rss_disable(dev);
1923 eth_igb_rx_init(struct rte_eth_dev *dev)
1925 struct e1000_hw *hw;
1926 struct igb_rx_queue *rxq;
1927 struct rte_pktmbuf_pool_private *mbp_priv;
1932 uint16_t rctl_bsize;
1936 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1940 * Make sure receives are disabled while setting
1941 * up the descriptor ring.
1943 rctl = E1000_READ_REG(hw, E1000_RCTL);
1944 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1947 * Configure support of jumbo frames, if any.
1949 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1950 rctl |= E1000_RCTL_LPE;
1953 * Set the maximum packet length by default; it may be updated
1954 * later when dual VLAN is enabled or disabled.
1956 E1000_WRITE_REG(hw, E1000_RLPML,
1957 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1960 rctl &= ~E1000_RCTL_LPE;
1962 /* Configure and enable each RX queue. */
1964 dev->rx_pkt_burst = eth_igb_recv_pkts;
1965 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1969 rxq = dev->data->rx_queues[i];
1971 /* Allocate buffers for descriptor rings and set up queue */
1972 ret = igb_alloc_rx_queue_mbufs(rxq);
1977 * Reset crc_len in case it was changed after queue setup by a
1981 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1984 bus_addr = rxq->rx_ring_phys_addr;
1985 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1987 sizeof(union e1000_adv_rx_desc));
1988 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1989 (uint32_t)(bus_addr >> 32));
1990 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1992 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1995 * Configure RX buffer size.
1997 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1998 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1999 RTE_PKTMBUF_HEADROOM);
2000 if (buf_size >= 1024) {
2002 * Configure the BSIZEPACKET field of the SRRCTL
2003 * register of the queue.
2004 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2005 * If this field is equal to 0b, then RCTL.BSIZE
2006 * determines the RX packet buffer size.
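* For example, with the 1 KB resolution above, a 2048-byte buffer yields
* BSIZEPACKET = 2, i.e. an effective RX buffer size of 2 KB.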
2008 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2009 E1000_SRRCTL_BSIZEPKT_MASK);
2010 buf_size = (uint16_t) ((srrctl &
2011 E1000_SRRCTL_BSIZEPKT_MASK) <<
2012 E1000_SRRCTL_BSIZEPKT_SHIFT);
2014 /* Add the dual VLAN tag length to account for dual VLAN support */
2015 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2016 2 * VLAN_TAG_SIZE) > buf_size){
2017 if (!dev->data->scattered_rx)
2019 "forcing scatter mode");
2020 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2021 dev->data->scattered_rx = 1;
2025 * Use BSIZE field of the device RCTL register.
2027 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2028 rctl_bsize = buf_size;
2029 if (!dev->data->scattered_rx)
2030 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2031 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2032 dev->data->scattered_rx = 1;
2035 /* Set if packets are dropped when no descriptors available */
2037 srrctl |= E1000_SRRCTL_DROP_EN;
2039 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2041 /* Enable this RX queue. */
2042 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2043 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2044 rxdctl &= 0xFFF00000;
2045 rxdctl |= (rxq->pthresh & 0x1F);
2046 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2047 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2048 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2051 if (dev->data->dev_conf.rxmode.enable_scatter) {
2052 if (!dev->data->scattered_rx)
2053 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2054 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2055 dev->data->scattered_rx = 1;
2059 * Setup BSIZE field of RCTL register, if needed.
2060 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2061 * register, since the code above configures the SRRCTL register of
2062 * the RX queue in such a case.
2063 * All configurable sizes are:
2064 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2065 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2066 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2067 * 2048: rctl |= E1000_RCTL_SZ_2048;
2068 * 1024: rctl |= E1000_RCTL_SZ_1024;
2069 * 512: rctl |= E1000_RCTL_SZ_512;
2070 * 256: rctl |= E1000_RCTL_SZ_256;
2072 if (rctl_bsize > 0) {
2073 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2074 rctl |= E1000_RCTL_SZ_512;
2075 else /* 256 <= buf_size < 512 - use 256 */
2076 rctl |= E1000_RCTL_SZ_256;
2080 * Configure RSS if device configured with multiple RX queues.
2082 igb_dev_mq_rx_configure(dev);
2084 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2085 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2088 * Setup the Checksum Register.
2089 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2091 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2092 rxcsum |= E1000_RXCSUM_PCSD;
2094 /* Enable both L3/L4 rx checksum offload */
2095 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2096 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2098 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2099 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* Set the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw,
					E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not strip Ethernet CRC. */

		/* Clear the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw,
					E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}
	/* Clear the multicast offset field, then program the filter type. */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	/*
	 * Setup the HW RX Head and Tail descriptor pointers.
	 * This needs to be done after enabling receives. Writing
	 * RDT = nb_rx_desc - 1 hands all but one descriptor to the
	 * hardware; the ring is full when the tail is one behind the head.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx),
				rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Setup the Base and Length of the TX Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx),
				(uint32_t)bus_addr);

		/* Setup the HW TX Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}
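	/*
	 * Note: with TDH = TDT = 0 each ring starts empty; the driver
	 * advances TDT as it queues packets, and the hardware transmits
	 * descriptors until its head catches up with the tail.
	 */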
	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
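/*
 * Note: the collision threshold and collision distance programmed above
 * only matter for half-duplex operation; on full-duplex links no
 * collisions occur and the hardware ignores these settings.
 */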
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl;
	uint32_t rxdctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Set the VF maximum RX packet length, allowing for a VLAN tag. */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue. */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
		/* Use the advanced one-buffer receive descriptor format. */
		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* Add dual VLAN length so dual VLAN frames still fit. */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst =
					eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
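		/*
		 * The sizing policy above mirrors eth_igb_rx_init(). Note
		 * that rctl_bsize has no effect here, since this function
		 * never programs RCTL: on a VF that register belongs to
		 * the physical function.
		 */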
		/* Drop packets for which no descriptor is available,
		 * if the queue was configured to do so. */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround for 82576 VF erratum:
			 * force WTHRESH to 1 to avoid descriptor write-back
			 * sometimes not being triggered.
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
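	/*
	 * Note: queue registers are indexed directly by i in the loop
	 * above; within a VF's register space, queues are numbered from 0
	 * and the hardware maps them onto the physical queues assigned to
	 * this virtual function.
	 */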
	/* Force scattered RX if the application requested it explicitly. */
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW RX Head and Tail descriptor pointers.
	 * This needs to be done after enabling the queues.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Setup the Base and Length of the TX Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW TX Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround for 82576 VF erratum:
			 * force WTHRESH to 1 to avoid descriptor write-back
			 * sometimes not being triggered.
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}
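/*
 * Usage sketch (illustrative only; the caller shown is an assumption,
 * not defined in this file): a VF device-start routine would invoke
 * these helpers roughly as follows.
 *
 *	eth_igbvf_tx_init(dev);
 *	ret = eth_igbvf_rx_init(dev);
 *	if (ret != 0)
 *		return ret;	// mbuf allocation for an RX ring failed
 */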