4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
76 #define IGB_RSS_OFFLOAD_ALL ( \
82 ETH_RSS_IPV6_TCP_EX | \
/* Bit mask indicating which bits are required for building a TX context */
88 #define IGB_TX_OFFLOAD_MASK ( \
93 static inline struct rte_mbuf *
94 rte_rxmbuf_alloc(struct rte_mempool *mp)
98 m = __rte_mbuf_raw_alloc(mp);
99 __rte_mbuf_sanity_check_raw(m, 0);
103 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
104 (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
106 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
107 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
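/*
 * Illustrative sketch (not part of the driver): these macros turn an mbuf
 * into the bus address that is programmed into a descriptor. A TX data
 * descriptor takes the address of the current payload (buf_physaddr +
 * data_off), while a freshly allocated RX buffer is posted at the default
 * headroom offset, e.g.:
 *
 *	txd->read.buffer_addr =
 *		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m_seg));
 *	rxd->read.pkt_addr =
 *		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
 */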
 * Structure associated with each descriptor of the RX ring of an RX queue.
112 struct igb_rx_entry {
113 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
117 * Structure associated with each descriptor of the TX ring of a TX queue.
119 struct igb_tx_entry {
120 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
121 uint16_t next_id; /**< Index of next descriptor in ring. */
122 uint16_t last_id; /**< Index of last scattered descriptor. */
126 * Structure associated with each RX queue.
128 struct igb_rx_queue {
129 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
130 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
131 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
132 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
133 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
134 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
135 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
136 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
137 uint16_t nb_rx_desc; /**< number of RX descriptors. */
138 uint16_t rx_tail; /**< current value of RDT register. */
139 uint16_t nb_rx_hold; /**< number of held free RX desc. */
140 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
141 uint16_t queue_id; /**< RX queue index. */
142 uint16_t reg_idx; /**< RX queue register index. */
143 uint8_t port_id; /**< Device port identifier. */
144 uint8_t pthresh; /**< Prefetch threshold register. */
145 uint8_t hthresh; /**< Host threshold register. */
146 uint8_t wthresh; /**< Write-back threshold register. */
147 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
148 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
152 * Hardware context number
154 enum igb_advctx_num {
155 IGB_CTX_0 = 0, /**< CTX0 */
156 IGB_CTX_1 = 1, /**< CTX1 */
157 IGB_CTX_NUM = 2, /**< CTX_NUM */
160 /** Offload features */
161 union igb_vlan_macip {
		uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
166 /**< VLAN Tag Control Identifier (CPU order). */
171 * Compare mask for vlan_macip_len.data,
172 * should be in sync with igb_vlan_macip.f layout.
#define TX_VLAN_CMP_MASK	0xFFFF0000	/**< VLAN TCI, upper 16 bits. */
#define TX_MAC_LEN_CMP_MASK	0x0000FE00	/**< L2 (MAC) length, 7 bits. */
#define TX_IP_LEN_CMP_MASK	0x000001FF	/**< L3 (IP) length, 9 bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
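/*
 * Illustrative sketch (not part of the driver): assumed layout of the 32-bit
 * vlan_macip.data word, derived from the masks above (VLAN TCI in the upper
 * 16 bits, 7-bit L2 length in bits 9-15, 9-bit L3 length in bits 0-8), and
 * how the compare masks select the fields that matter for a given offload
 * request:
 *
 *	uint32_t data = ((uint32_t)vlan_tci << 16) |
 *			((uint32_t)l2_len << 9) | l3_len;
 *	uint32_t relevant = data & (TX_VLAN_CMP_MASK | TX_MACIP_LEN_CMP_MASK);
 *
 * Only the bits kept in "relevant" are compared against the cached context
 * in what_advctx_update() below.
 */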
 * Structure used to check whether a new context descriptor needs to be built.
183 struct igb_advctx_info {
184 uint64_t flags; /**< ol_flags related to context build. */
185 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
186 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
190 * Structure associated with each TX queue.
192 struct igb_tx_queue {
193 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
194 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
195 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
196 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
197 uint32_t txd_type; /**< Device-specific TXD type */
198 uint16_t nb_tx_desc; /**< number of TX descriptors. */
199 uint16_t tx_tail; /**< Current value of TDT register. */
201 /**< Index of first used TX descriptor. */
202 uint16_t queue_id; /**< TX queue index. */
203 uint16_t reg_idx; /**< TX queue register index. */
204 uint8_t port_id; /**< Device port identifier. */
205 uint8_t pthresh; /**< Prefetch threshold register. */
206 uint8_t hthresh; /**< Host threshold register. */
207 uint8_t wthresh; /**< Write-back threshold register. */
209 /**< Current used hardware descriptor. */
211 /**< Start context position for transmit queue. */
212 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
213 /**< Hardware context history.*/
217 #define RTE_PMD_USE_PREFETCH
220 #ifdef RTE_PMD_USE_PREFETCH
221 #define rte_igb_prefetch(p) rte_prefetch0(p)
223 #define rte_igb_prefetch(p) do {} while(0)
226 #ifdef RTE_PMD_PACKET_PREFETCH
227 #define rte_packet_prefetch(p) rte_prefetch1(p)
229 #define rte_packet_prefetch(p) do {} while(0)
 * Macro for the VMDq feature of 1 GbE NICs.
235 #define E1000_VMOLR_SIZE (8)
237 /*********************************************************************
241 **********************************************************************/
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is kept as a separate function; look for optimization opportunities here.
 * Rework is required to go with the pre-defined values.
250 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
251 volatile struct e1000_adv_tx_context_desc *ctx_txd,
252 uint64_t ol_flags, uint32_t vlan_macip_lens)
254 uint32_t type_tucmd_mlhl;
255 uint32_t mss_l4len_idx;
256 uint32_t ctx_idx, ctx_curr;
259 ctx_curr = txq->ctx_curr;
260 ctx_idx = ctx_curr + txq->ctx_start;
265 if (ol_flags & PKT_TX_VLAN_PKT) {
266 cmp_mask |= TX_VLAN_CMP_MASK;
269 if (ol_flags & PKT_TX_IP_CKSUM) {
270 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
271 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
274 /* Specify which HW CTX to upload. */
275 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
276 switch (ol_flags & PKT_TX_L4_MASK) {
277 case PKT_TX_UDP_CKSUM:
278 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
279 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
283 case PKT_TX_TCP_CKSUM:
284 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
285 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
289 case PKT_TX_SCTP_CKSUM:
290 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
291 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
293 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
296 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
297 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
301 txq->ctx_cache[ctx_curr].flags = ol_flags;
302 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
303 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
304 vlan_macip_lens & cmp_mask;
306 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
307 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
308 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
309 ctx_txd->seqnum_seed = 0;
313 * Check which hardware context can be used. Use the existing match
314 * or create a new context descriptor.
316 static inline uint32_t
317 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
318 uint32_t vlan_macip_lens)
320 /* If match with the current context */
321 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
322 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
323 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
324 return txq->ctx_curr;
	/* If match with the second (alternate) context: toggle and compare */
	txq->ctx_curr ^= 1;
329 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
330 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
331 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
332 return txq->ctx_curr;
	/* Mismatch: a new context descriptor needs to be built */
336 return (IGB_CTX_NUM);
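/*
 * Illustrative usage sketch (not part of the driver): the TX path first asks
 * what_advctx_update() whether one of the two cached hardware contexts
 * already matches the requested offloads; only when IGB_CTX_NUM is returned
 * does it reserve an extra ring descriptor and build a new context with
 * igbe_set_xmit_ctx():
 *
 *	ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens.data);
 *	if (ctx == IGB_CTX_NUM) {
 *		igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
 *				  vlan_macip_lens.data);
 *		ctx = txq->ctx_curr;
 *	}
 *	olinfo_status |= ctx << E1000_ADVTXD_IDX_SHIFT;
 */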
339 static inline uint32_t
340 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
342 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
343 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
346 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
347 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
351 static inline uint32_t
352 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
354 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
355 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
359 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
362 struct igb_tx_queue *txq;
363 struct igb_tx_entry *sw_ring;
364 struct igb_tx_entry *txe, *txn;
365 volatile union e1000_adv_tx_desc *txr;
366 volatile union e1000_adv_tx_desc *txd;
367 struct rte_mbuf *tx_pkt;
368 struct rte_mbuf *m_seg;
369 union igb_vlan_macip vlan_macip_lens;
370 uint64_t buf_dma_addr;
371 uint32_t olinfo_status;
372 uint32_t cmd_type_len;
381 uint32_t new_ctx = 0;
385 sw_ring = txq->sw_ring;
387 tx_id = txq->tx_tail;
388 txe = &sw_ring[tx_id];
390 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
392 pkt_len = tx_pkt->pkt_len;
394 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
397 * The number of descriptors that must be allocated for a
398 * packet is the number of segments of that packet, plus 1
399 * Context Descriptor for the VLAN Tag Identifier, if any.
400 * Determine the last TX descriptor to allocate in the TX ring
401 * for the packet, starting from the current position (tx_id)
404 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
406 ol_flags = tx_pkt->ol_flags;
407 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
408 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
409 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
		/* If a context descriptor needs to be built. */
413 ctx = what_advctx_update(txq, tx_ol_req,
414 vlan_macip_lens.data);
			/* Only allocate a context descriptor if required. */
416 new_ctx = (ctx == IGB_CTX_NUM);
418 tx_last = (uint16_t) (tx_last + new_ctx);
420 if (tx_last >= txq->nb_tx_desc)
421 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
423 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
424 " tx_first=%u tx_last=%u",
425 (unsigned) txq->port_id,
426 (unsigned) txq->queue_id,
432 * Check if there are enough free descriptors in the TX ring
433 * to transmit the next packet.
434 * This operation is based on the two following rules:
436 * 1- Only check that the last needed TX descriptor can be
437 * allocated (by construction, if that descriptor is free,
438 * all intermediate ones are also free).
440 * For this purpose, the index of the last TX descriptor
441 * used for a packet (the "last descriptor" of a packet)
442 * is recorded in the TX entries (the last one included)
443 * that are associated with all TX descriptors allocated
		 * 2- Avoid allocating the last free TX descriptor of the
		 *    ring, in order to never set the TDT register with the
		 *    same value stored in parallel by the NIC in the TDH
		 *    register, which makes the TX engine of the NIC enter
		 *    a deadlock situation.
		 * By extension, avoid allocating a free descriptor that
		 *    belongs to the last set of free descriptors allocated
		 *    to the same packet previously transmitted.
458 * The "last descriptor" of the previously sent packet, if any,
459 * which used the last descriptor to allocate.
461 tx_end = sw_ring[tx_last].last_id;
464 * The next descriptor following that "last descriptor" in the
467 tx_end = sw_ring[tx_end].next_id;
470 * The "last descriptor" associated with that next descriptor.
472 tx_end = sw_ring[tx_end].last_id;
475 * Check that this descriptor is free.
477 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
484 * Set common flags of all TX Data Descriptors.
486 * The following bits must be set in all Data Descriptors:
487 * - E1000_ADVTXD_DTYP_DATA
488 * - E1000_ADVTXD_DCMD_DEXT
490 * The following bits must be set in the first Data Descriptor
491 * and are ignored in the other ones:
492 * - E1000_ADVTXD_DCMD_IFCS
493 * - E1000_ADVTXD_MAC_1588
494 * - E1000_ADVTXD_DCMD_VLE
496 * The following bits must only be set in the last Data
498 * - E1000_TXD_CMD_EOP
500 * The following bits can be set in any Data Descriptor, but
501 * are only set in the last Data Descriptor:
504 cmd_type_len = txq->txd_type |
505 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
506 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
507 #if defined(RTE_LIBRTE_IEEE1588)
508 if (ol_flags & PKT_TX_IEEE1588_TMST)
509 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
512 /* Setup TX Advanced context descriptor if required */
514 volatile struct e1000_adv_tx_context_desc *
517 ctx_txd = (volatile struct
518 e1000_adv_tx_context_desc *)
521 txn = &sw_ring[txe->next_id];
522 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
524 if (txe->mbuf != NULL) {
525 rte_pktmbuf_free_seg(txe->mbuf);
529 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
530 vlan_macip_lens.data);
532 txe->last_id = tx_last;
533 tx_id = txe->next_id;
537 /* Setup the TX Advanced Data Descriptor */
538 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
539 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
540 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
545 txn = &sw_ring[txe->next_id];
548 if (txe->mbuf != NULL)
549 rte_pktmbuf_free_seg(txe->mbuf);
553 * Set up transmit descriptor.
555 slen = (uint16_t) m_seg->data_len;
556 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
557 txd->read.buffer_addr =
558 rte_cpu_to_le_64(buf_dma_addr);
559 txd->read.cmd_type_len =
560 rte_cpu_to_le_32(cmd_type_len | slen);
561 txd->read.olinfo_status =
562 rte_cpu_to_le_32(olinfo_status);
563 txe->last_id = tx_last;
564 tx_id = txe->next_id;
567 } while (m_seg != NULL);
570 * The last packet data descriptor needs End Of Packet (EOP)
571 * and Report Status (RS).
573 txd->read.cmd_type_len |=
574 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
580 * Set the Transmit Descriptor Tail (TDT).
582 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
583 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
584 (unsigned) txq->port_id, (unsigned) txq->queue_id,
585 (unsigned) tx_id, (unsigned) nb_tx);
586 txq->tx_tail = tx_id;
591 /*********************************************************************
595 **********************************************************************/
596 static inline uint64_t
597 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
601 static uint64_t ip_pkt_types_map[16] = {
602 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
603 PKT_RX_IPV6_HDR, 0, 0, 0,
604 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
605 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
608 #if defined(RTE_LIBRTE_IEEE1588)
609 static uint32_t ip_pkt_etqf_map[8] = {
610 0, 0, 0, PKT_RX_IEEE1588_PTP,
614 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
615 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
616 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
618 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
619 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
621 return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
624 static inline uint64_t
625 rx_desc_status_to_pkt_flags(uint32_t rx_status)
629 /* Check if VLAN present */
630 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
632 #if defined(RTE_LIBRTE_IEEE1588)
633 if (rx_status & E1000_RXD_STAT_TMST)
634 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
639 static inline uint64_t
640 rx_desc_error_to_pkt_flags(uint32_t rx_status)
643 * Bit 30: IPE, IPv4 checksum error
644 * Bit 29: L4I, L4I integrity error
647 static uint64_t error_to_pkt_flags_map[4] = {
648 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
649 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
651 return error_to_pkt_flags_map[(rx_status >>
652 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
656 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
659 struct igb_rx_queue *rxq;
660 volatile union e1000_adv_rx_desc *rx_ring;
661 volatile union e1000_adv_rx_desc *rxdp;
662 struct igb_rx_entry *sw_ring;
663 struct igb_rx_entry *rxe;
664 struct rte_mbuf *rxm;
665 struct rte_mbuf *nmb;
666 union e1000_adv_rx_desc rxd;
669 uint32_t hlen_type_rss;
679 rx_id = rxq->rx_tail;
680 rx_ring = rxq->rx_ring;
681 sw_ring = rxq->sw_ring;
682 while (nb_rx < nb_pkts) {
684 * The order of operations here is important as the DD status
685 * bit must not be read after any other descriptor fields.
686 * rx_ring and rxdp are pointing to volatile data so the order
687 * of accesses cannot be reordered by the compiler. If they were
688 * not volatile, they could be reordered which could lead to
689 * using invalid descriptor fields when read from rxd.
691 rxdp = &rx_ring[rx_id];
692 staterr = rxdp->wb.upper.status_error;
693 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
700 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
701 * likely to be invalid and to be dropped by the various
702 * validation checks performed by the network stack.
704 * Allocate a new mbuf to replenish the RX ring descriptor.
705 * If the allocation fails:
706 * - arrange for that RX descriptor to be the first one
707 * being parsed the next time the receive function is
708 * invoked [on the same queue].
710 * - Stop parsing the RX ring and return immediately.
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
723 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
724 "staterr=0x%x pkt_len=%u",
725 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
726 (unsigned) rx_id, (unsigned) staterr,
727 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
729 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
731 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
732 "queue_id=%u", (unsigned) rxq->port_id,
733 (unsigned) rxq->queue_id);
734 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
739 rxe = &sw_ring[rx_id];
741 if (rx_id == rxq->nb_rx_desc)
744 /* Prefetch next mbuf while processing current one. */
745 rte_igb_prefetch(sw_ring[rx_id].mbuf);
748 * When next RX descriptor is on a cache-line boundary,
749 * prefetch the next 4 RX descriptors and the next 8 pointers
752 if ((rx_id & 0x3) == 0) {
753 rte_igb_prefetch(&rx_ring[rx_id]);
754 rte_igb_prefetch(&sw_ring[rx_id]);
760 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
761 rxdp->read.hdr_addr = dma_addr;
762 rxdp->read.pkt_addr = dma_addr;
765 * Initialize the returned mbuf.
766 * 1) setup generic mbuf fields:
767 * - number of segments,
770 * - RX port identifier.
771 * 2) integrate hardware offload data, if any:
773 * - IP checksum flag,
774 * - VLAN TCI, if any,
777 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
779 rxm->data_off = RTE_PKTMBUF_HEADROOM;
780 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
783 rxm->pkt_len = pkt_len;
784 rxm->data_len = pkt_len;
785 rxm->port = rxq->port_id;
787 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
788 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
789 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
790 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
792 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
793 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
794 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
795 rxm->ol_flags = pkt_flags;
798 * Store the mbuf address into the next entry of the array
799 * of returned packets.
801 rx_pkts[nb_rx++] = rxm;
803 rxq->rx_tail = rx_id;
806 * If the number of free RX descriptors is greater than the RX free
807 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
809 * Update the RDT with the value of the last processed RX descriptor
810 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
812 * hardware point of view...
814 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
815 if (nb_hold > rxq->rx_free_thresh) {
816 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
817 "nb_hold=%u nb_rx=%u",
818 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819 (unsigned) rx_id, (unsigned) nb_hold,
821 rx_id = (uint16_t) ((rx_id == 0) ?
822 (rxq->nb_rx_desc - 1) : (rx_id - 1));
823 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
826 rxq->nb_rx_hold = nb_hold;
831 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
834 struct igb_rx_queue *rxq;
835 volatile union e1000_adv_rx_desc *rx_ring;
836 volatile union e1000_adv_rx_desc *rxdp;
837 struct igb_rx_entry *sw_ring;
838 struct igb_rx_entry *rxe;
839 struct rte_mbuf *first_seg;
840 struct rte_mbuf *last_seg;
841 struct rte_mbuf *rxm;
842 struct rte_mbuf *nmb;
843 union e1000_adv_rx_desc rxd;
844 uint64_t dma; /* Physical address of mbuf data buffer */
846 uint32_t hlen_type_rss;
856 rx_id = rxq->rx_tail;
857 rx_ring = rxq->rx_ring;
858 sw_ring = rxq->sw_ring;
861 * Retrieve RX context of current packet, if any.
863 first_seg = rxq->pkt_first_seg;
864 last_seg = rxq->pkt_last_seg;
866 while (nb_rx < nb_pkts) {
869 * The order of operations here is important as the DD status
870 * bit must not be read after any other descriptor fields.
871 * rx_ring and rxdp are pointing to volatile data so the order
872 * of accesses cannot be reordered by the compiler. If they were
873 * not volatile, they could be reordered which could lead to
874 * using invalid descriptor fields when read from rxd.
876 rxdp = &rx_ring[rx_id];
877 staterr = rxdp->wb.upper.status_error;
878 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
885 * Allocate a new mbuf to replenish the RX ring descriptor.
886 * If the allocation fails:
887 * - arrange for that RX descriptor to be the first one
888 * being parsed the next time the receive function is
889 * invoked [on the same queue].
891 * - Stop parsing the RX ring and return immediately.
893 * This policy does not drop the packet received in the RX
894 * descriptor for which the allocation of a new mbuf failed.
895 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
897 * As a side effect, holding RX descriptors instead of
898 * systematically giving them back to the NIC may lead to
899 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
904 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
905 "staterr=0x%x data_len=%u",
906 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
907 (unsigned) rx_id, (unsigned) staterr,
908 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
910 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
912 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
913 "queue_id=%u", (unsigned) rxq->port_id,
914 (unsigned) rxq->queue_id);
915 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
920 rxe = &sw_ring[rx_id];
922 if (rx_id == rxq->nb_rx_desc)
925 /* Prefetch next mbuf while processing current one. */
926 rte_igb_prefetch(sw_ring[rx_id].mbuf);
929 * When next RX descriptor is on a cache-line boundary,
930 * prefetch the next 4 RX descriptors and the next 8 pointers
933 if ((rx_id & 0x3) == 0) {
934 rte_igb_prefetch(&rx_ring[rx_id]);
935 rte_igb_prefetch(&sw_ring[rx_id]);
939 * Update RX descriptor with the physical address of the new
940 * data buffer of the new allocated mbuf.
944 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
945 rxdp->read.pkt_addr = dma;
946 rxdp->read.hdr_addr = dma;
949 * Set data length & data buffer address of mbuf.
951 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
952 rxm->data_len = data_len;
953 rxm->data_off = RTE_PKTMBUF_HEADROOM;
956 * If this is the first buffer of the received packet,
957 * set the pointer to the first mbuf of the packet and
958 * initialize its context.
959 * Otherwise, update the total length and the number of segments
960 * of the current scattered packet, and update the pointer to
961 * the last mbuf of the current packet.
963 if (first_seg == NULL) {
965 first_seg->pkt_len = data_len;
966 first_seg->nb_segs = 1;
968 first_seg->pkt_len += data_len;
969 first_seg->nb_segs++;
970 last_seg->next = rxm;
974 * If this is not the last buffer of the received packet,
975 * update the pointer to the last mbuf of the current scattered
976 * packet and continue to parse the RX ring.
978 if (! (staterr & E1000_RXD_STAT_EOP)) {
984 * This is the last buffer of the received packet.
985 * If the CRC is not stripped by the hardware:
986 * - Subtract the CRC length from the total packet length.
987 * - If the last buffer only contains the whole CRC or a part
988 * of it, free the mbuf associated to the last buffer.
989 * If part of the CRC is also contained in the previous
990 * mbuf, subtract the length of that CRC part from the
991 * data length of the previous mbuf.
994 if (unlikely(rxq->crc_len > 0)) {
995 first_seg->pkt_len -= ETHER_CRC_LEN;
996 if (data_len <= ETHER_CRC_LEN) {
997 rte_pktmbuf_free_seg(rxm);
998 first_seg->nb_segs--;
999 last_seg->data_len = (uint16_t)
1000 (last_seg->data_len -
1001 (ETHER_CRC_LEN - data_len));
1002 last_seg->next = NULL;
1005 (uint16_t) (data_len - ETHER_CRC_LEN);
1009 * Initialize the first mbuf of the returned packet:
1010 * - RX port identifier,
1011 * - hardware offload data, if any:
1012 * - RSS flag & hash,
1013 * - IP checksum flag,
1014 * - VLAN TCI, if any,
1017 first_seg->port = rxq->port_id;
1018 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1021 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1022 * set in the pkt_flags field.
1024 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1025 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1026 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1027 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1028 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1029 first_seg->ol_flags = pkt_flags;
1031 /* Prefetch data of first segment, if configured to do so. */
1032 rte_packet_prefetch((char *)first_seg->buf_addr +
1033 first_seg->data_off);
1036 * Store the mbuf address into the next entry of the array
1037 * of returned packets.
1039 rx_pkts[nb_rx++] = first_seg;
		 * Set up the receive context for a new packet.
1048 * Record index of the next RX descriptor to probe.
1050 rxq->rx_tail = rx_id;
1053 * Save receive context.
1055 rxq->pkt_first_seg = first_seg;
1056 rxq->pkt_last_seg = last_seg;
1059 * If the number of free RX descriptors is greater than the RX free
1060 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1062 * Update the RDT with the value of the last processed RX descriptor
1063 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
1065 * hardware point of view...
1067 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1068 if (nb_hold > rxq->rx_free_thresh) {
1069 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1070 "nb_hold=%u nb_rx=%u",
1071 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1072 (unsigned) rx_id, (unsigned) nb_hold,
1074 rx_id = (uint16_t) ((rx_id == 0) ?
1075 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1076 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1079 rxq->nb_rx_hold = nb_hold;
1084 * Rings setup and release.
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
 * This also optimizes the cache-line size effect;
 * the hardware supports cache-line sizes up to 128 bytes.
1091 #define IGB_ALIGN 128
1094 * Maximum number of Ring Descriptors.
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *	(num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1100 #define IGB_MIN_RING_DESC 32
1101 #define IGB_MAX_RING_DESC 4096
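/*
 * Illustrative sketch (not part of the driver): with 16-byte advanced
 * descriptors the "% 128 == 0" rule reduces to requiring a multiple of
 * 8 descriptors, which is how the setup functions below validate nb_desc:
 *
 *	size_t ring_bytes = (size_t)nb_desc * sizeof(union e1000_adv_rx_desc);
 *	if ((ring_bytes % IGB_ALIGN) != 0 ||
 *	    nb_desc < IGB_MIN_RING_DESC || nb_desc > IGB_MAX_RING_DESC)
 *		return -EINVAL;	/* e.g. 1024 descriptors * 16 B = 16384 B: OK */
 */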
1103 static const struct rte_memzone *
1104 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1105 uint16_t queue_id, uint32_t ring_size, int socket_id)
1107 char z_name[RTE_MEMZONE_NAMESIZE];
1108 const struct rte_memzone *mz;
1110 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1111 dev->driver->pci_drv.name, ring_name,
1112 dev->data->port_id, queue_id);
1113 mz = rte_memzone_lookup(z_name);
1117 #ifdef RTE_LIBRTE_XEN_DOM0
1118 return rte_memzone_reserve_bounded(z_name, ring_size,
1119 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1121 return rte_memzone_reserve_aligned(z_name, ring_size,
1122 socket_id, 0, IGB_ALIGN);
1127 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1131 if (txq->sw_ring != NULL) {
1132 for (i = 0; i < txq->nb_tx_desc; i++) {
1133 if (txq->sw_ring[i].mbuf != NULL) {
1134 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1135 txq->sw_ring[i].mbuf = NULL;
1142 igb_tx_queue_release(struct igb_tx_queue *txq)
1145 igb_tx_queue_release_mbufs(txq);
1146 rte_free(txq->sw_ring);
1152 eth_igb_tx_queue_release(void *txq)
1154 igb_tx_queue_release(txq);
1158 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1163 memset((void*)&txq->ctx_cache, 0,
1164 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1168 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1170 static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1172 struct igb_tx_entry *txe = txq->sw_ring;
1174 struct e1000_hw *hw;
1176 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1177 /* Zero out HW ring memory */
1178 for (i = 0; i < txq->nb_tx_desc; i++) {
1179 txq->tx_ring[i] = zeroed_desc;
1182 /* Initialize ring entries */
1183 prev = (uint16_t)(txq->nb_tx_desc - 1);
1184 for (i = 0; i < txq->nb_tx_desc; i++) {
1185 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1187 txd->wb.status = E1000_TXD_STAT_DD;
1190 txe[prev].next_id = i;
1194 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1195 /* 82575 specific, each tx queue will use 2 hw contexts */
1196 if (hw->mac.type == e1000_82575)
1197 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1199 igb_reset_tx_queue_stat(txq);
1203 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1206 unsigned int socket_id,
1207 const struct rte_eth_txconf *tx_conf)
1209 const struct rte_memzone *tz;
1210 struct igb_tx_queue *txq;
1211 struct e1000_hw *hw;
1214 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1217 * Validate number of transmit descriptors.
1218 * It must not exceed hardware maximum, and must be multiple
1221 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1222 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1227 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1230 if (tx_conf->tx_free_thresh != 0)
1231 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1232 "used for the 1G driver.");
1233 if (tx_conf->tx_rs_thresh != 0)
1234 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1235 "used for the 1G driver.");
1236 if (tx_conf->tx_thresh.wthresh == 0)
1237 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1238 "consider setting the TX WTHRESH value to 4, 8, "
1241 /* Free memory prior to re-allocation if needed */
1242 if (dev->data->tx_queues[queue_idx] != NULL) {
1243 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1244 dev->data->tx_queues[queue_idx] = NULL;
1247 /* First allocate the tx queue data structure */
1248 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1254 * Allocate TX ring hardware descriptors. A memzone large enough to
1255 * handle the maximum ring size is allocated in order to allow for
1256 * resizing in later calls to the queue setup function.
1258 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1259 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1262 igb_tx_queue_release(txq);
1266 txq->nb_tx_desc = nb_desc;
1267 txq->pthresh = tx_conf->tx_thresh.pthresh;
1268 txq->hthresh = tx_conf->tx_thresh.hthresh;
1269 txq->wthresh = tx_conf->tx_thresh.wthresh;
1270 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1272 txq->queue_id = queue_idx;
1273 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1274 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1275 txq->port_id = dev->data->port_id;
1277 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1278 #ifndef RTE_LIBRTE_XEN_DOM0
1279 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1281 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1283 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1284 /* Allocate software ring */
1285 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1286 sizeof(struct igb_tx_entry) * nb_desc,
1288 if (txq->sw_ring == NULL) {
1289 igb_tx_queue_release(txq);
1292 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1293 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1295 igb_reset_tx_queue(txq, dev);
1296 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1297 dev->data->tx_queues[queue_idx] = txq;
1303 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1307 if (rxq->sw_ring != NULL) {
1308 for (i = 0; i < rxq->nb_rx_desc; i++) {
1309 if (rxq->sw_ring[i].mbuf != NULL) {
1310 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1311 rxq->sw_ring[i].mbuf = NULL;
1318 igb_rx_queue_release(struct igb_rx_queue *rxq)
1321 igb_rx_queue_release_mbufs(rxq);
1322 rte_free(rxq->sw_ring);
1328 eth_igb_rx_queue_release(void *rxq)
1330 igb_rx_queue_release(rxq);
1334 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1336 static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1340 /* Zero out HW ring memory */
1341 for (i = 0; i < rxq->nb_rx_desc; i++) {
1342 rxq->rx_ring[i] = zeroed_desc;
1346 rxq->pkt_first_seg = NULL;
1347 rxq->pkt_last_seg = NULL;
1351 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1354 unsigned int socket_id,
1355 const struct rte_eth_rxconf *rx_conf,
1356 struct rte_mempool *mp)
1358 const struct rte_memzone *rz;
1359 struct igb_rx_queue *rxq;
1360 struct e1000_hw *hw;
1363 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366 * Validate number of receive descriptors.
1367 * It must not exceed hardware maximum, and must be multiple
1370 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1371 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1375 /* Free memory prior to re-allocation if needed */
1376 if (dev->data->rx_queues[queue_idx] != NULL) {
1377 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1378 dev->data->rx_queues[queue_idx] = NULL;
1381 /* First allocate the RX queue data structure. */
1382 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1387 rxq->nb_rx_desc = nb_desc;
1388 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1389 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1390 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1391 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1393 rxq->drop_en = rx_conf->rx_drop_en;
1394 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1395 rxq->queue_id = queue_idx;
1396 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1397 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1398 rxq->port_id = dev->data->port_id;
1399 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1403 * Allocate RX ring hardware descriptors. A memzone large enough to
1404 * handle the maximum ring size is allocated in order to allow for
1405 * resizing in later calls to the queue setup function.
1407 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1408 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1410 igb_rx_queue_release(rxq);
1413 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1414 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1415 #ifndef RTE_LIBRTE_XEN_DOM0
1416 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1418 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1420 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1422 /* Allocate software ring. */
1423 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1424 sizeof(struct igb_rx_entry) * nb_desc,
1426 if (rxq->sw_ring == NULL) {
1427 igb_rx_queue_release(rxq);
1430 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1431 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1433 dev->data->rx_queues[queue_idx] = rxq;
1434 igb_reset_rx_queue(rxq);
1440 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1442 #define IGB_RXQ_SCAN_INTERVAL 4
1443 volatile union e1000_adv_rx_desc *rxdp;
1444 struct igb_rx_queue *rxq;
1447 if (rx_queue_id >= dev->data->nb_rx_queues) {
1448 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1452 rxq = dev->data->rx_queues[rx_queue_id];
1453 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1455 while ((desc < rxq->nb_rx_desc) &&
1456 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1457 desc += IGB_RXQ_SCAN_INTERVAL;
1458 rxdp += IGB_RXQ_SCAN_INTERVAL;
1459 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1460 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1461 desc - rxq->nb_rx_desc]);
1468 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1470 volatile union e1000_adv_rx_desc *rxdp;
1471 struct igb_rx_queue *rxq = rx_queue;
1474 if (unlikely(offset >= rxq->nb_rx_desc))
1476 desc = rxq->rx_tail + offset;
1477 if (desc >= rxq->nb_rx_desc)
1478 desc -= rxq->nb_rx_desc;
1480 rxdp = &rxq->rx_ring[desc];
1481 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1485 igb_dev_clear_queues(struct rte_eth_dev *dev)
1488 struct igb_tx_queue *txq;
1489 struct igb_rx_queue *rxq;
1491 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1492 txq = dev->data->tx_queues[i];
1494 igb_tx_queue_release_mbufs(txq);
1495 igb_reset_tx_queue(txq, dev);
1499 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1500 rxq = dev->data->rx_queues[i];
1502 igb_rx_queue_release_mbufs(rxq);
1503 igb_reset_rx_queue(rxq);
1509 * Receive Side Scaling (RSS).
1510 * See section 7.1.1.7 in the following document:
1511 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1514 * The source and destination IP addresses of the IP header and the source and
1515 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1516 * against a configurable random key to compute a 32-bit RSS hash result.
1517 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1518 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1519 * RSS output index which is used as the RX queue index where to store the
1521 * The following output is supplied in the RX write-back descriptor:
1522 * - 32-bit result of the Microsoft RSS hash function,
1523 * - 4-bit RSS type field.
1527 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1528 * Used as the default key.
1530 static uint8_t rss_intel_key[40] = {
1531 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1532 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1533 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1534 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1535 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1539 igb_rss_disable(struct rte_eth_dev *dev)
1541 struct e1000_hw *hw;
1544 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1546 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1547 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1551 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1559 hash_key = rss_conf->rss_key;
1560 if (hash_key != NULL) {
1561 /* Fill in RSS hash key */
1562 for (i = 0; i < 10; i++) {
1563 rss_key = hash_key[(i * 4)];
1564 rss_key |= hash_key[(i * 4) + 1] << 8;
1565 rss_key |= hash_key[(i * 4) + 2] << 16;
1566 rss_key |= hash_key[(i * 4) + 3] << 24;
1567 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1571 /* Set configured hashing protocols in MRQC register */
1572 rss_hf = rss_conf->rss_hf;
1573 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1574 if (rss_hf & ETH_RSS_IPV4)
1575 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1576 if (rss_hf & ETH_RSS_IPV4_TCP)
1577 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1578 if (rss_hf & ETH_RSS_IPV6)
1579 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1580 if (rss_hf & ETH_RSS_IPV6_EX)
1581 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1582 if (rss_hf & ETH_RSS_IPV6_TCP)
1583 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1584 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1585 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1586 if (rss_hf & ETH_RSS_IPV4_UDP)
1587 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1588 if (rss_hf & ETH_RSS_IPV6_UDP)
1589 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1590 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1591 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1592 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1596 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1597 struct rte_eth_rss_conf *rss_conf)
1599 struct e1000_hw *hw;
1603 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1606 * Before changing anything, first check that the update RSS operation
1607 * does not attempt to disable RSS, if RSS was enabled at
1608 * initialization time, or does not attempt to enable RSS, if RSS was
1609 * disabled at initialization time.
1611 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1612 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1613 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1614 if (rss_hf != 0) /* Enable RSS */
1616 return 0; /* Nothing to do */
1619 if (rss_hf == 0) /* Disable RSS */
1621 igb_hw_rss_hash_set(hw, rss_conf);
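/*
 * Illustrative application-level sketch (not part of the driver): the MRQC
 * update above is reached through the generic ethdev API; the exact struct
 * fields used here are an assumption based on how this file consumes
 * rte_eth_rss_conf. Passing a NULL rss_key keeps the key already programmed
 * in the RSSRK registers:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	// keep the current 40-byte key
 *		.rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
 *	};
 *	ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 *	// ret != 0 means the request was rejected, e.g. an attempt to enable
 *	// RSS when it was disabled at initialization time.
 */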
1625 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1626 struct rte_eth_rss_conf *rss_conf)
1628 struct e1000_hw *hw;
1635 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636 hash_key = rss_conf->rss_key;
1637 if (hash_key != NULL) {
1638 /* Return RSS hash key */
1639 for (i = 0; i < 10; i++) {
1640 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1641 hash_key[(i * 4)] = rss_key & 0x000000FF;
1642 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1643 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1644 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1648 /* Get RSS functions configured in MRQC register */
1649 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1650 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1651 rss_conf->rss_hf = 0;
1655 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1656 rss_hf |= ETH_RSS_IPV4;
1657 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1658 rss_hf |= ETH_RSS_IPV4_TCP;
1659 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1660 rss_hf |= ETH_RSS_IPV6;
1661 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1662 rss_hf |= ETH_RSS_IPV6_EX;
1663 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1664 rss_hf |= ETH_RSS_IPV6_TCP;
1665 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1666 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1667 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1668 rss_hf |= ETH_RSS_IPV4_UDP;
1669 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1670 rss_hf |= ETH_RSS_IPV6_UDP;
1671 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1672 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1673 rss_conf->rss_hf = rss_hf;
1678 igb_rss_configure(struct rte_eth_dev *dev)
1680 struct rte_eth_rss_conf rss_conf;
1681 struct e1000_hw *hw;
1685 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1687 /* Fill in redirection table. */
1688 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1689 for (i = 0; i < 128; i++) {
1696 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1697 i % dev->data->nb_rx_queues : 0);
1698 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1700 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1704 * Configure the RSS key and the RSS protocols used to compute
1705 * the RSS hash of input packets.
1707 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1708 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1709 igb_rss_disable(dev);
1712 if (rss_conf.rss_key == NULL)
1713 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1714 igb_hw_rss_hash_set(hw, &rss_conf);
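/*
 * Illustrative sketch (not part of the driver): the RETA loop above packs
 * four one-byte entries into each 32-bit RETA register, so the table is
 * flushed once every fourth entry. For a port with nb_rx_queues receive
 * queues the mapping is simply round-robin, e.g.:
 *
 *	for (i = 0; i < 128; i++) {
 *		reta.bytes[i & 3] = (i % nb_rx_queues) << shift;
 *		if ((i & 3) == 3)
 *			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
 *	}
 */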
 * Check whether the MAC type supports VMDq.
 * Return 1 if it does; otherwise return 0.
1722 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1724 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1726 switch (hw->mac.type) {
1747 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1753 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1755 struct rte_eth_vmdq_rx_conf *cfg;
1756 struct e1000_hw *hw;
1757 uint32_t mrqc, vt_ctl, vmolr, rctl;
1760 PMD_INIT_FUNC_TRACE();
1762 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1763 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
	/* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1766 if (igb_is_vmdq_supported(dev) == 0)
1769 igb_rss_disable(dev);
	/* RCTL: enable VLAN filtering */
1772 rctl = E1000_READ_REG(hw, E1000_RCTL);
1773 rctl |= E1000_RCTL_VFE;
1774 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1776 /* MRQC: enable vmdq */
1777 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1778 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1779 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1781 /* VTCTL: pool selection according to VLAN tag */
1782 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1783 if (cfg->enable_default_pool)
1784 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1785 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1786 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1788 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1789 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1790 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1791 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1794 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1795 vmolr |= E1000_VMOLR_AUPE;
1796 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1797 vmolr |= E1000_VMOLR_ROMPE;
1798 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1799 vmolr |= E1000_VMOLR_ROPE;
1800 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1801 vmolr |= E1000_VMOLR_BAM;
1802 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1803 vmolr |= E1000_VMOLR_MPME;
1805 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
	 * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
	 * Both 82576 and 82580 support it.
1812 if (hw->mac.type != e1000_i350) {
1813 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1814 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1815 vmolr |= E1000_VMOLR_STRVLAN;
1816 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1820 /* VFTA - enable all vlan filters */
1821 for (i = 0; i < IGB_VFTA_SIZE; i++)
1822 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
	/* VFRE: enable RX for 8 pools; both 82576 and i350 support it */
1825 if (hw->mac.type != e1000_82580)
1826 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
	 * RAH/RAL - allow pools to read specific MAC addresses.
	 * In this case, all pools should be able to read from MAC address 0.
1832 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1833 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1835 /* VLVF: set up filters for vlan tags as configured */
1836 for (i = 0; i < cfg->nb_pool_maps; i++) {
1837 /* set vlan id in VF register and set the valid bit */
1838 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1839 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1840 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1841 E1000_VLVF_POOLSEL_MASK)));
1844 E1000_WRITE_FLUSH(hw);
1850 /*********************************************************************
1852 * Enable receive unit.
1854 **********************************************************************/
1857 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1859 struct igb_rx_entry *rxe = rxq->sw_ring;
1863 /* Initialize software ring entries. */
1864 for (i = 0; i < rxq->nb_rx_desc; i++) {
1865 volatile union e1000_adv_rx_desc *rxd;
1866 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1869 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1870 "queue_id=%hu", rxq->queue_id);
1874 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1875 rxd = &rxq->rx_ring[i];
1876 rxd->read.hdr_addr = dma_addr;
1877 rxd->read.pkt_addr = dma_addr;
1884 #define E1000_MRQC_DEF_Q_SHIFT (3)
1886 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1888 struct e1000_hw *hw =
1889 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1894 * SRIOV active scheme
1895 * FIXME if support RSS together with VMDq & SRIOV
1897 mrqc = E1000_MRQC_ENABLE_VMDQ;
1898 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1899 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1900 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1901 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1903 * SRIOV inactive scheme
1905 switch (dev->data->dev_conf.rxmode.mq_mode) {
1907 igb_rss_configure(dev);
1909 case ETH_MQ_RX_VMDQ_ONLY:
1910 /*Configure general VMDQ only RX parameters*/
1911 igb_vmdq_rx_hw_configure(dev);
1913 case ETH_MQ_RX_NONE:
			/* If mq_mode is none, disable RSS. */
1916 igb_rss_disable(dev);
1925 eth_igb_rx_init(struct rte_eth_dev *dev)
1927 struct e1000_hw *hw;
1928 struct igb_rx_queue *rxq;
1929 struct rte_pktmbuf_pool_private *mbp_priv;
1934 uint16_t rctl_bsize;
1938 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1942 * Make sure receives are disabled while setting
1943 * up the descriptor ring.
1945 rctl = E1000_READ_REG(hw, E1000_RCTL);
1946 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1949 * Configure support of jumbo frames, if any.
1951 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1952 rctl |= E1000_RCTL_LPE;
		 * Set the maximum packet length by default; it might be
		 * updated together with enabling/disabling dual VLAN.
1958 E1000_WRITE_REG(hw, E1000_RLPML,
1959 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1962 rctl &= ~E1000_RCTL_LPE;
1964 /* Configure and enable each RX queue. */
1966 dev->rx_pkt_burst = eth_igb_recv_pkts;
1967 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1971 rxq = dev->data->rx_queues[i];
1973 /* Allocate buffers for descriptor rings and set up queue */
1974 ret = igb_alloc_rx_queue_mbufs(rxq);
1979 * Reset crc_len in case it was changed after queue setup by a
1983 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1986 bus_addr = rxq->rx_ring_phys_addr;
1987 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1989 sizeof(union e1000_adv_rx_desc));
1990 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1991 (uint32_t)(bus_addr >> 32));
1992 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1994 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1997 * Configure RX buffer size.
1999 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2000 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2001 RTE_PKTMBUF_HEADROOM);
2002 if (buf_size >= 1024) {
2004 * Configure the BSIZEPACKET field of the SRRCTL
2005 * register of the queue.
2006 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2007 * If this field is equal to 0b, then RCTL.BSIZE
2008 * determines the RX packet buffer size.
2010 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2011 E1000_SRRCTL_BSIZEPKT_MASK);
2012 buf_size = (uint16_t) ((srrctl &
2013 E1000_SRRCTL_BSIZEPKT_MASK) <<
2014 E1000_SRRCTL_BSIZEPKT_SHIFT);
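			/*
			 * Worked example (illustrative): a mempool whose data
			 * room is 2048 bytes leaves 2048 - RTE_PKTMBUF_HEADROOM
			 * (128 by default) = 1920 bytes; 1920 >> 10 == 1, so
			 * BSIZEPKT is programmed as 1 KB and buf_size is
			 * rounded down to 1024 bytes for the scatter decision
			 * below.
			 */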
			/* Add dual VLAN tag length to support dual VLAN */
2017 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2018 2 * VLAN_TAG_SIZE) > buf_size){
2019 if (!dev->data->scattered_rx)
2021 "forcing scatter mode");
2022 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2023 dev->data->scattered_rx = 1;
2027 * Use BSIZE field of the device RCTL register.
2029 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2030 rctl_bsize = buf_size;
2031 if (!dev->data->scattered_rx)
2032 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2033 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2034 dev->data->scattered_rx = 1;
		/* Set whether packets are dropped when no descriptors are available */
2039 srrctl |= E1000_SRRCTL_DROP_EN;
2041 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2043 /* Enable this RX queue. */
2044 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2045 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2046 rxdctl &= 0xFFF00000;
2047 rxdctl |= (rxq->pthresh & 0x1F);
2048 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2049 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2050 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2053 if (dev->data->dev_conf.rxmode.enable_scatter) {
2054 if (!dev->data->scattered_rx)
2055 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2056 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2057 dev->data->scattered_rx = 1;
2061 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2063 * register, since the code above configures the SRRCTL register of
2064 * the RX queue in such a case.
2065 * All configurable sizes are:
2066 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2067 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2068 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2069 * 2048: rctl |= E1000_RCTL_SZ_2048;
2070 * 1024: rctl |= E1000_RCTL_SZ_1024;
2071 * 512: rctl |= E1000_RCTL_SZ_512;
2072 * 256: rctl |= E1000_RCTL_SZ_256;
2074 if (rctl_bsize > 0) {
2075 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2076 rctl |= E1000_RCTL_SZ_512;
2077 else /* 256 <= buf_size < 512 - use 256 */
2078 rctl |= E1000_RCTL_SZ_256;
2082 * Configure RSS if device configured with multiple RX queues.
2084 igb_dev_mq_rx_configure(dev);
2086 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2087 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2090 * Setup the Checksum Register.
2091 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2093 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2094 rxcsum |= E1000_RXCSUM_PCSD;
2096 /* Enable both L3/L4 rx checksum offload */
2097 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2098 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2100 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2101 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
    /* Setup the Receive Control Register. */
    if (dev->data->dev_conf.rxmode.hw_strip_crc) {
        rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

        /* set STRCRC bit in all queues */
        if (hw->mac.type == e1000_i350 ||
            hw->mac.type == e1000_i210 ||
            hw->mac.type == e1000_i211 ||
            hw->mac.type == e1000_i354) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                uint32_t dvmolr = E1000_READ_REG(hw,
                    E1000_DVMOLR(rxq->reg_idx));
                dvmolr |= E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
            }
        }
    } else {
        rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

        /* clear STRCRC bit in all queues */
        if (hw->mac.type == e1000_i350 ||
            hw->mac.type == e1000_i210 ||
            hw->mac.type == e1000_i211 ||
            hw->mac.type == e1000_i354) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                uint32_t dvmolr = E1000_READ_REG(hw,
                    E1000_DVMOLR(rxq->reg_idx));
                dvmolr &= ~E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
            }
        }
    }
    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
    rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
            E1000_RCTL_RDMTS_HALF |
            (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
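    /*
     * The two MO bits cleared and rewritten above select which portion of a
     * multicast destination address indexes the multicast table array, as
     * recorded in hw->mac.mc_filter_type. The other flags enable the
     * receiver, accept broadcast frames, disable loopback and set the RX
     * descriptor minimum threshold to half of the ring.
     */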
    /* Make sure VLAN Filters are off. */
    if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
        rctl &= ~E1000_RCTL_VFE;
    /* Don't store bad packets. */
    rctl &= ~E1000_RCTL_SBP;

    /* Enable Receives. */
    E1000_WRITE_REG(hw, E1000_RCTL, rctl);
    /*
     * Setup the HW Rx Head and Tail Descriptor Pointers.
     * This needs to be done after enable.
     */
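    /*
     * Head starts at 0 while the tail is written to the last descriptor, so
     * the hardware initially owns the whole ring except one entry; keeping
     * that one-descriptor gap is what allows head == tail to mean "empty".
     */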
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
        E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
    }

    return 0;
}
/*********************************************************************
 *  Enable transmit unit.
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw     *hw;
    struct igb_tx_queue *txq;
    uint32_t tctl;
    uint32_t txdctl;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    /* Setup the Base and Length of the Tx Descriptor Rings. */
    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        uint64_t bus_addr;

        txq = dev->data->tx_queues[i];
        bus_addr = txq->tx_ring_phys_addr;

        E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
                        txq->nb_tx_desc *
                        sizeof(union e1000_adv_tx_desc));
        E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

        /* Setup the HW Tx Head and Tail descriptor pointers. */
        E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
        E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

        /* Setup Transmit threshold registers. */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
        txdctl |= txq->pthresh & 0x1F;
        txdctl |= ((txq->hthresh & 0x1F) << 8);
        txdctl |= ((txq->wthresh & 0x1F) << 16);
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
    }
    /* Program the Transmit Control Register. */
    tctl = E1000_READ_REG(hw, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
             (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

    e1000_config_collision_dist(hw);
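    /*
     * The collision threshold programmed above and the collision distance
     * programmed by e1000_config_collision_dist() only come into play on
     * half-duplex links, where they bound how the MAC retries transmission
     * after a collision.
     */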
    /* This write will effectively turn on the transmit unit. */
    E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *  Enable VF receive unit.
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_rx_queue *rxq;
    struct rte_pktmbuf_pool_private *mbp_priv;
    uint32_t srrctl;
    uint16_t buf_size;
    uint16_t rctl_bsize;
    uint16_t i;
    int ret;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    /* setup MTU */
    e1000_rlpml_set_vf(hw,
        (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
        VLAN_TAG_SIZE));
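    /*
     * A VF has no direct access to the PF-owned RCTL register, so the
     * maximum receive packet length (with room for one VLAN tag) is
     * presumably requested from the PF through the mailbox by
     * e1000_rlpml_set_vf() rather than written to RCTL here.
     */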
    /* Configure and enable each RX queue. */
    rctl_bsize = 0;
    dev->rx_pkt_burst = eth_igb_recv_pkts;
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        uint64_t bus_addr;
        uint32_t rxdctl;

        rxq = dev->data->rx_queues[i];

        /* Allocate buffers for descriptor rings and set up queue */
        ret = igb_alloc_rx_queue_mbufs(rxq);
        if (ret)
            return ret;

        bus_addr = rxq->rx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_RDLEN(i),
                        rxq->nb_rx_desc *
                        sizeof(union e1000_adv_rx_desc));
        E1000_WRITE_REG(hw, E1000_RDBAH(i),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

        srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Configure RX buffer size.
         */
        mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
        buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
                               RTE_PKTMBUF_HEADROOM);
        if (buf_size >= 1024) {
            /*
             * Configure the BSIZEPACKET field of the SRRCTL
             * register of the queue.
             * Value is in 1 KB resolution, from 1 KB to 127 KB.
             * If this field is equal to 0b, then RCTL.BSIZE
             * determines the RX packet buffer size.
             */
            srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                       E1000_SRRCTL_BSIZEPKT_MASK);
            buf_size = (uint16_t) ((srrctl &
                                    E1000_SRRCTL_BSIZEPKT_MASK) <<
                                   E1000_SRRCTL_BSIZEPKT_SHIFT);
            /* Add dual VLAN tag length to support dual VLAN (QinQ) frames. */
            if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                        2 * VLAN_TAG_SIZE) > buf_size) {
                if (!dev->data->scattered_rx)
                    PMD_INIT_LOG(DEBUG,
                                 "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
            }
        } else {
            /*
             * Use BSIZE field of the device RCTL register.
             */
            if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                rctl_bsize = buf_size;
            if (!dev->data->scattered_rx)
                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
            dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
            dev->data->scattered_rx = 1;
        }
        /* Set if packets are dropped when no descriptors available */
        if (rxq->drop_en)
            srrctl |= E1000_SRRCTL_DROP_EN;

        E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
        /* Enable this RX queue. */
        rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        rxdctl &= 0xFFF00000;
        rxdctl |= (rxq->pthresh & 0x1F);
        rxdctl |= ((rxq->hthresh & 0x1F) << 8);
        if (hw->mac.type == e1000_vfadapt) {
            /*
             * Workaround for 82576 VF erratum: force WTHRESH to 1
             * to avoid write-back sometimes not being triggered.
             */
            rxdctl |= 0x10000; /* WTHRESH = 1 (bits 20:16) */
            PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
        } else
            rxdctl |= ((rxq->wthresh & 0x1F) << 16);
        E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
    }
    if (dev->data->dev_conf.rxmode.enable_scatter) {
        if (!dev->data->scattered_rx)
            PMD_INIT_LOG(DEBUG, "forcing scatter mode");
        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
        dev->data->scattered_rx = 1;
    }
    /*
     * Setup the HW Rx Head and Tail Descriptor Pointers.
     * This needs to be done after enable.
     */
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        E1000_WRITE_REG(hw, E1000_RDH(i), 0);
        E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
    }

    return 0;
}
/*********************************************************************
 *  Enable VF transmit unit.
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_tx_queue *txq;
    uint32_t txdctl;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    /* Setup the Base and Length of the Tx Descriptor Rings. */
    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        uint64_t bus_addr;

        txq = dev->data->tx_queues[i];
        bus_addr = txq->tx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_TDLEN(i),
                        txq->nb_tx_desc *
                        sizeof(union e1000_adv_tx_desc));
        E1000_WRITE_REG(hw, E1000_TDBAH(i),
                        (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

        /* Setup the HW Tx Head and Tail descriptor pointers. */
        E1000_WRITE_REG(hw, E1000_TDT(i), 0);
        E1000_WRITE_REG(hw, E1000_TDH(i), 0);

        /* Setup Transmit threshold registers. */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
        txdctl |= txq->pthresh & 0x1F;
        txdctl |= ((txq->hthresh & 0x1F) << 8);
        if (hw->mac.type == e1000_82576) {
            /*
             * Workaround for 82576 VF erratum: force WTHRESH to 1
             * to avoid write-back sometimes not being triggered.
             */
            txdctl |= 0x10000; /* WTHRESH = 1 (bits 20:16) */
            PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
        } else
            txdctl |= ((txq->wthresh & 0x1F) << 16);
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);