4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
76 #define IGB_RSS_OFFLOAD_ALL ( \
82 ETH_RSS_IPV6_TCP_EX | \
87 /* Bit mask indicating which bits are required to build the TX context */
88 #define IGB_TX_OFFLOAD_MASK ( \
93 static inline struct rte_mbuf *
94 rte_rxmbuf_alloc(struct rte_mempool *mp)
98 m = __rte_mbuf_raw_alloc(mp);
99 __rte_mbuf_sanity_check_raw(m, 0);
103 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
104 (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
106 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
107 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
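/*
 * Illustrative usage sketch (not part of the original code): these macros
 * are typically used when refilling an RX descriptor from a freshly
 * allocated mbuf, exactly as the receive paths below do. 'rxdp' and 'nmb'
 * are hypothetical local variable names.
 *
 *	uint64_t dma_addr =
 *		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
 *	rxdp->read.hdr_addr = dma_addr;
 *	rxdp->read.pkt_addr = dma_addr;
 */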
110 * Structure associated with each descriptor of the RX ring of a RX queue.
112 struct igb_rx_entry {
113 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
117 * Structure associated with each descriptor of the TX ring of a TX queue.
119 struct igb_tx_entry {
120 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
121 uint16_t next_id; /**< Index of next descriptor in ring. */
122 uint16_t last_id; /**< Index of last scattered descriptor. */
126 * Structure associated with each RX queue.
128 struct igb_rx_queue {
129 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
130 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
131 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
132 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
133 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
134 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
135 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
136 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
137 uint16_t nb_rx_desc; /**< number of RX descriptors. */
138 uint16_t rx_tail; /**< current value of RDT register. */
139 uint16_t nb_rx_hold; /**< number of held free RX desc. */
140 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
141 uint16_t queue_id; /**< RX queue index. */
142 uint16_t reg_idx; /**< RX queue register index. */
143 uint8_t port_id; /**< Device port identifier. */
144 uint8_t pthresh; /**< Prefetch threshold register. */
145 uint8_t hthresh; /**< Host threshold register. */
146 uint8_t wthresh; /**< Write-back threshold register. */
147 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
148 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
152 * Hardware context number
154 enum igb_advctx_num {
155 IGB_CTX_0 = 0, /**< CTX0 */
156 IGB_CTX_1 = 1, /**< CTX1 */
157 IGB_CTX_NUM = 2, /**< CTX_NUM */
160 /** Offload features */
161 union igb_vlan_macip {
164 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
166 /**< VLAN Tag Control Identifier (CPU order). */
171 * Compare mask for vlan_macip_len.data,
172 * should be in sync with igb_vlan_macip.f layout.
174 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
175 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
176 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
177 /** MAC+IP length. */
178 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
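/*
 * Illustrative sketch (an assumption, mirroring eth_igb_xmit_pkts() below):
 * the transmit path packs the VLAN TCI and the 7-bit L2 / 9-bit L3 lengths
 * into one 32-bit word, and only the bits selected by cmp_mask take part in
 * the comparison against a cached context. 'tx_pkt' is the mbuf being sent.
 *
 *	union igb_vlan_macip vlan_macip_lens;
 *	vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
 *	vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
 *	match = (ctx_cache->vlan_macip_lens.data ==
 *		 (ctx_cache->cmp_mask & vlan_macip_lens.data));
 */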
181 * Structure used to check whether a new context descriptor needs to be built
183 struct igb_advctx_info {
184 uint64_t flags; /**< ol_flags related to context build. */
185 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
186 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
190 * Structure associated with each TX queue.
192 struct igb_tx_queue {
193 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
194 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
195 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
196 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
197 uint32_t txd_type; /**< Device-specific TXD type */
198 uint16_t nb_tx_desc; /**< number of TX descriptors. */
199 uint16_t tx_tail; /**< Current value of TDT register. */
201 /**< Index of first used TX descriptor. */
202 uint16_t queue_id; /**< TX queue index. */
203 uint16_t reg_idx; /**< TX queue register index. */
204 uint8_t port_id; /**< Device port identifier. */
205 uint8_t pthresh; /**< Prefetch threshold register. */
206 uint8_t hthresh; /**< Host threshold register. */
207 uint8_t wthresh; /**< Write-back threshold register. */
209 /**< Currently used hardware descriptor. */
211 /**< Start context position for transmit queue. */
212 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
213 /**< Hardware context history.*/
217 #define RTE_PMD_USE_PREFETCH
220 #ifdef RTE_PMD_USE_PREFETCH
221 #define rte_igb_prefetch(p) rte_prefetch0(p)
223 #define rte_igb_prefetch(p) do {} while(0)
226 #ifdef RTE_PMD_PACKET_PREFETCH
227 #define rte_packet_prefetch(p) rte_prefetch1(p)
229 #define rte_packet_prefetch(p) do {} while(0)
233 * Macro for the VMDq feature on 1 GbE NICs.
235 #define E1000_VMOLR_SIZE (8)
237 /*********************************************************************
241 **********************************************************************/
244 * Advanced context descriptors are almost the same between igb and ixgbe.
245 * This is kept as a separate function to leave room for optimization;
246 * rework is required to use the pre-defined values.
250 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
251 volatile struct e1000_adv_tx_context_desc *ctx_txd,
252 uint64_t ol_flags, uint32_t vlan_macip_lens)
254 uint32_t type_tucmd_mlhl;
255 uint32_t mss_l4len_idx;
256 uint32_t ctx_idx, ctx_curr;
259 ctx_curr = txq->ctx_curr;
260 ctx_idx = ctx_curr + txq->ctx_start;
265 if (ol_flags & PKT_TX_VLAN_PKT) {
266 cmp_mask |= TX_VLAN_CMP_MASK;
269 if (ol_flags & PKT_TX_IP_CKSUM) {
270 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
271 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
274 /* Specify which HW CTX to upload. */
275 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
276 switch (ol_flags & PKT_TX_L4_MASK) {
277 case PKT_TX_UDP_CKSUM:
278 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
279 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
281 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
283 case PKT_TX_TCP_CKSUM:
284 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
285 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
286 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
287 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
289 case PKT_TX_SCTP_CKSUM:
290 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
291 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
293 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
296 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
297 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
301 txq->ctx_cache[ctx_curr].flags = ol_flags;
302 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
303 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
304 vlan_macip_lens & cmp_mask;
306 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
307 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
308 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
309 ctx_txd->seqnum_seed = 0;
313 * Check which hardware context can be used. Use the existing match
314 * or create a new context descriptor.
316 static inline uint32_t
317 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
318 uint32_t vlan_macip_lens)
320 /* If match with the current context */
321 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
322 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
323 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
324 return txq->ctx_curr;
327 /* If match with the second context */
328 txq->ctx_curr ^= 1;
329 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
330 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
331 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
332 return txq->ctx_curr;
335 /* Mismatch: a new context descriptor needs to be built */
336 return (IGB_CTX_NUM);
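/*
 * Usage sketch (taken from eth_igb_xmit_pkts() below): the caller treats a
 * return value of IGB_CTX_NUM as "no cached context matched", which forces
 * a new context descriptor to be written to the ring.
 *
 *	ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens.data);
 *	new_ctx = (ctx == IGB_CTX_NUM);
 *	tx_last = (uint16_t) (tx_last + new_ctx);
 */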
339 static inline uint32_t
340 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
342 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
343 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
346 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
347 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
351 static inline uint32_t
352 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
354 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
355 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
359 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
362 struct igb_tx_queue *txq;
363 struct igb_tx_entry *sw_ring;
364 struct igb_tx_entry *txe, *txn;
365 volatile union e1000_adv_tx_desc *txr;
366 volatile union e1000_adv_tx_desc *txd;
367 struct rte_mbuf *tx_pkt;
368 struct rte_mbuf *m_seg;
369 union igb_vlan_macip vlan_macip_lens;
377 uint64_t buf_dma_addr;
378 uint32_t olinfo_status;
379 uint32_t cmd_type_len;
388 uint32_t new_ctx = 0;
392 sw_ring = txq->sw_ring;
394 tx_id = txq->tx_tail;
395 txe = &sw_ring[tx_id];
397 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
399 pkt_len = tx_pkt->pkt_len;
401 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
404 * The number of descriptors that must be allocated for a
405 * packet is the number of segments of that packet, plus 1
406 * Context Descriptor for the VLAN Tag Identifier, if any.
407 * Determine the last TX descriptor to allocate in the TX ring
408 * for the packet, starting from the current position (tx_id)
411 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
413 ol_flags = tx_pkt->ol_flags;
414 l2_l3_len.l2_len = tx_pkt->l2_len;
415 l2_l3_len.l3_len = tx_pkt->l3_len;
416 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
417 vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
418 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
420 /* If a context descriptor needs to be built. */
422 ctx = what_advctx_update(txq, tx_ol_req,
423 vlan_macip_lens.data);
424 /* Only allocate a context descriptor if required. */
425 new_ctx = (ctx == IGB_CTX_NUM);
427 tx_last = (uint16_t) (tx_last + new_ctx);
429 if (tx_last >= txq->nb_tx_desc)
430 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
432 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
433 " tx_first=%u tx_last=%u",
434 (unsigned) txq->port_id,
435 (unsigned) txq->queue_id,
441 * Check if there are enough free descriptors in the TX ring
442 * to transmit the next packet.
443 * This operation is based on the two following rules:
445 * 1- Only check that the last needed TX descriptor can be
446 * allocated (by construction, if that descriptor is free,
447 * all intermediate ones are also free).
449 * For this purpose, the index of the last TX descriptor
450 * used for a packet (the "last descriptor" of a packet)
451 * is recorded in the TX entries (the last one included)
452 * that are associated with all TX descriptors allocated
455 * 2- Avoid allocating the last free TX descriptor of the
456 * ring, in order to never set the TDT register with the
457 * same value stored in parallel by the NIC in the TDH
458 * register, which makes the TX engine of the NIC enter
459 * a deadlock situation.
461 * By extension, avoid allocating a free descriptor that
462 * belongs to the last set of free descriptors allocated
463 * to the same packet previously transmitted.
467 * The "last descriptor" of the previously sent packet, if any,
468 * i.e. of the packet that last used the descriptor we are about to allocate.
470 tx_end = sw_ring[tx_last].last_id;
473 * The next descriptor following that "last descriptor" in the
476 tx_end = sw_ring[tx_end].next_id;
479 * The "last descriptor" associated with that next descriptor.
481 tx_end = sw_ring[tx_end].last_id;
484 * Check that this descriptor is free.
486 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
493 * Set common flags of all TX Data Descriptors.
495 * The following bits must be set in all Data Descriptors:
496 * - E1000_ADVTXD_DTYP_DATA
497 * - E1000_ADVTXD_DCMD_DEXT
499 * The following bits must be set in the first Data Descriptor
500 * and are ignored in the other ones:
501 * - E1000_ADVTXD_DCMD_IFCS
502 * - E1000_ADVTXD_MAC_1588
503 * - E1000_ADVTXD_DCMD_VLE
505 * The following bits must only be set in the last Data
507 * - E1000_TXD_CMD_EOP
509 * The following bits can be set in any Data Descriptor, but
510 * are only set in the last Data Descriptor:
513 cmd_type_len = txq->txd_type |
514 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
515 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
516 #if defined(RTE_LIBRTE_IEEE1588)
517 if (ol_flags & PKT_TX_IEEE1588_TMST)
518 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
521 /* Setup TX Advanced context descriptor if required */
523 volatile struct e1000_adv_tx_context_desc *
526 ctx_txd = (volatile struct
527 e1000_adv_tx_context_desc *)
530 txn = &sw_ring[txe->next_id];
531 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
533 if (txe->mbuf != NULL) {
534 rte_pktmbuf_free_seg(txe->mbuf);
538 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
539 vlan_macip_lens.data);
541 txe->last_id = tx_last;
542 tx_id = txe->next_id;
546 /* Setup the TX Advanced Data Descriptor */
547 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
548 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
549 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
554 txn = &sw_ring[txe->next_id];
557 if (txe->mbuf != NULL)
558 rte_pktmbuf_free_seg(txe->mbuf);
562 * Set up transmit descriptor.
564 slen = (uint16_t) m_seg->data_len;
565 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
566 txd->read.buffer_addr =
567 rte_cpu_to_le_64(buf_dma_addr);
568 txd->read.cmd_type_len =
569 rte_cpu_to_le_32(cmd_type_len | slen);
570 txd->read.olinfo_status =
571 rte_cpu_to_le_32(olinfo_status);
572 txe->last_id = tx_last;
573 tx_id = txe->next_id;
576 } while (m_seg != NULL);
579 * The last packet data descriptor needs End Of Packet (EOP)
580 * and Report Status (RS).
582 txd->read.cmd_type_len |=
583 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
589 * Set the Transmit Descriptor Tail (TDT).
591 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
592 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
593 (unsigned) txq->port_id, (unsigned) txq->queue_id,
594 (unsigned) tx_id, (unsigned) nb_tx);
595 txq->tx_tail = tx_id;
600 /*********************************************************************
604 **********************************************************************/
605 static inline uint64_t
606 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
610 static uint64_t ip_pkt_types_map[16] = {
611 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
612 PKT_RX_IPV6_HDR, 0, 0, 0,
613 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
614 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
617 #if defined(RTE_LIBRTE_IEEE1588)
618 static uint32_t ip_pkt_etqf_map[8] = {
619 0, 0, 0, PKT_RX_IEEE1588_PTP,
623 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
624 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
625 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
627 pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
628 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
630 return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
633 static inline uint64_t
634 rx_desc_status_to_pkt_flags(uint32_t rx_status)
638 /* Check if VLAN present */
639 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
641 #if defined(RTE_LIBRTE_IEEE1588)
642 if (rx_status & E1000_RXD_STAT_TMST)
643 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
648 static inline uint64_t
649 rx_desc_error_to_pkt_flags(uint32_t rx_status)
652 * Bit 30: IPE, IPv4 checksum error
653 * Bit 29: L4I, L4I integrity error
656 static uint64_t error_to_pkt_flags_map[4] = {
657 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
658 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
660 return error_to_pkt_flags_map[(rx_status >>
661 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
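/*
 * Example (assuming E1000_RXD_ERR_CKSUM_BIT/MSK select the two error bits
 * described above): a status word with only IPE (bit 30) set indexes entry
 * 2 of the map and yields PKT_RX_IP_CKSUM_BAD; with both IPE and L4I set,
 * entry 3 yields PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD.
 */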
665 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
668 struct igb_rx_queue *rxq;
669 volatile union e1000_adv_rx_desc *rx_ring;
670 volatile union e1000_adv_rx_desc *rxdp;
671 struct igb_rx_entry *sw_ring;
672 struct igb_rx_entry *rxe;
673 struct rte_mbuf *rxm;
674 struct rte_mbuf *nmb;
675 union e1000_adv_rx_desc rxd;
678 uint32_t hlen_type_rss;
688 rx_id = rxq->rx_tail;
689 rx_ring = rxq->rx_ring;
690 sw_ring = rxq->sw_ring;
691 while (nb_rx < nb_pkts) {
693 * The order of operations here is important as the DD status
694 * bit must not be read after any other descriptor fields.
695 * rx_ring and rxdp are pointing to volatile data so the order
696 * of accesses cannot be reordered by the compiler. If they were
697 * not volatile, they could be reordered which could lead to
698 * using invalid descriptor fields when read from rxd.
700 rxdp = &rx_ring[rx_id];
701 staterr = rxdp->wb.upper.status_error;
702 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
709 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
710 * likely to be invalid and to be dropped by the various
711 * validation checks performed by the network stack.
713 * Allocate a new mbuf to replenish the RX ring descriptor.
714 * If the allocation fails:
715 * - arrange for that RX descriptor to be the first one
716 * being parsed the next time the receive function is
717 * invoked [on the same queue].
719 * - Stop parsing the RX ring and return immediately.
721 * This policy does not drop the packet received in the RX
722 * descriptor for which the allocation of a new mbuf failed.
723 * Thus, it allows that packet to be retrieved later once
724 * mbufs have been freed in the meantime.
725 * As a side effect, holding RX descriptors instead of
726 * systematically giving them back to the NIC may lead to
727 * RX ring exhaustion situations.
728 * However, the NIC can gracefully prevent such situations
729 * from happening by sending specific "back-pressure" flow control
730 * frames to its peer(s).
732 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
733 "staterr=0x%x pkt_len=%u",
734 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
735 (unsigned) rx_id, (unsigned) staterr,
736 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
738 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
740 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
741 "queue_id=%u", (unsigned) rxq->port_id,
742 (unsigned) rxq->queue_id);
743 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
748 rxe = &sw_ring[rx_id];
750 if (rx_id == rxq->nb_rx_desc)
753 /* Prefetch next mbuf while processing current one. */
754 rte_igb_prefetch(sw_ring[rx_id].mbuf);
757 * When the next RX descriptor is on a cache-line boundary,
758 * prefetch the next 4 RX descriptors and the next 8 pointers
761 if ((rx_id & 0x3) == 0) {
762 rte_igb_prefetch(&rx_ring[rx_id]);
763 rte_igb_prefetch(&sw_ring[rx_id]);
769 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
770 rxdp->read.hdr_addr = dma_addr;
771 rxdp->read.pkt_addr = dma_addr;
774 * Initialize the returned mbuf.
775 * 1) setup generic mbuf fields:
776 * - number of segments,
779 * - RX port identifier.
780 * 2) integrate hardware offload data, if any:
782 * - IP checksum flag,
783 * - VLAN TCI, if any,
786 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
788 rxm->data_off = RTE_PKTMBUF_HEADROOM;
789 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
792 rxm->pkt_len = pkt_len;
793 rxm->data_len = pkt_len;
794 rxm->port = rxq->port_id;
796 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
797 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
798 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
799 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
801 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
802 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
803 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
804 rxm->ol_flags = pkt_flags;
807 * Store the mbuf address into the next entry of the array
808 * of returned packets.
810 rx_pkts[nb_rx++] = rxm;
812 rxq->rx_tail = rx_id;
815 * If the number of free RX descriptors is greater than the RX free
816 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
818 * Update the RDT with the value of the last processed RX descriptor
819 * minus 1, to guarantee that the RDT register is never equal to the
820 * RDH register, which creates a "full" ring situation from the
821 * hardware point of view...
823 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
824 if (nb_hold > rxq->rx_free_thresh) {
825 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
826 "nb_hold=%u nb_rx=%u",
827 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
828 (unsigned) rx_id, (unsigned) nb_hold,
830 rx_id = (uint16_t) ((rx_id == 0) ?
831 (rxq->nb_rx_desc - 1) : (rx_id - 1));
832 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
835 rxq->nb_rx_hold = nb_hold;
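/*
 * Note (illustrative): with rx_free_thresh set to e.g. 32, the RDT register
 * is only rewritten once more than 32 processed descriptors are being held
 * back, batching the MMIO doorbell write instead of updating RDT for every
 * received packet.
 */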
840 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
843 struct igb_rx_queue *rxq;
844 volatile union e1000_adv_rx_desc *rx_ring;
845 volatile union e1000_adv_rx_desc *rxdp;
846 struct igb_rx_entry *sw_ring;
847 struct igb_rx_entry *rxe;
848 struct rte_mbuf *first_seg;
849 struct rte_mbuf *last_seg;
850 struct rte_mbuf *rxm;
851 struct rte_mbuf *nmb;
852 union e1000_adv_rx_desc rxd;
853 uint64_t dma; /* Physical address of mbuf data buffer */
855 uint32_t hlen_type_rss;
865 rx_id = rxq->rx_tail;
866 rx_ring = rxq->rx_ring;
867 sw_ring = rxq->sw_ring;
870 * Retrieve RX context of current packet, if any.
872 first_seg = rxq->pkt_first_seg;
873 last_seg = rxq->pkt_last_seg;
875 while (nb_rx < nb_pkts) {
878 * The order of operations here is important as the DD status
879 * bit must not be read after any other descriptor fields.
880 * rx_ring and rxdp are pointing to volatile data so the order
881 * of accesses cannot be reordered by the compiler. If they were
882 * not volatile, they could be reordered which could lead to
883 * using invalid descriptor fields when read from rxd.
885 rxdp = &rx_ring[rx_id];
886 staterr = rxdp->wb.upper.status_error;
887 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
894 * Allocate a new mbuf to replenish the RX ring descriptor.
895 * If the allocation fails:
896 * - arrange for that RX descriptor to be the first one
897 * being parsed the next time the receive function is
898 * invoked [on the same queue].
900 * - Stop parsing the RX ring and return immediately.
902 * This policy does not drop the packet received in the RX
903 * descriptor for which the allocation of a new mbuf failed.
904 * Thus, it allows that packet to be retrieved later once
905 * mbufs have been freed in the meantime.
906 * As a side effect, holding RX descriptors instead of
907 * systematically giving them back to the NIC may lead to
908 * RX ring exhaustion situations.
909 * However, the NIC can gracefully prevent such situations
910 * from happening by sending specific "back-pressure" flow control
911 * frames to its peer(s).
913 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
914 "staterr=0x%x data_len=%u",
915 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
916 (unsigned) rx_id, (unsigned) staterr,
917 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
919 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
921 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
922 "queue_id=%u", (unsigned) rxq->port_id,
923 (unsigned) rxq->queue_id);
924 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
929 rxe = &sw_ring[rx_id];
931 if (rx_id == rxq->nb_rx_desc)
934 /* Prefetch next mbuf while processing current one. */
935 rte_igb_prefetch(sw_ring[rx_id].mbuf);
938 * When the next RX descriptor is on a cache-line boundary,
939 * prefetch the next 4 RX descriptors and the next 8 pointers
942 if ((rx_id & 0x3) == 0) {
943 rte_igb_prefetch(&rx_ring[rx_id]);
944 rte_igb_prefetch(&sw_ring[rx_id]);
948 * Update RX descriptor with the physical address of the new
949 * data buffer of the newly allocated mbuf.
953 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
954 rxdp->read.pkt_addr = dma;
955 rxdp->read.hdr_addr = dma;
958 * Set data length & data buffer address of mbuf.
960 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
961 rxm->data_len = data_len;
962 rxm->data_off = RTE_PKTMBUF_HEADROOM;
965 * If this is the first buffer of the received packet,
966 * set the pointer to the first mbuf of the packet and
967 * initialize its context.
968 * Otherwise, update the total length and the number of segments
969 * of the current scattered packet, and update the pointer to
970 * the last mbuf of the current packet.
972 if (first_seg == NULL) {
974 first_seg->pkt_len = data_len;
975 first_seg->nb_segs = 1;
977 first_seg->pkt_len += data_len;
978 first_seg->nb_segs++;
979 last_seg->next = rxm;
983 * If this is not the last buffer of the received packet,
984 * update the pointer to the last mbuf of the current scattered
985 * packet and continue to parse the RX ring.
987 if (! (staterr & E1000_RXD_STAT_EOP)) {
993 * This is the last buffer of the received packet.
994 * If the CRC is not stripped by the hardware:
995 * - Subtract the CRC length from the total packet length.
996 * - If the last buffer only contains the whole CRC or a part
997 * of it, free the mbuf associated to the last buffer.
998 * If part of the CRC is also contained in the previous
999 * mbuf, subtract the length of that CRC part from the
1000 * data length of the previous mbuf.
1003 if (unlikely(rxq->crc_len > 0)) {
1004 first_seg->pkt_len -= ETHER_CRC_LEN;
1005 if (data_len <= ETHER_CRC_LEN) {
1006 rte_pktmbuf_free_seg(rxm);
1007 first_seg->nb_segs--;
1008 last_seg->data_len = (uint16_t)
1009 (last_seg->data_len -
1010 (ETHER_CRC_LEN - data_len));
1011 last_seg->next = NULL;
1014 (uint16_t) (data_len - ETHER_CRC_LEN);
1018 * Initialize the first mbuf of the returned packet:
1019 * - RX port identifier,
1020 * - hardware offload data, if any:
1021 * - RSS flag & hash,
1022 * - IP checksum flag,
1023 * - VLAN TCI, if any,
1026 first_seg->port = rxq->port_id;
1027 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1030 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1031 * set in the pkt_flags field.
1033 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1034 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1035 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1036 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1037 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1038 first_seg->ol_flags = pkt_flags;
1040 /* Prefetch data of first segment, if configured to do so. */
1041 rte_packet_prefetch((char *)first_seg->buf_addr +
1042 first_seg->data_off);
1045 * Store the mbuf address into the next entry of the array
1046 * of returned packets.
1048 rx_pkts[nb_rx++] = first_seg;
1051 * Set up the receive context for a new packet.
1057 * Record index of the next RX descriptor to probe.
1059 rxq->rx_tail = rx_id;
1062 * Save receive context.
1064 rxq->pkt_first_seg = first_seg;
1065 rxq->pkt_last_seg = last_seg;
1068 * If the number of free RX descriptors is greater than the RX free
1069 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1071 * Update the RDT with the value of the last processed RX descriptor
1072 * minus 1, to guarantee that the RDT register is never equal to the
1073 * RDH register, which creates a "full" ring situation from the
1074 * hardware point of view...
1076 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1077 if (nb_hold > rxq->rx_free_thresh) {
1078 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1079 "nb_hold=%u nb_rx=%u",
1080 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1081 (unsigned) rx_id, (unsigned) nb_hold,
1083 rx_id = (uint16_t) ((rx_id == 0) ?
1084 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1085 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1088 rxq->nb_rx_hold = nb_hold;
1093 * Rings setup and release.
1095 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1096 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1097 * This also optimizes cache-line usage.
1098 * H/W supports cache line sizes of up to 128 bytes.
1100 #define IGB_ALIGN 128
1103 * Maximum number of Ring Descriptors.
1105 * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1106 * descriptors must meet the following condition:
1107 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1109 #define IGB_MIN_RING_DESC 32
1110 #define IGB_MAX_RING_DESC 4096
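/*
 * For example (assuming the 16-byte advanced RX/TX descriptor size), the
 * 128-byte constraint means the ring size must be a multiple of 8, so any
 * multiple of 8 between IGB_MIN_RING_DESC (32) and IGB_MAX_RING_DESC (4096)
 * is acceptable; 1024 descriptors occupy exactly 16 KB of ring memory.
 */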
1112 static const struct rte_memzone *
1113 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1114 uint16_t queue_id, uint32_t ring_size, int socket_id)
1116 char z_name[RTE_MEMZONE_NAMESIZE];
1117 const struct rte_memzone *mz;
1119 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1120 dev->driver->pci_drv.name, ring_name,
1121 dev->data->port_id, queue_id);
1122 mz = rte_memzone_lookup(z_name);
1126 #ifdef RTE_LIBRTE_XEN_DOM0
1127 return rte_memzone_reserve_bounded(z_name, ring_size,
1128 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1130 return rte_memzone_reserve_aligned(z_name, ring_size,
1131 socket_id, 0, IGB_ALIGN);
1136 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1140 if (txq->sw_ring != NULL) {
1141 for (i = 0; i < txq->nb_tx_desc; i++) {
1142 if (txq->sw_ring[i].mbuf != NULL) {
1143 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1144 txq->sw_ring[i].mbuf = NULL;
1151 igb_tx_queue_release(struct igb_tx_queue *txq)
1154 igb_tx_queue_release_mbufs(txq);
1155 rte_free(txq->sw_ring);
1161 eth_igb_tx_queue_release(void *txq)
1163 igb_tx_queue_release(txq);
1167 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1172 memset((void*)&txq->ctx_cache, 0,
1173 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1177 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1179 static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1181 struct igb_tx_entry *txe = txq->sw_ring;
1183 struct e1000_hw *hw;
1185 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186 /* Zero out HW ring memory */
1187 for (i = 0; i < txq->nb_tx_desc; i++) {
1188 txq->tx_ring[i] = zeroed_desc;
1191 /* Initialize ring entries */
1192 prev = (uint16_t)(txq->nb_tx_desc - 1);
1193 for (i = 0; i < txq->nb_tx_desc; i++) {
1194 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1196 txd->wb.status = E1000_TXD_STAT_DD;
1199 txe[prev].next_id = i;
1203 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1204 /* 82575 specific, each tx queue will use 2 hw contexts */
1205 if (hw->mac.type == e1000_82575)
1206 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1208 igb_reset_tx_queue_stat(txq);
1212 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1215 unsigned int socket_id,
1216 const struct rte_eth_txconf *tx_conf)
1218 const struct rte_memzone *tz;
1219 struct igb_tx_queue *txq;
1220 struct e1000_hw *hw;
1223 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1226 * Validate number of transmit descriptors.
1227 * It must not exceed the hardware maximum, and must be a multiple
1230 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1231 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1236 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1239 if (tx_conf->tx_free_thresh != 0)
1240 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1241 "used for the 1G driver.");
1242 if (tx_conf->tx_rs_thresh != 0)
1243 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1244 "used for the 1G driver.");
1245 if (tx_conf->tx_thresh.wthresh == 0)
1246 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1247 "consider setting the TX WTHRESH value to 4, 8, "
1250 /* Free memory prior to re-allocation if needed */
1251 if (dev->data->tx_queues[queue_idx] != NULL) {
1252 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1253 dev->data->tx_queues[queue_idx] = NULL;
1256 /* First allocate the tx queue data structure */
1257 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1258 RTE_CACHE_LINE_SIZE);
1263 * Allocate TX ring hardware descriptors. A memzone large enough to
1264 * handle the maximum ring size is allocated in order to allow for
1265 * resizing in later calls to the queue setup function.
1267 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1268 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1271 igb_tx_queue_release(txq);
1275 txq->nb_tx_desc = nb_desc;
1276 txq->pthresh = tx_conf->tx_thresh.pthresh;
1277 txq->hthresh = tx_conf->tx_thresh.hthresh;
1278 txq->wthresh = tx_conf->tx_thresh.wthresh;
1279 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1281 txq->queue_id = queue_idx;
1282 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1283 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1284 txq->port_id = dev->data->port_id;
1286 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1287 #ifndef RTE_LIBRTE_XEN_DOM0
1288 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1290 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1292 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1293 /* Allocate software ring */
1294 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1295 sizeof(struct igb_tx_entry) * nb_desc,
1296 RTE_CACHE_LINE_SIZE);
1297 if (txq->sw_ring == NULL) {
1298 igb_tx_queue_release(txq);
1301 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1302 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1304 igb_reset_tx_queue(txq, dev);
1305 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1306 dev->data->tx_queues[queue_idx] = txq;
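/*
 * Application-level usage sketch (an assumption, not part of this file):
 * this function is normally reached through the generic ethdev API rather
 * than called directly, e.g.
 *
 *	struct rte_eth_txconf txconf = { .tx_thresh = { .wthresh = 4 } };
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *
 * where 512 descriptors satisfy the IGB_MIN/MAX_RING_DESC bounds and the
 * 128-byte alignment check above.
 */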
1312 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1316 if (rxq->sw_ring != NULL) {
1317 for (i = 0; i < rxq->nb_rx_desc; i++) {
1318 if (rxq->sw_ring[i].mbuf != NULL) {
1319 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1320 rxq->sw_ring[i].mbuf = NULL;
1327 igb_rx_queue_release(struct igb_rx_queue *rxq)
1330 igb_rx_queue_release_mbufs(rxq);
1331 rte_free(rxq->sw_ring);
1337 eth_igb_rx_queue_release(void *rxq)
1339 igb_rx_queue_release(rxq);
1343 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1345 static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1349 /* Zero out HW ring memory */
1350 for (i = 0; i < rxq->nb_rx_desc; i++) {
1351 rxq->rx_ring[i] = zeroed_desc;
1355 rxq->pkt_first_seg = NULL;
1356 rxq->pkt_last_seg = NULL;
1360 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1363 unsigned int socket_id,
1364 const struct rte_eth_rxconf *rx_conf,
1365 struct rte_mempool *mp)
1367 const struct rte_memzone *rz;
1368 struct igb_rx_queue *rxq;
1369 struct e1000_hw *hw;
1372 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1375 * Validate number of receive descriptors.
1376 * It must not exceed the hardware maximum, and must be a multiple
1379 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1380 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1384 /* Free memory prior to re-allocation if needed */
1385 if (dev->data->rx_queues[queue_idx] != NULL) {
1386 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1387 dev->data->rx_queues[queue_idx] = NULL;
1390 /* First allocate the RX queue data structure. */
1391 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1392 RTE_CACHE_LINE_SIZE);
1396 rxq->nb_rx_desc = nb_desc;
1397 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1398 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1399 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1400 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1402 rxq->drop_en = rx_conf->rx_drop_en;
1403 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1404 rxq->queue_id = queue_idx;
1405 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1406 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1407 rxq->port_id = dev->data->port_id;
1408 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1412 * Allocate RX ring hardware descriptors. A memzone large enough to
1413 * handle the maximum ring size is allocated in order to allow for
1414 * resizing in later calls to the queue setup function.
1416 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1417 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1419 igb_rx_queue_release(rxq);
1422 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1423 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1424 #ifndef RTE_LIBRTE_XEN_DOM0
1425 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1427 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1429 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1431 /* Allocate software ring. */
1432 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1433 sizeof(struct igb_rx_entry) * nb_desc,
1434 RTE_CACHE_LINE_SIZE);
1435 if (rxq->sw_ring == NULL) {
1436 igb_rx_queue_release(rxq);
1439 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1440 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1442 dev->data->rx_queues[queue_idx] = rxq;
1443 igb_reset_rx_queue(rxq);
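/*
 * Application-level usage sketch (an assumption, not part of this file): as
 * with TX, RX queue setup is reached through the generic ethdev API, which
 * also supplies the mbuf pool used to populate the ring, e.g.
 *
 *	struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			       &rxconf, mb_pool);
 */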
1449 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1451 #define IGB_RXQ_SCAN_INTERVAL 4
1452 volatile union e1000_adv_rx_desc *rxdp;
1453 struct igb_rx_queue *rxq;
1456 if (rx_queue_id >= dev->data->nb_rx_queues) {
1457 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1461 rxq = dev->data->rx_queues[rx_queue_id];
1462 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1464 while ((desc < rxq->nb_rx_desc) &&
1465 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1466 desc += IGB_RXQ_SCAN_INTERVAL;
1467 rxdp += IGB_RXQ_SCAN_INTERVAL;
1468 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1469 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1470 desc - rxq->nb_rx_desc]);
1477 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1479 volatile union e1000_adv_rx_desc *rxdp;
1480 struct igb_rx_queue *rxq = rx_queue;
1483 if (unlikely(offset >= rxq->nb_rx_desc))
1485 desc = rxq->rx_tail + offset;
1486 if (desc >= rxq->nb_rx_desc)
1487 desc -= rxq->nb_rx_desc;
1489 rxdp = &rxq->rx_ring[desc];
1490 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1494 igb_dev_clear_queues(struct rte_eth_dev *dev)
1497 struct igb_tx_queue *txq;
1498 struct igb_rx_queue *rxq;
1500 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1501 txq = dev->data->tx_queues[i];
1503 igb_tx_queue_release_mbufs(txq);
1504 igb_reset_tx_queue(txq, dev);
1508 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1509 rxq = dev->data->rx_queues[i];
1511 igb_rx_queue_release_mbufs(rxq);
1512 igb_reset_rx_queue(rxq);
1518 * Receive Side Scaling (RSS).
1519 * See section 7.1.1.7 in the following document:
1520 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1523 * The source and destination IP addresses of the IP header and the source and
1524 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1525 * against a configurable random key to compute a 32-bit RSS hash result.
1526 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1527 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1528 * RSS output index which is used as the RX queue index where to store the
1530 * The following output is supplied in the RX write-back descriptor:
1531 * - 32-bit result of the Microsoft RSS hash function,
1532 * - 4-bit RSS type field.
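/*
 * Worked example (mirroring igb_rss_configure() below): with 4 RX queues
 * configured, RETA entry i is programmed with queue (i % 4), so the 128
 * possible values of the 7 hash LSBs spread incoming flows round-robin
 * across queues 0-3.
 */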
1536 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1537 * Used as the default key.
1539 static uint8_t rss_intel_key[40] = {
1540 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1541 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1542 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1543 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1544 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1548 igb_rss_disable(struct rte_eth_dev *dev)
1550 struct e1000_hw *hw;
1553 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1554 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1555 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1556 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1560 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1568 hash_key = rss_conf->rss_key;
1569 if (hash_key != NULL) {
1570 /* Fill in RSS hash key */
1571 for (i = 0; i < 10; i++) {
1572 rss_key = hash_key[(i * 4)];
1573 rss_key |= hash_key[(i * 4) + 1] << 8;
1574 rss_key |= hash_key[(i * 4) + 2] << 16;
1575 rss_key |= hash_key[(i * 4) + 3] << 24;
1576 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1580 /* Set configured hashing protocols in MRQC register */
1581 rss_hf = rss_conf->rss_hf;
1582 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1583 if (rss_hf & ETH_RSS_IPV4)
1584 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1585 if (rss_hf & ETH_RSS_IPV4_TCP)
1586 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1587 if (rss_hf & ETH_RSS_IPV6)
1588 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1589 if (rss_hf & ETH_RSS_IPV6_EX)
1590 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1591 if (rss_hf & ETH_RSS_IPV6_TCP)
1592 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1593 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1594 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1595 if (rss_hf & ETH_RSS_IPV4_UDP)
1596 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1597 if (rss_hf & ETH_RSS_IPV6_UDP)
1598 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1599 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1600 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1601 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1605 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1606 struct rte_eth_rss_conf *rss_conf)
1608 struct e1000_hw *hw;
1612 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1615 * Before changing anything, first check that the update RSS operation
1616 * does not attempt to disable RSS, if RSS was enabled at
1617 * initialization time, or does not attempt to enable RSS, if RSS was
1618 * disabled at initialization time.
1620 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1621 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1622 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1623 if (rss_hf != 0) /* Enable RSS */
1625 return 0; /* Nothing to do */
1628 if (rss_hf == 0) /* Disable RSS */
1630 igb_hw_rss_hash_set(hw, rss_conf);
1634 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1635 struct rte_eth_rss_conf *rss_conf)
1637 struct e1000_hw *hw;
1644 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1645 hash_key = rss_conf->rss_key;
1646 if (hash_key != NULL) {
1647 /* Return RSS hash key */
1648 for (i = 0; i < 10; i++) {
1649 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1650 hash_key[(i * 4)] = rss_key & 0x000000FF;
1651 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1652 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1653 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1657 /* Get RSS functions configured in MRQC register */
1658 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1659 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1660 rss_conf->rss_hf = 0;
1664 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1665 rss_hf |= ETH_RSS_IPV4;
1666 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1667 rss_hf |= ETH_RSS_IPV4_TCP;
1668 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1669 rss_hf |= ETH_RSS_IPV6;
1670 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1671 rss_hf |= ETH_RSS_IPV6_EX;
1672 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1673 rss_hf |= ETH_RSS_IPV6_TCP;
1674 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1675 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1676 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1677 rss_hf |= ETH_RSS_IPV4_UDP;
1678 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1679 rss_hf |= ETH_RSS_IPV6_UDP;
1680 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1681 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1682 rss_conf->rss_hf = rss_hf;
1687 igb_rss_configure(struct rte_eth_dev *dev)
1689 struct rte_eth_rss_conf rss_conf;
1690 struct e1000_hw *hw;
1694 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1696 /* Fill in redirection table. */
1697 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1698 for (i = 0; i < 128; i++) {
1705 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1706 i % dev->data->nb_rx_queues : 0);
1707 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1709 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1713 * Configure the RSS key and the RSS protocols used to compute
1714 * the RSS hash of input packets.
1716 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1717 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1718 igb_rss_disable(dev);
1721 if (rss_conf.rss_key == NULL)
1722 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1723 igb_hw_rss_hash_set(hw, &rss_conf);
1727 * Check whether the MAC type supports VMDq.
1728 * Return 1 if it does; otherwise, return 0.
1731 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1733 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1735 switch (hw->mac.type) {
1756 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1762 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1764 struct rte_eth_vmdq_rx_conf *cfg;
1765 struct e1000_hw *hw;
1766 uint32_t mrqc, vt_ctl, vmolr, rctl;
1769 PMD_INIT_FUNC_TRACE();
1771 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1774 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1775 if (igb_is_vmdq_supported(dev) == 0)
1778 igb_rss_disable(dev);
1780 /* RCTL: enable VLAN filtering */
1781 rctl = E1000_READ_REG(hw, E1000_RCTL);
1782 rctl |= E1000_RCTL_VFE;
1783 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1785 /* MRQC: enable VMDq */
1786 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1787 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1788 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1790 /* VTCTL: pool selection according to VLAN tag */
1791 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1792 if (cfg->enable_default_pool)
1793 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1794 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1795 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1797 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1798 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1799 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1800 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1803 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1804 vmolr |= E1000_VMOLR_AUPE;
1805 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1806 vmolr |= E1000_VMOLR_ROMPE;
1807 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1808 vmolr |= E1000_VMOLR_ROPE;
1809 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1810 vmolr |= E1000_VMOLR_BAM;
1811 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1812 vmolr |= E1000_VMOLR_MPME;
1814 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1818 * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
1819 * Both 82576 and 82580 support it.
1821 if (hw->mac.type != e1000_i350) {
1822 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1823 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1824 vmolr |= E1000_VMOLR_STRVLAN;
1825 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1829 /* VFTA - enable all vlan filters */
1830 for (i = 0; i < IGB_VFTA_SIZE; i++)
1831 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1833 /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1834 if (hw->mac.type != e1000_82580)
1835 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1838 * RAH/RAL - allow pools to read specific MAC addresses.
1839 * In this case, all pools should be able to read from MAC address 0.
1841 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1842 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1844 /* VLVF: set up filters for vlan tags as configured */
1845 for (i = 0; i < cfg->nb_pool_maps; i++) {
1846 /* Set the VLAN ID in the VLVF register and set the valid bit */
1847 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1848 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1849 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1850 E1000_VLVF_POOLSEL_MASK)));
1853 E1000_WRITE_FLUSH(hw);
1859 /*********************************************************************
1861 * Enable receive unit.
1863 **********************************************************************/
1866 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1868 struct igb_rx_entry *rxe = rxq->sw_ring;
1872 /* Initialize software ring entries. */
1873 for (i = 0; i < rxq->nb_rx_desc; i++) {
1874 volatile union e1000_adv_rx_desc *rxd;
1875 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1878 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1879 "queue_id=%hu", rxq->queue_id);
1883 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1884 rxd = &rxq->rx_ring[i];
1885 rxd->read.hdr_addr = dma_addr;
1886 rxd->read.pkt_addr = dma_addr;
1893 #define E1000_MRQC_DEF_Q_SHIFT (3)
1895 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1897 struct e1000_hw *hw =
1898 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1901 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1903 * SRIOV active scheme
1904 * FIXME: add support for RSS together with VMDq & SR-IOV
1906 mrqc = E1000_MRQC_ENABLE_VMDQ;
1907 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1908 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1909 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1910 } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1912 * SRIOV inactive scheme
1914 switch (dev->data->dev_conf.rxmode.mq_mode) {
1916 igb_rss_configure(dev);
1918 case ETH_MQ_RX_VMDQ_ONLY:
1919 /* Configure general VMDq-only RX parameters */
1920 igb_vmdq_rx_hw_configure(dev);
1922 case ETH_MQ_RX_NONE:
1923 /* If mq_mode is none, disable RSS. */
1925 igb_rss_disable(dev);
1934 eth_igb_rx_init(struct rte_eth_dev *dev)
1936 struct e1000_hw *hw;
1937 struct igb_rx_queue *rxq;
1938 struct rte_pktmbuf_pool_private *mbp_priv;
1943 uint16_t rctl_bsize;
1947 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1951 * Make sure receives are disabled while setting
1952 * up the descriptor ring.
1954 rctl = E1000_READ_REG(hw, E1000_RCTL);
1955 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1958 * Configure support for jumbo frames, if requested.
1960 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1961 rctl |= E1000_RCTL_LPE;
1964 * Set the maximum packet length by default; it may be updated
1965 * later when dual VLAN is enabled or disabled.
1967 E1000_WRITE_REG(hw, E1000_RLPML,
1968 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1971 rctl &= ~E1000_RCTL_LPE;
1973 /* Configure and enable each RX queue. */
1975 dev->rx_pkt_burst = eth_igb_recv_pkts;
1976 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1980 rxq = dev->data->rx_queues[i];
1982 /* Allocate buffers for descriptor rings and set up queue */
1983 ret = igb_alloc_rx_queue_mbufs(rxq);
1988 * Reset crc_len in case it was changed after queue setup by a
1992 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1995 bus_addr = rxq->rx_ring_phys_addr;
1996 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1998 sizeof(union e1000_adv_rx_desc));
1999 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2000 (uint32_t)(bus_addr >> 32));
2001 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2003 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2006 * Configure RX buffer size.
2008 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2009 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2010 RTE_PKTMBUF_HEADROOM);
2011 if (buf_size >= 1024) {
2013 * Configure the BSIZEPACKET field of the SRRCTL
2014 * register of the queue.
2015 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2016 * If this field is equal to 0b, then RCTL.BSIZE
2017 * determines the RX packet buffer size.
2019 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2020 E1000_SRRCTL_BSIZEPKT_MASK);
2021 buf_size = (uint16_t) ((srrctl &
2022 E1000_SRRCTL_BSIZEPKT_MASK) <<
2023 E1000_SRRCTL_BSIZEPKT_SHIFT);
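/*
 * Worked example (assuming the default 128-byte RTE_PKTMBUF_HEADROOM and a
 * 2048-byte mbuf data room): buf_size starts at 1920; since BSIZEPACKET is
 * in 1 KB units, the field is programmed to 1, and the effective buf_size
 * used below is rounded down to 1024 bytes.
 */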
2025 /* Add the dual VLAN length to support dual VLAN */
2026 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2027 2 * VLAN_TAG_SIZE) > buf_size){
2028 if (!dev->data->scattered_rx)
2030 "forcing scatter mode");
2031 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2032 dev->data->scattered_rx = 1;
2036 * Use BSIZE field of the device RCTL register.
2038 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2039 rctl_bsize = buf_size;
2040 if (!dev->data->scattered_rx)
2041 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2042 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2043 dev->data->scattered_rx = 1;
2046 /* Set whether packets are dropped when no descriptors are available */
2048 srrctl |= E1000_SRRCTL_DROP_EN;
2050 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2052 /* Enable this RX queue. */
2053 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2054 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2055 rxdctl &= 0xFFF00000;
2056 rxdctl |= (rxq->pthresh & 0x1F);
2057 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2058 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2059 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2062 if (dev->data->dev_conf.rxmode.enable_scatter) {
2063 if (!dev->data->scattered_rx)
2064 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2065 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2066 dev->data->scattered_rx = 1;
2070 * Setup BSIZE field of RCTL register, if needed.
2071 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2072 * register, since the code above configures the SRRCTL register of
2073 * the RX queue in such a case.
2074 * All configurable sizes are:
2075 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2076 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2077 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2078 * 2048: rctl |= E1000_RCTL_SZ_2048;
2079 * 1024: rctl |= E1000_RCTL_SZ_1024;
2080 * 512: rctl |= E1000_RCTL_SZ_512;
2081 * 256: rctl |= E1000_RCTL_SZ_256;
2083 if (rctl_bsize > 0) {
2084 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2085 rctl |= E1000_RCTL_SZ_512;
2086 else /* 256 <= buf_size < 512 - use 256 */
2087 rctl |= E1000_RCTL_SZ_256;
2091 * Configure RSS if the device is configured with multiple RX queues.
2093 igb_dev_mq_rx_configure(dev);
2095 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2096 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2099 * Setup the Checksum Register.
2100 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2102 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2103 rxcsum |= E1000_RXCSUM_PCSD;
2105 /* Enable both L3/L4 rx checksum offload */
2106 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2107 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2109 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2110 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* Set the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw,
					E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not strip Ethernet CRC. */

		/* Clear the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw,
					E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
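	/*
	 * Added note: the tail pointers written below are set to
	 * nb_rx_desc - 1 rather than nb_rx_desc so that one descriptor is
	 * always held back; hardware stops fetching when the head catches up
	 * with the tail, which keeps a full ring distinguishable from an
	 * empty one.
	 */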
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx),
				rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl, txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx),
				(uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
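		/*
		 * Added note: head and tail both start at zero, so the ring
		 * is empty from the hardware's point of view; the transmit
		 * path advances TDT as it posts descriptors.
		 */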
		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}
	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl, rxdctl;
	uint16_t buf_size, rctl_bsize, i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the maximum receive packet length for the VF. */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue. */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
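		/*
		 * Added note: ADV_ONEBUF selects the advanced receive
		 * descriptor format with a single data buffer per descriptor
		 * (no header split), which is the layout the receive routines
		 * in this file expect.
		 */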
		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* Account for dual VLAN (QinQ) tag length. */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use the BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround for 82576 VF erratum:
			 * force WTHRESH to 1 to avoid a descriptor
			 * write-back that is sometimes not triggered.
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
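	/*
	 * Added note on the workaround above: 0x10000 is bit 16, i.e. the
	 * value 1 in the RXDCTL WTHRESH field (bits 20:16), so the erratum
	 * fix pins the write-back threshold to 1 instead of using the
	 * per-queue wthresh value.
	 */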
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
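		/*
		 * Added note: the 64-bit ring DMA address does not fit in a
		 * single 32-bit register, so the upper and lower halves are
		 * written to TDBAH and TDBAL separately above.
		 */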
		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround for 82576 VF erratum:
			 * force WTHRESH to 1 to avoid a descriptor
			 * write-back that is sometimes not triggered.
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);