/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
#define IGB_RSS_OFFLOAD_ALL ( \
		ETH_RSS_IPV4 | \
		ETH_RSS_IPV4_TCP | \
		ETH_RSS_IPV6 | \
		ETH_RSS_IPV6_EX | \
		ETH_RSS_IPV6_TCP | \
		ETH_RSS_IPV6_TCP_EX | \
		ETH_RSS_IPV4_UDP | \
		ETH_RSS_IPV6_UDP | \
		ETH_RSS_IPV6_UDP_EX)
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return (m);
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
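
/*
 * Note: RTE_MBUF_DATA_DMA_ADDR() resolves the DMA address of the mbuf's
 * current data start (buf_physaddr + data_off) and is used on the TX path,
 * where the data offset varies per packet. The _DEFAULT variant assumes the
 * fixed RTE_PKTMBUF_HEADROOM offset and is used when replenishing RX
 * descriptors with freshly allocated mbufs, whose data offset is still the
 * default headroom.
 */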
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct igb_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct igb_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};

/**
 * Structure associated with each RX queue.
 */
struct igb_rx_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
	struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of RDT register. */
	uint16_t nb_rx_hold; /**< number of held free RX desc. */
	uint16_t rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t queue_id; /**< RX queue index. */
	uint16_t reg_idx; /**< RX queue register index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold register. */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
};

/**
 * Hardware context number.
 */
enum igb_advctx_num {
	IGB_CTX_0 = 0, /**< CTX0 */
	IGB_CTX_1 = 1, /**< CTX1 */
	IGB_CTX_NUM = 2, /**< CTX_NUM */
};

/** Offload features */
union igb_vlan_macip {
	uint32_t data;
	struct {
		uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
		uint16_t vlan_tci;
		/**< VLAN Tag Control Identifier (CPU order). */
	} f;
};

/*
 * Compare mask for vlan_macip_len.data,
 * should be in sync with igb_vlan_macip.f layout.
 */
#define TX_VLAN_CMP_MASK	0xFFFF0000 /**< VLAN tag - 16 bits. */
#define TX_MAC_LEN_CMP_MASK	0x0000FE00 /**< MAC length - 7 bits. */
#define TX_IP_LEN_CMP_MASK	0x000001FF /**< IP length - 9 bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
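
/*
 * Illustrative sketch (not part of the driver): how the combined l2_l3_len
 * field lines up with the compare masks above -- the 9-bit L3 (IP header)
 * length occupies bits 0-8 and the 7-bit L2 (MAC header) length bits 9-15.
 * The macro name IGB_PACK_L2_L3_LEN is hypothetical.
 */
#define IGB_PACK_L2_L3_LEN(l2_len, l3_len) \
	((uint16_t)((((l2_len) & 0x7F) << 9) | ((l3_len) & 0x1FF)))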
/**
 * Structure to check if a new context descriptor needs to be built.
 */
struct igb_advctx_info {
	uint64_t flags; /**< ol_flags related to context build. */
	uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
	union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};

/**
 * Structure associated with each TX queue.
 */
struct igb_tx_queue {
	volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
	struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
	volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
	uint32_t txd_type; /**< Device-specific TXD type */
	uint16_t nb_tx_desc; /**< number of TX descriptors. */
	uint16_t tx_tail; /**< Current value of TDT register. */
	uint16_t tx_head;
	/**< Index of first used TX descriptor. */
	uint16_t queue_id; /**< TX queue index. */
	uint16_t reg_idx; /**< TX queue register index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold register. */
	uint32_t ctx_curr;
	/**< Current used hardware descriptor. */
	uint32_t ctx_start;
	/**< Start context position for transmit queue. */
	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
	/**< Hardware context history.*/
};

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_igb_prefetch(p)	rte_prefetch0(p)
#else
#define rte_igb_prefetch(p)	do {} while(0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while(0)
#endif
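
/*
 * Note: rte_igb_prefetch() is used for ring entries and mbuf metadata that
 * the driver itself is about to touch, while rte_packet_prefetch() targets
 * packet data handed to the application; rte_prefetch0() hints data into all
 * cache levels and rte_prefetch1() into L2 and higher. Either macro compiles
 * to a no-op when the corresponding build option is disabled.
 */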
/*
 * Macro for VMDq feature for 1 GbE NIC.
 */
#define E1000_VMOLR_SIZE	(8)

/*********************************************************************
 *
 *  TX function
 *
 **********************************************************************/

/*
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is kept as a separate function, as an optimization opportunity:
 * rework is required to go with the pre-defined values.
 */
static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
		volatile struct e1000_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, uint32_t vlan_macip_lens)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	uint32_t ctx_idx, ctx_curr;
	uint32_t cmp_mask;

	ctx_curr = txq->ctx_curr;
	ctx_idx = ctx_curr + txq->ctx_start;

	cmp_mask = 0;
	type_tucmd_mlhl = 0;

	if (ol_flags & PKT_TX_VLAN_PKT) {
		cmp_mask |= TX_VLAN_CMP_MASK;
	}

	if (ol_flags & PKT_TX_IP_CKSUM) {
		type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
		cmp_mask |= TX_MAC_LEN_CMP_MASK;
	}

	/* Specify which HW CTX to upload. */
	mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_TCP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_SCTP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	default:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		break;
	}

	txq->ctx_cache[ctx_curr].flags = ol_flags;
	txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
	txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
		vlan_macip_lens & cmp_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = 0;
}

/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
		uint32_t vlan_macip_lens)
{
	/* If match with the current context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* If match with the second context */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* Mismatch: neither cached context matches, a new one must be built. */
	return (IGB_CTX_NUM);
}
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
	static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
	uint32_t tmp;

	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return tmp;
}

static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
{
	static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
	return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}
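
/*
 * The two helpers above translate offload flags branchlessly: the boolean
 * result of a comparison (0 or 1) indexes a two-entry table, which avoids a
 * conditional branch in the hot TX path. For example,
 * (ol_flags & PKT_TX_IP_CKSUM) != 0 selects E1000_ADVTXD_POPTS_IXSM from
 * l3_olinfo[] only when the IP checksum offload flag is set.
 */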
uint16_t
eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct igb_tx_queue *txq;
	struct igb_tx_entry *sw_ring;
	struct igb_tx_entry *txe, *txn;
	volatile union e1000_adv_tx_desc *txr;
	volatile union e1000_adv_tx_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	union igb_vlan_macip vlan_macip_lens;
	uint64_t buf_dma_addr;
	uint32_t olinfo_status;
	uint32_t cmd_type_len;
	uint32_t pkt_len;
	uint16_t slen;
	uint64_t ol_flags;
	uint16_t tx_end;
	uint16_t tx_id;
	uint16_t tx_last;
	uint16_t nb_tx;
	uint64_t tx_ol_req;
	uint32_t new_ctx = 0;
	uint32_t ctx = 0;

	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the VLAN Tag Identifier, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);

		ol_flags = tx_pkt->ol_flags;
		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
		vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
		tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;

		/* If a Context Descriptor needs to be built. */
		if (tx_ol_req) {
			ctx = what_advctx_update(txq, tx_ol_req,
				vlan_macip_lens.data);
			/* Only allocate context descriptor if required */
			new_ctx = (ctx == IGB_CTX_NUM);
			ctx = txq->ctx_curr;
			tx_last = (uint16_t) (tx_last + new_ctx);
		}
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);

		/*
		 * Check if there are enough free descriptors in the TX ring
		 * to transmit the next packet.
		 * This operation is based on the two following rules:
		 *
		 *   1- Only check that the last needed TX descriptor can be
		 *      allocated (by construction, if that descriptor is free,
		 *      all intermediate ones are also free).
		 *
		 *      For this purpose, the index of the last TX descriptor
		 *      used for a packet (the "last descriptor" of a packet)
		 *      is recorded in the TX entries (the last one included)
		 *      that are associated with all TX descriptors allocated
		 *      for that packet.
		 *
		 *   2- Avoid allocating the last free TX descriptor of the
		 *      ring, in order to never set the TDT register with the
		 *      same value stored in parallel by the NIC in the TDH
		 *      register, which makes the TX engine of the NIC enter
		 *      a deadlock situation.
		 *
		 *      By extension, avoid allocating a free descriptor that
		 *      belongs to the last set of free descriptors allocated
		 *      to the same packet previously transmitted.
		 */

		/*
		 * The "last descriptor" of the previously sent packet, if any,
		 * which used the last descriptor to allocate.
		 */
		tx_end = sw_ring[tx_last].last_id;

		/*
		 * The next descriptor following that "last descriptor" in the
		 * ring.
		 */
		tx_end = sw_ring[tx_end].next_id;

		/*
		 * The "last descriptor" associated with that next descriptor.
		 */
		tx_end = sw_ring[tx_end].last_id;

		/*
		 * Check that this descriptor is free.
		 */
		if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
			if (nb_tx == 0)
				return (0);
			goto end_of_tx;
		}

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - E1000_ADVTXD_DTYP_DATA
		 *   - E1000_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - E1000_ADVTXD_DCMD_IFCS
		 *   - E1000_ADVTXD_MAC_1588
		 *   - E1000_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor of a packet:
		 *   - E1000_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - E1000_TXD_CMD_RS
		 */
		cmd_type_len = txq->txd_type |
			E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
		olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
		if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
		if (tx_ol_req) {
			/* Setup TX Advanced context descriptor if required */
			if (new_ctx) {
				volatile struct e1000_adv_tx_context_desc *
				    ctx_txd;

				ctx_txd = (volatile struct
				    e1000_adv_tx_context_desc *)
				    &txr[tx_id];

				txn = &sw_ring[txe->next_id];
				RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

				if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}

				igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
				    vlan_macip_lens.data);

				txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}

			/* Setup the TX Advanced Data Descriptor */
			cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
			olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
		}

		m_seg = tx_pkt;
		do {
			txn = &sw_ring[txe->next_id];
			txd = &txr[tx_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/*
			 * Set up transmit descriptor.
			 */
			slen = (uint16_t) m_seg->data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			txd->read.buffer_addr =
				rte_cpu_to_le_64(buf_dma_addr);
			txd->read.cmd_type_len =
				rte_cpu_to_le_32(cmd_type_len | slen);
			txd->read.olinfo_status =
				rte_cpu_to_le_32(olinfo_status);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg != NULL);

		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 * and Report Status (RS).
		 */
		txd->read.cmd_type_len |=
			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	}
 end_of_tx:
	rte_wmb();

	/*
	 * Set the Transmit Descriptor Tail (TDT).
	 */
	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
		   (unsigned) tx_id, (unsigned) nb_tx);
	txq->tx_tail = tx_id;

	return (nb_tx);
}
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
	uint64_t pkt_flags;

	static uint64_t ip_pkt_types_map[16] = {
		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
	};

#if defined(RTE_LIBRTE_IEEE1588)
	static uint32_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

	pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
				ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#else
	pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#endif
	return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
}
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint64_t pkt_flags;

	/* Check if VLAN present */
	pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;

#if defined(RTE_LIBRTE_IEEE1588)
	if (rx_status & E1000_RXD_STAT_TMST)
		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif
	return pkt_flags;
}

static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	/*
	 * Bit 30: IPE, IPv4 checksum error
	 * Bit 29: L4I, L4 integrity error
	 */
	static uint64_t error_to_pkt_flags_map[4] = {
		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
	return error_to_pkt_flags_map[(rx_status >>
		E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
}
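
/*
 * Example: a status/error word with both IPE (bit 30) and L4I (bit 29) set
 * yields ((rx_status >> E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK)
 * == 3, which maps to PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD in the
 * table above.
 */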
uint16_t
eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma_addr;
	uint64_t pkt_flags;
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * End of packet.
		 *
		 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
		 * likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * to happen by sending specific "back-pressure" flow control
		 * frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x pkt_len=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma_addr;
		rxdp->read.pkt_addr = dma_addr;

		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
				      rxq->crc_len);
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;

		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);

		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
		rxm->ol_flags = pkt_flags;

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
				    (rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
uint16_t
eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma; /* Physical address of mbuf data buffer */
	uint64_t pkt_flags;
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint16_t data_len;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;

	/*
	 * Retrieve RX context of current packet, if any.
	 */
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;

	while (nb_rx < nb_pkts) {
	next_desc:
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * Descriptor done.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * to happen by sending specific "back-pressure" flow control
		 * frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x data_len=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		/*
		 * Update RX descriptor with the physical address of the new
		 * data buffer of the new allocated mbuf.
		 */
		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.pkt_addr = dma;
		rxdp->read.hdr_addr = dma;

		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
		rxm->data_len = data_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->pkt_len = data_len;
			first_seg->nb_segs = 1;
		} else {
			first_seg->pkt_len += data_len;
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (! (staterr & E1000_RXD_STAT_EOP)) {
			last_seg = rxm;
			goto next_desc;
		}

		/*
		 * This is the last buffer of the received packet.
		 * If the CRC is not stripped by the hardware:
		 *   - Subtract the CRC length from the total packet length.
		 *   - If the last buffer only contains the whole CRC or a part
		 *     of it, free the mbuf associated to the last buffer.
		 *     If part of the CRC is also contained in the previous
		 *     mbuf, subtract the length of that CRC part from the
		 *     data length of the previous mbuf.
		 */
		rxm->next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= ETHER_CRC_LEN;
			if (data_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len = (uint16_t)
					(last_seg->data_len -
					 (ETHER_CRC_LEN - data_len));
				last_seg->next = NULL;
			} else
				rxm->data_len =
					(uint16_t) (data_len - ETHER_CRC_LEN);
		}

		/*
		 * Initialize the first mbuf of the returned packet:
		 *    - RX port identifier,
		 *    - hardware offload data, if any:
		 *      - RSS flag & hash,
		 *      - IP checksum flag,
		 *      - VLAN TCI, if any,
		 *      - error flags.
		 */
		first_seg->port = rxq->port_id;
		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;

		/*
		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
		 * set in the pkt_flags field.
		 */
		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
		first_seg->ol_flags = pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
			first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * Save receive context.
	 */
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
				    (rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define IGB_ALIGN	128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define IGB_MIN_RING_DESC	32
#define IGB_MAX_RING_DESC	4096
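
/*
 * Illustrative check (not part of the driver): with 16-byte advanced
 * descriptors, the 128-byte RDLEN/TDLEN granularity means the ring size must
 * be a multiple of 8 descriptors:
 *
 *     (nb_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 *     => nb_desc % (128 / 16) == 0
 *
 * so, e.g., 512 descriptors pass the validation in the queue setup functions
 * below while 500 are rejected.
 */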
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.name, ring_name,
		 dev->data->port_id, queue_id);
	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(z_name, ring_size,
			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(z_name, ring_size,
			socket_id, 0, IGB_ALIGN);
#endif
}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
	if (txq != NULL) {
		igb_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
eth_igb_tx_queue_release(void *txq)
{
	igb_tx_queue_release(txq);
}

static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
	txq->tx_head = 0;
	txq->tx_tail = 0;
	txq->ctx_curr = 0;
	memset((void*)&txq->ctx_cache, 0,
		IGB_CTX_NUM * sizeof(struct igb_advctx_info));
}
static void
igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
{
	static const union e1000_adv_tx_desc zeroed_desc = { .read = {
			.buffer_addr = 0}};
	struct igb_tx_entry *txe = txq->sw_ring;
	uint16_t i, prev;
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i] = zeroed_desc;
	}

	/* Initialize ring entries */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);

		txd->wb.status = E1000_TXD_STAT_DD;
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->txd_type = E1000_ADVTXD_DTYP_DATA;
	/* 82575 specific, each tx queue will use 2 hw contexts */
	if (hw->mac.type == e1000_82575)
		txq->ctx_start = txq->queue_id * IGB_CTX_NUM;

	igb_reset_tx_queue_stat(txq);
}
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct igb_tx_queue *txq;
	struct e1000_hw *hw;
	uint32_t size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return -EINVAL;
	}

	/*
	 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
	 * driver.
	 */
	if (tx_conf->tx_free_thresh != 0)
		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
			     "used for the 1G driver.");
	if (tx_conf->tx_rs_thresh != 0)
		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
			     "used for the 1G driver.");
	if (tx_conf->tx_thresh.wthresh == 0)
		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
			     "consider setting the TX WTHRESH value to 4, 8, "
			     "or 16.");

	/* Free memory prior to re-allocation if needed */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return (-ENOMEM);

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
				   size, socket_id);
	if (tz == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}

	txq->nb_tx_desc = nb_desc;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
		txq->wthresh = 1;
	txq->queue_id = queue_idx;
	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	txq->port_id = dev->data->port_id;

	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
#else
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
#endif
	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc("txq->sw_ring",
				   sizeof(struct igb_tx_entry) * nb_desc,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	igb_reset_tx_queue(txq, dev);
	dev->tx_pkt_burst = eth_igb_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;

	return (0);
}
static void
igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
	if (rxq != NULL) {
		igb_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

void
eth_igb_rx_queue_release(void *rxq)
{
	igb_rx_queue_release(rxq);
}
static void
igb_reset_rx_queue(struct igb_rx_queue *rxq)
{
	static const union e1000_adv_rx_desc zeroed_desc = { .read = {
			.pkt_addr = 0}};
	unsigned i;

	/* Zero out HW ring memory */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rxq->rx_ring[i] = zeroed_desc;
	}

	rxq->rx_tail = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct igb_rx_queue *rxq;
	struct e1000_hw *hw;
	unsigned int size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the RX queue data structure. */
	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return (-ENOMEM);
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->pthresh = rx_conf->rx_thresh.pthresh;
	rxq->hthresh = rx_conf->rx_thresh.hthresh;
	rxq->wthresh = rx_conf->rx_thresh.wthresh;
	if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
		rxq->wthresh = 1;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
				  ETHER_CRC_LEN);

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
	if (rz == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
#else
	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
#endif
	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;

	/* Allocate software ring. */
	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
				   sizeof(struct igb_rx_entry) * nb_desc,
				   RTE_CACHE_LINE_SIZE);
	if (rxq->sw_ring == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;
	igb_reset_rx_queue(rxq);

	return 0;
}
uint32_t
eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define IGB_RXQ_SCAN_INTERVAL 4
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq;
	uint32_t desc = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
		return 0;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxdp = &(rxq->rx_ring[rxq->rx_tail]);

	while ((desc < rxq->nb_rx_desc) &&
		(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
		desc += IGB_RXQ_SCAN_INTERVAL;
		rxdp += IGB_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}
int
eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq = rx_queue;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return 0;
	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	rxdp = &rxq->rx_ring[desc];
	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct igb_tx_queue *txq;
	struct igb_rx_queue *rxq;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			igb_tx_queue_release_mbufs(txq);
			igb_reset_tx_queue(txq, dev);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			igb_rx_queue_release_mbufs(rxq);
			igb_reset_rx_queue(rxq);
		}
	}
}
/**
 * Receive Side Scaling (RSS).
 * See section 7.1.1.7 in the following document:
 *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
 *
 * Principles:
 * The source and destination IP addresses of the IP header and the source and
 * destination ports of TCP/UDP headers, if any, of received packets are hashed
 * against a configurable random key to compute a 32-bit RSS hash result.
 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
 * RSS output index which is used as the RX queue index where to store the
 * received packets.
 * The following output is supplied in the RX write-back descriptor:
 *     - 32-bit result of the Microsoft RSS hash function,
 *     - 4-bit RSS type field.
 */
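
/*
 * Illustrative sketch (not part of the driver) of the RETA lookup performed
 * by the hardware, as described above: the 7 LSBs of the 32-bit RSS hash
 * select one of the 128 redirection-table entries, whose 3-bit value is the
 * RX queue index. The function name and the reta[] argument are hypothetical.
 */
static inline uint8_t
igb_rss_queue_from_hash(uint32_t rss_hash, const uint8_t reta[128])
{
	return (uint8_t)(reta[rss_hash & 0x7F] & 0x07);
}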
/*
 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
 * Used as the default key.
 */
static uint8_t rss_intel_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
static void
igb_rss_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	uint32_t mrqc;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc &= ~E1000_MRQC_ENABLE_MASK;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
static void
igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint8_t *hash_key;
	uint32_t rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;
	uint16_t i;

	hash_key = rss_conf->rss_key;
	if (hash_key != NULL) {
		/* Fill in RSS hash key */
		for (i = 0; i < 10; i++) {
			rss_key  = hash_key[(i * 4)];
			rss_key |= hash_key[(i * 4) + 1] << 8;
			rss_key |= hash_key[(i * 4) + 2] << 16;
			rss_key |= hash_key[(i * 4) + 3] << 24;
			E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
		}
	}

	/* Set configured hashing protocols in MRQC register */
	rss_hf = rss_conf->rss_hf;
	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
	if (rss_hf & ETH_RSS_IPV4)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
	if (rss_hf & ETH_RSS_IPV4_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hf & ETH_RSS_IPV6)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
	if (rss_hf & ETH_RSS_IPV6_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hf & ETH_RSS_IPV6_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
	if (rss_hf & ETH_RSS_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
int
eth_igb_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct e1000_hw *hw;
	uint32_t mrqc;
	uint64_t rss_hf;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Before changing anything, first check that the update RSS operation
	 * does not attempt to disable RSS, if RSS was enabled at
	 * initialization time, or does not attempt to enable RSS, if RSS was
	 * disabled at initialization time.
	 */
	rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -(EINVAL);
		return 0; /* Nothing to do */
	}
	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -(EINVAL);
	igb_hw_rss_hash_set(hw, rss_conf);
	return 0;
}
int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct e1000_hw *hw;
	uint8_t *hash_key;
	uint32_t rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL) {
		/* Return RSS hash key */
		for (i = 0; i < 10; i++) {
			rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
			hash_key[(i * 4)] = rss_key & 0x000000FF;
			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
		}
	}

	/* Get RSS functions configured in MRQC register */
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
		rss_conf->rss_hf = 0;
		return 0;
	}
	rss_hf = 0;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_IPV4_TCP;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_IPV6_TCP;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_IPV4_UDP;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_IPV6_UDP;
	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;
	rss_conf->rss_hf = rss_hf;
	return 0;
}
static void
igb_rss_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_rss_conf rss_conf;
	struct e1000_hw *hw;
	uint32_t shift;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Fill in redirection table. */
	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
	for (i = 0; i < 128; i++) {
		union e1000_reta {
			uint32_t dword;
			uint8_t  bytes[4];
		} reta;
		uint8_t q_idx;

		q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
				   i % dev->data->nb_rx_queues : 0);
		reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
		if ((i & 3) == 3)
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
	}

	/*
	 * Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
		igb_rss_disable(dev);
		return;
	}
	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = rss_intel_key; /* Default hash key */
	igb_hw_rss_hash_set(hw, &rss_conf);
}
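
/*
 * Note on the RETA loop above: four 8-bit entries are packed into one 32-bit
 * register, so entry i lands in byte (i & 3) of register E1000_RETA(i >> 2)
 * and a write is issued every fourth iteration. With two RX queues, the 128
 * entries alternate 0,1,0,1,..., splitting the hash space evenly.
 */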
/*
 * Check whether the mac type supports VMDq or not.
 * Return 1 if it supports, otherwise return 0.
 */
static int
igb_is_vmdq_supported(const struct rte_eth_dev *dev)
{
	const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		return 1;
	default:
		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
		return 0;
	}
}
static int
igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_vmdq_rx_conf *cfg;
	struct e1000_hw *hw;
	uint32_t mrqc, vt_ctl, vmolr, rctl;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	/* Check if mac type can support VMDq, return value of 0 means NOT support */
	if (igb_is_vmdq_supported(dev) == 0)
		return -1;

	igb_rss_disable(dev);

	/* RCTL: enable VLAN filter */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* MRQC: enable VMDq */
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc |= E1000_MRQC_ENABLE_VMDQ;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);

	/* VTCTL: pool selection according to VLAN tag */
	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
	if (cfg->enable_default_pool)
		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);

	for (i = 0; i < E1000_VMOLR_SIZE; i++) {
		vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
			E1000_VMOLR_MPME);

		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
			vmolr |= E1000_VMOLR_AUPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
			vmolr |= E1000_VMOLR_ROMPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
			vmolr |= E1000_VMOLR_ROPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
			vmolr |= E1000_VMOLR_BAM;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
			vmolr |= E1000_VMOLR_MPME;

		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
	}

	/*
	 * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
	 * Both 82576 and 82580 support it
	 */
	if (hw->mac.type != e1000_i350) {
		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
			vmolr |= E1000_VMOLR_STRVLAN;
			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
		}
	}

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);

	/* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
	if (hw->mac.type != e1000_82580)
		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);

	/*
	 * RAH/RAL - allow pools to read specific mac addresses
	 * In this case, all pools should be able to read from mac addr 0
	 */
	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);

	/* VLVF: set up filters for vlan tags as configured */
	for (i = 0; i < cfg->nb_pool_maps; i++) {
		/* set vlan id in VF register and set the valid bit */
		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
			E1000_VLVF_POOLSEL_MASK)));
	}

	E1000_WRITE_FLUSH(hw);

	return 0;
}
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/

static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
	struct igb_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned i;

	/* Initialize software ring entries. */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union e1000_adv_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				     "queue_id=%hu", rxq->queue_id);
			return (-ENOMEM);
		}
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
		rxd = &rxq->rx_ring[i];
		rxd->read.hdr_addr = dma_addr;
		rxd->read.pkt_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
#define E1000_MRQC_DEF_Q_SHIFT	(3)

static int
igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mrqc;

	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
		/*
		 * SRIOV active scheme
		 * FIXME if support RSS together with VMDq & SRIOV
		 */
		mrqc = E1000_MRQC_ENABLE_VMDQ;
		/* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
		mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
	} else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * SRIOV inactive scheme
		 */
		switch (dev->data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			igb_rss_configure(dev);
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
			/* Configure general VMDq-only RX parameters */
			igb_vmdq_rx_hw_configure(dev);
			break;
		case ETH_MQ_RX_NONE:
			/* if mq_mode is none, disable RSS mode. */
		default:
			igb_rss_disable(dev);
			break;
		}
	}

	return 0;
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint64_t bus_addr;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Configure support of jumbo frames, if any.
	 */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		rctl |= E1000_RCTL_LPE;

		/*
		 * Set maximum packet length by default, and might be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
					VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
			0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
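
			/*
			 * Note: BSIZEPACKET is programmed in 1 KB units, so
			 * the recomputation above rounds buf_size down to the
			 * size the hardware will actually use: a 2048-byte
			 * data room stays 2048 bytes, while e.g. 1700 bytes
			 * would be rounded down to 1024.
			 */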
2010 /* It adds dual VLAN length for supporting dual VLAN */
2011 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2012 2 * VLAN_TAG_SIZE) > buf_size){
2013 if (!dev->data->scattered_rx)
2015 "forcing scatter mode");
2016 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2017 dev->data->scattered_rx = 1;
2021 * Use BSIZE field of the device RCTL register.
2023 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2024 rctl_bsize = buf_size;
2025 if (!dev->data->scattered_rx)
2026 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2027 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2028 dev->data->scattered_rx = 1;
2031 /* Set if packets are dropped when no descriptors available */
2033 srrctl |= E1000_SRRCTL_DROP_EN;
2035 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2037 /* Enable this RX queue. */
2038 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2039 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2040 rxdctl &= 0xFFF00000;
2041 rxdctl |= (rxq->pthresh & 0x1F);
2042 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2043 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2044 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2047 if (dev->data->dev_conf.rxmode.enable_scatter) {
2048 if (!dev->data->scattered_rx)
2049 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2050 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2051 dev->data->scattered_rx = 1;
2055 * Setup BSIZE field of RCTL register, if needed.
2056 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2057 * register, since the code above configures the SRRCTL register of
2058 * the RX queue in such a case.
2059 * All configurable sizes are:
2060 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2061 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2062 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2063 * 2048: rctl |= E1000_RCTL_SZ_2048;
2064 * 1024: rctl |= E1000_RCTL_SZ_1024;
2065 * 512: rctl |= E1000_RCTL_SZ_512;
2066 * 256: rctl |= E1000_RCTL_SZ_256;
2068 if (rctl_bsize > 0) {
2069 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2070 rctl |= E1000_RCTL_SZ_512;
2071 else /* 256 <= buf_size < 512 - use 256 */
2072 rctl |= E1000_RCTL_SZ_256;
2076 * Configure RSS if device configured with multiple RX queues.
2078 igb_dev_mq_rx_configure(dev);
2080 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2081 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2084 * Setup the Checksum Register.
2085 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2087 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2088 rxcsum |= E1000_RXCSUM_PCSD;
2090 /* Enable both L3/L4 rx checksum offload */
2091 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2092 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2094 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2095 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* Set the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not strip Ethernet CRC. */

		/* Clear the STRCRC bit in all queues. */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}
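
	/*
	 * Enable the receiver (EN), accept broadcast frames (BAM), disable
	 * loopback (LBM_NO), set the RX descriptor minimum threshold to half
	 * the ring (RDMTS_HALF), and program the multicast offset (MO) from
	 * the MAC filter type.
	 */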
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 * RDT is set to nb_rx_desc - 1 so that all but one descriptor is
	 * initially owned by the hardware.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx),
				rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx),
				(uint32_t)bus_addr);
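
		/*
		 * TDBAH/TDBAL above carry the high and low 32 bits of the
		 * ring's DMA address; TDLEN is the ring size in bytes.
		 */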
		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}
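
	/*
	 * In TCTL below: PSP pads frames shorter than the minimum frame
	 * size, RTLC retransmits on late collisions, EN enables the
	 * transmitter, and the collision threshold is written into the CT
	 * field after it has been cleared.
	 */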
	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Set the maximum receive packet length, leaving room for a VLAN tag. */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));
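
	/*
	 * A VF cannot program RLPML directly; e1000_rlpml_set_vf forwards
	 * the requested limit to the PF through the VF mailbox interface.
	 */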
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up the queue. */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;
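
		/*
		 * Unlike the PF path, VF queue registers are addressed by
		 * the local index i rather than rxq->reg_idx: each VF sees
		 * its own queues numbered from 0.
		 */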
		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
			/* Add dual VLAN (QinQ) tag length when checking
			 * whether the maximum frame fits in one buffer.
			 */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst =
					eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use the BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG,
					     "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Drop packets when no descriptors are available, if enabled. */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround for the 82576 VF erratum:
			 * force WTHRESH to 1 to avoid descriptor write-back
			 * sometimes not being triggered.
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
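
	/*
	 * Unlike eth_igb_tx_init(), no TCTL programming is done here: the
	 * global transmit enable belongs to the PF, so the VF only sets up
	 * its descriptor rings and per-queue TXDCTL.
	 */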
	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround for the 82576 VF erratum:
			 * force WTHRESH to 1 to avoid descriptor write-back
			 * sometimes not being triggered.
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}