4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
76 #define IGB_RSS_OFFLOAD_ALL ( \
82 ETH_RSS_IPV6_TCP_EX | \
87 static inline struct rte_mbuf *
88 rte_rxmbuf_alloc(struct rte_mempool *mp)
92 m = __rte_mbuf_raw_alloc(mp);
93 __rte_mbuf_sanity_check_raw(m, 0);
97 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
98 (uint64_t) ((mb)->buf_physaddr + \
99 (uint64_t) ((char *)((mb)->data) - \
100 (char *)(mb)->buf_addr))
102 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
103 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
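
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the DMA
 * address handed to the NIC is the physical address of the mbuf buffer plus
 * the offset of the packet data inside that buffer, which is exactly what
 * RTE_MBUF_DATA_DMA_ADDR above computes.
 */
static inline uint64_t
igb_example_data_dma_addr(uint64_t buf_physaddr, const char *buf_addr,
			  const char *data)
{
	/* offset of the packet data from the start of the buffer */
	uint64_t data_off = (uint64_t)(data - buf_addr);

	return buf_physaddr + data_off;
}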
106 * Structure associated with each descriptor of the RX ring of a RX queue.
108 struct igb_rx_entry {
109 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
113 * Structure associated with each descriptor of the TX ring of a TX queue.
115 struct igb_tx_entry {
116 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
117 uint16_t next_id; /**< Index of next descriptor in ring. */
118 uint16_t last_id; /**< Index of last scattered descriptor. */
122 * Structure associated with each RX queue.
124 struct igb_rx_queue {
125 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
126 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
127 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
128 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
129 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
130 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
131 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
132 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
133 uint16_t nb_rx_desc; /**< number of RX descriptors. */
134 uint16_t rx_tail; /**< current value of RDT register. */
135 uint16_t nb_rx_hold; /**< number of held free RX desc. */
136 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
137 uint16_t queue_id; /**< RX queue index. */
138 uint16_t reg_idx; /**< RX queue register index. */
139 uint8_t port_id; /**< Device port identifier. */
140 uint8_t pthresh; /**< Prefetch threshold register. */
141 uint8_t hthresh; /**< Host threshold register. */
142 uint8_t wthresh; /**< Write-back threshold register. */
143 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
144 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
148 * Hardware context number
150 enum igb_advctx_num {
151 IGB_CTX_0 = 0, /**< CTX0 */
152 IGB_CTX_1 = 1, /**< CTX1 */
153 IGB_CTX_NUM = 2, /**< Number of contexts. */
156 /** Offload features */
157 union igb_vlan_macip {
160 uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
162 /**< VLAN Tag Control Information (CPU order). */
167 * Compare mask for vlan_macip_len.data,
168 * should be in sync with igb_vlan_macip.f layout.
170 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
171 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
172 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
173 /** MAC+IP length. */
174 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
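
/*
 * Illustrative sketch (hypothetical helper): layout of the l2_l3_len field
 * selected by the compare masks above; the 7-bit L2 (MAC) length occupies
 * bits 15..9 and the 9-bit L3 (IP) length occupies bits 8..0.
 */
static inline uint16_t
igb_example_pack_l2_l3_len(uint8_t l2_len, uint16_t l3_len)
{
	return (uint16_t)(((uint16_t)l2_len << 9) |
			  (l3_len & TX_IP_LEN_CMP_MASK));
}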
177 * Structure to check whether a new context descriptor needs to be built.
179 struct igb_advctx_info {
180 uint16_t flags; /**< ol_flags related to context build. */
181 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
182 union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
186 * Structure associated with each TX queue.
188 struct igb_tx_queue {
189 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
190 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
191 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
192 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
193 uint32_t txd_type; /**< Device-specific TXD type */
194 uint16_t nb_tx_desc; /**< number of TX descriptors. */
195 uint16_t tx_tail; /**< Current value of TDT register. */
197 /**< Index of first used TX descriptor. */
198 uint16_t queue_id; /**< TX queue index. */
199 uint16_t reg_idx; /**< TX queue register index. */
200 uint8_t port_id; /**< Device port identifier. */
201 uint8_t pthresh; /**< Prefetch threshold register. */
202 uint8_t hthresh; /**< Host threshold register. */
203 uint8_t wthresh; /**< Write-back threshold register. */
205 /**< Current used hardware descriptor. */
207 /**< Start context position for transmit queue. */
208 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
209 /**< Hardware context history.*/
213 #define RTE_PMD_USE_PREFETCH
216 #ifdef RTE_PMD_USE_PREFETCH
217 #define rte_igb_prefetch(p) rte_prefetch0(p)
219 #define rte_igb_prefetch(p) do {} while(0)
222 #ifdef RTE_PMD_PACKET_PREFETCH
223 #define rte_packet_prefetch(p) rte_prefetch1(p)
225 #define rte_packet_prefetch(p) do {} while(0)
229 * Macro for VMDq feature for 1 GbE NIC.
231 #define E1000_VMOLR_SIZE (8)
233 /*********************************************************************
237 **********************************************************************/
240 * Advanced context descriptors are almost the same between igb and ixgbe.
241 * This is kept as a separate function; there may be an optimization opportunity here.
242 * Rework is required to use pre-defined values.
246 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
247 volatile struct e1000_adv_tx_context_desc *ctx_txd,
248 uint16_t ol_flags, uint32_t vlan_macip_lens)
250 uint32_t type_tucmd_mlhl;
251 uint32_t mss_l4len_idx;
252 uint32_t ctx_idx, ctx_curr;
255 ctx_curr = txq->ctx_curr;
256 ctx_idx = ctx_curr + txq->ctx_start;
261 if (ol_flags & PKT_TX_VLAN_PKT) {
262 cmp_mask |= TX_VLAN_CMP_MASK;
265 if (ol_flags & PKT_TX_IP_CKSUM) {
266 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
267 cmp_mask |= TX_MAC_LEN_CMP_MASK;
270 /* Specify which HW CTX to upload. */
271 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
272 switch (ol_flags & PKT_TX_L4_MASK) {
273 case PKT_TX_UDP_CKSUM:
274 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
275 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
276 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
277 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
279 case PKT_TX_TCP_CKSUM:
280 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
281 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
282 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
283 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
285 case PKT_TX_SCTP_CKSUM:
286 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
287 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
289 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
292 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
293 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
297 txq->ctx_cache[ctx_curr].flags = ol_flags;
298 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
299 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
300 vlan_macip_lens & cmp_mask;
302 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
303 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
304 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
305 ctx_txd->seqnum_seed = 0;
309 * Check which hardware context can be used. Use the existing match
310 * or create a new context descriptor.
312 static inline uint32_t
313 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
314 uint32_t vlan_macip_lens)
316 /* If it matches the current context */
317 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
318 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
319 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
320 return txq->ctx_curr;
323 /* If it matches the second context */
324 txq->ctx_curr ^= 1;
325 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
326 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
327 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
328 return txq->ctx_curr;
331 /* Mismatch: no cached context matches, a new one must be built */
332 return (IGB_CTX_NUM);
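
/*
 * Illustrative usage sketch (hypothetical helper, mirroring the logic in
 * eth_igb_xmit_pkts() below): a new context descriptor is only built when
 * neither cached context matches the requested offload flags and lengths.
 */
static inline uint32_t
igb_example_need_new_ctx(struct igb_tx_queue *txq, uint16_t tx_ol_req,
			 uint32_t vlan_macip_lens)
{
	uint32_t ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);

	/* IGB_CTX_NUM means "no cached context matched, build a new one". */
	return (ctx == IGB_CTX_NUM);
}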
335 static inline uint32_t
336 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
338 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
339 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
342 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
343 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
347 static inline uint32_t
348 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
350 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
351 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
355 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
358 struct igb_tx_queue *txq;
359 struct igb_tx_entry *sw_ring;
360 struct igb_tx_entry *txe, *txn;
361 volatile union e1000_adv_tx_desc *txr;
362 volatile union e1000_adv_tx_desc *txd;
363 struct rte_mbuf *tx_pkt;
364 struct rte_mbuf *m_seg;
365 union igb_vlan_macip vlan_macip_lens;
366 uint64_t buf_dma_addr;
367 uint32_t olinfo_status;
368 uint32_t cmd_type_len;
377 uint32_t new_ctx = 0;
381 sw_ring = txq->sw_ring;
383 tx_id = txq->tx_tail;
384 txe = &sw_ring[tx_id];
386 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
388 pkt_len = tx_pkt->pkt_len;
390 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
393 * The number of descriptors that must be allocated for a
394 * packet is the number of segments of that packet, plus 1
395 * Context Descriptor for the VLAN Tag Identifier, if any.
396 * Determine the last TX descriptor to allocate in the TX ring
397 * for the packet, starting from the current position (tx_id)
400 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
402 ol_flags = tx_pkt->ol_flags;
403 vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
404 vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
405 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
407 /* Check whether a context descriptor needs to be built. */
409 ctx = what_advctx_update(txq, tx_ol_req,
410 vlan_macip_lens.data);
411 /* Only allocate a context descriptor if required. */
412 new_ctx = (ctx == IGB_CTX_NUM);
414 tx_last = (uint16_t) (tx_last + new_ctx);
416 if (tx_last >= txq->nb_tx_desc)
417 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
419 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
420 " tx_first=%u tx_last=%u\n",
421 (unsigned) txq->port_id,
422 (unsigned) txq->queue_id,
428 * Check if there are enough free descriptors in the TX ring
429 * to transmit the next packet.
430 * This operation is based on the two following rules:
432 * 1- Only check that the last needed TX descriptor can be
433 * allocated (by construction, if that descriptor is free,
434 * all intermediate ones are also free).
436 * For this purpose, the index of the last TX descriptor
437 * used for a packet (the "last descriptor" of a packet)
438 * is recorded in the TX entries (the last one included)
439 * that are associated with all TX descriptors allocated
442 * 2- Avoid allocating the last free TX descriptor of the
443 * ring, in order to never set the TDT register with the
444 * same value stored in parallel by the NIC in the TDH
445 * register, which would make the TX engine of the NIC
446 * enter a deadlock situation.
448 * By extension, avoid allocating a free descriptor that
449 * belongs to the last set of free descriptors allocated
450 * to the same previously transmitted packet.
454 * The "last descriptor" of the packet, if any, that previously
455 * used the last descriptor we now want to allocate.
457 tx_end = sw_ring[tx_last].last_id;
460 * The next descriptor following that "last descriptor" in the
463 tx_end = sw_ring[tx_end].next_id;
466 * The "last descriptor" associated with that next descriptor.
468 tx_end = sw_ring[tx_end].last_id;
471 * Check that this descriptor is free.
473 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
480 * Set common flags of all TX Data Descriptors.
482 * The following bits must be set in all Data Descriptors:
483 * - E1000_ADVTXD_DTYP_DATA
484 * - E1000_ADVTXD_DCMD_DEXT
486 * The following bits must be set in the first Data Descriptor
487 * and are ignored in the other ones:
488 * - E1000_ADVTXD_DCMD_IFCS
489 * - E1000_ADVTXD_MAC_1588
490 * - E1000_ADVTXD_DCMD_VLE
492 * The following bits must only be set in the last Data
494 * - E1000_TXD_CMD_EOP
496 * The following bits can be set in any Data Descriptor, but
497 * are only set in the last Data Descriptor:
500 cmd_type_len = txq->txd_type |
501 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
502 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
503 #if defined(RTE_LIBRTE_IEEE1588)
504 if (ol_flags & PKT_TX_IEEE1588_TMST)
505 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
508 /* Setup TX Advanced context descriptor if required */
510 volatile struct e1000_adv_tx_context_desc *
513 ctx_txd = (volatile struct
514 e1000_adv_tx_context_desc *)
517 txn = &sw_ring[txe->next_id];
518 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
520 if (txe->mbuf != NULL) {
521 rte_pktmbuf_free_seg(txe->mbuf);
525 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
526 vlan_macip_lens.data);
528 txe->last_id = tx_last;
529 tx_id = txe->next_id;
533 /* Setup the TX Advanced Data Descriptor */
534 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
535 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
536 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
541 txn = &sw_ring[txe->next_id];
544 if (txe->mbuf != NULL)
545 rte_pktmbuf_free_seg(txe->mbuf);
549 * Set up transmit descriptor.
551 slen = (uint16_t) m_seg->data_len;
552 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
553 txd->read.buffer_addr =
554 rte_cpu_to_le_64(buf_dma_addr);
555 txd->read.cmd_type_len =
556 rte_cpu_to_le_32(cmd_type_len | slen);
557 txd->read.olinfo_status =
558 rte_cpu_to_le_32(olinfo_status);
559 txe->last_id = tx_last;
560 tx_id = txe->next_id;
563 } while (m_seg != NULL);
566 * The last packet data descriptor needs End Of Packet (EOP)
567 * and Report Status (RS).
569 txd->read.cmd_type_len |=
570 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
576 * Set the Transmit Descriptor Tail (TDT).
578 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
579 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
580 (unsigned) txq->port_id, (unsigned) txq->queue_id,
581 (unsigned) tx_id, (unsigned) nb_tx);
582 txq->tx_tail = tx_id;
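
/*
 * Illustrative sketch (hypothetical helper, mirroring the arithmetic in
 * eth_igb_xmit_pkts() above): index of the last descriptor a packet will use,
 * i.e. one slot per segment plus an optional context descriptor, wrapped
 * around the end of the ring.
 */
static inline uint16_t
igb_example_tx_last(uint16_t tx_id, uint16_t nb_segs, uint16_t need_ctx,
		    uint16_t nb_tx_desc)
{
	uint16_t tx_last = (uint16_t)(tx_id + nb_segs + need_ctx - 1);

	if (tx_last >= nb_tx_desc)
		tx_last = (uint16_t)(tx_last - nb_tx_desc);
	return tx_last;
}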
587 /*********************************************************************
591 **********************************************************************/
592 static inline uint16_t
593 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
597 static uint16_t ip_pkt_types_map[16] = {
598 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
599 PKT_RX_IPV6_HDR, 0, 0, 0,
600 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
601 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
604 #if defined(RTE_LIBRTE_IEEE1588)
605 static uint32_t ip_pkt_etqf_map[8] = {
606 0, 0, 0, PKT_RX_IEEE1588_PTP,
610 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
611 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
612 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
614 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
615 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
617 return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
618 0 : PKT_RX_RSS_HASH));
621 static inline uint16_t
622 rx_desc_status_to_pkt_flags(uint32_t rx_status)
626 /* Check if VLAN present */
627 pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
628 PKT_RX_VLAN_PKT : 0);
630 #if defined(RTE_LIBRTE_IEEE1588)
631 if (rx_status & E1000_RXD_STAT_TMST)
632 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
637 static inline uint16_t
638 rx_desc_error_to_pkt_flags(uint32_t rx_status)
641 * Bit 30: IPE, IPv4 checksum error
642 * Bit 29: L4I, L4 integrity error
645 static uint16_t error_to_pkt_flags_map[4] = {
646 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
647 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
649 return error_to_pkt_flags_map[(rx_status >>
650 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
654 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
657 struct igb_rx_queue *rxq;
658 volatile union e1000_adv_rx_desc *rx_ring;
659 volatile union e1000_adv_rx_desc *rxdp;
660 struct igb_rx_entry *sw_ring;
661 struct igb_rx_entry *rxe;
662 struct rte_mbuf *rxm;
663 struct rte_mbuf *nmb;
664 union e1000_adv_rx_desc rxd;
667 uint32_t hlen_type_rss;
677 rx_id = rxq->rx_tail;
678 rx_ring = rxq->rx_ring;
679 sw_ring = rxq->sw_ring;
680 while (nb_rx < nb_pkts) {
682 * The order of operations here is important as the DD status
683 * bit must not be read after any other descriptor fields.
684 * rx_ring and rxdp are pointing to volatile data so the order
685 * of accesses cannot be reordered by the compiler. If they were
686 * not volatile, they could be reordered which could lead to
687 * using invalid descriptor fields when read from rxd.
689 rxdp = &rx_ring[rx_id];
690 staterr = rxdp->wb.upper.status_error;
691 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
698 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
699 * likely to be invalid and to be dropped by the various
700 * validation checks performed by the network stack.
702 * Allocate a new mbuf to replenish the RX ring descriptor.
703 * If the allocation fails:
704 * - arrange for that RX descriptor to be the first one
705 * being parsed the next time the receive function is
706 * invoked [on the same queue].
708 * - Stop parsing the RX ring and return immediately.
710 * This policy does not drop the packet received in the RX
711 * descriptor for which the allocation of a new mbuf failed.
712 * Thus, it allows that packet to be retrieved later once
713 * mbufs have been freed in the meantime.
714 * As a side effect, holding RX descriptors instead of
715 * systematically giving them back to the NIC may lead to
716 * RX ring exhaustion situations.
717 * However, the NIC can gracefully prevent such situations
718 * from happening by sending specific "back-pressure" flow
719 * control frames to its peer(s).
721 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
722 "staterr=0x%x pkt_len=%u\n",
723 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
724 (unsigned) rx_id, (unsigned) staterr,
725 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
727 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
729 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
730 "queue_id=%u\n", (unsigned) rxq->port_id,
731 (unsigned) rxq->queue_id);
732 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
737 rxe = &sw_ring[rx_id];
739 if (rx_id == rxq->nb_rx_desc)
742 /* Prefetch next mbuf while processing current one. */
743 rte_igb_prefetch(sw_ring[rx_id].mbuf);
746 * When next RX descriptor is on a cache-line boundary,
747 * prefetch the next 4 RX descriptors and the next 8 pointers
750 if ((rx_id & 0x3) == 0) {
751 rte_igb_prefetch(&rx_ring[rx_id]);
752 rte_igb_prefetch(&sw_ring[rx_id]);
758 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
759 rxdp->read.hdr_addr = dma_addr;
760 rxdp->read.pkt_addr = dma_addr;
763 * Initialize the returned mbuf.
764 * 1) setup generic mbuf fields:
765 * - number of segments,
768 * - RX port identifier.
769 * 2) integrate hardware offload data, if any:
771 * - IP checksum flag,
772 * - VLAN TCI, if any,
775 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
777 rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
778 rte_packet_prefetch(rxm->data);
781 rxm->pkt_len = pkt_len;
782 rxm->data_len = pkt_len;
783 rxm->port = rxq->port_id;
785 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
786 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
787 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
788 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
790 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
791 pkt_flags = (uint16_t)(pkt_flags |
792 rx_desc_status_to_pkt_flags(staterr));
793 pkt_flags = (uint16_t)(pkt_flags |
794 rx_desc_error_to_pkt_flags(staterr));
795 rxm->ol_flags = pkt_flags;
798 * Store the mbuf address into the next entry of the array
799 * of returned packets.
801 rx_pkts[nb_rx++] = rxm;
803 rxq->rx_tail = rx_id;
806 * If the number of free RX descriptors is greater than the RX free
807 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
809 * Update the RDT with the value of the last processed RX descriptor
810 * minus 1, to guarantee that the RDT register is never equal to the
811 * RDH register, which creates a "full" ring situation from the
812 * hardware point of view...
814 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
815 if (nb_hold > rxq->rx_free_thresh) {
816 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
817 "nb_hold=%u nb_rx=%u\n",
818 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
819 (unsigned) rx_id, (unsigned) nb_hold,
821 rx_id = (uint16_t) ((rx_id == 0) ?
822 (rxq->nb_rx_desc - 1) : (rx_id - 1));
823 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
826 rxq->nb_rx_hold = nb_hold;
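
/*
 * Illustrative sketch (hypothetical helper): the RDT write-back rule used by
 * the receive functions in this file. RDT is set to the last processed
 * descriptor minus one (with wrap-around) so that it can never equal RDH,
 * which the hardware would interpret as a full ring.
 */
static inline uint16_t
igb_example_rdt_value(uint16_t rx_id, uint16_t nb_rx_desc)
{
	return (uint16_t)((rx_id == 0) ? (nb_rx_desc - 1) : (rx_id - 1));
}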
831 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
834 struct igb_rx_queue *rxq;
835 volatile union e1000_adv_rx_desc *rx_ring;
836 volatile union e1000_adv_rx_desc *rxdp;
837 struct igb_rx_entry *sw_ring;
838 struct igb_rx_entry *rxe;
839 struct rte_mbuf *first_seg;
840 struct rte_mbuf *last_seg;
841 struct rte_mbuf *rxm;
842 struct rte_mbuf *nmb;
843 union e1000_adv_rx_desc rxd;
844 uint64_t dma; /* Physical address of mbuf data buffer */
846 uint32_t hlen_type_rss;
856 rx_id = rxq->rx_tail;
857 rx_ring = rxq->rx_ring;
858 sw_ring = rxq->sw_ring;
861 * Retrieve RX context of current packet, if any.
863 first_seg = rxq->pkt_first_seg;
864 last_seg = rxq->pkt_last_seg;
866 while (nb_rx < nb_pkts) {
869 * The order of operations here is important as the DD status
870 * bit must not be read after any other descriptor fields.
871 * rx_ring and rxdp are pointing to volatile data so the order
872 * of accesses cannot be reordered by the compiler. If they were
873 * not volatile, they could be reordered which could lead to
874 * using invalid descriptor fields when read from rxd.
876 rxdp = &rx_ring[rx_id];
877 staterr = rxdp->wb.upper.status_error;
878 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
885 * Allocate a new mbuf to replenish the RX ring descriptor.
886 * If the allocation fails:
887 * - arrange for that RX descriptor to be the first one
888 * being parsed the next time the receive function is
889 * invoked [on the same queue].
891 * - Stop parsing the RX ring and return immediately.
893 * This policy does not drop the packet received in the RX
894 * descriptor for which the allocation of a new mbuf failed.
895 * Thus, it allows that packet to be retrieved later once
896 * mbufs have been freed in the meantime.
897 * As a side effect, holding RX descriptors instead of
898 * systematically giving them back to the NIC may lead to
899 * RX ring exhaustion situations.
900 * However, the NIC can gracefully prevent such situations
901 * from happening by sending specific "back-pressure" flow
902 * control frames to its peer(s).
904 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
905 "staterr=0x%x data_len=%u\n",
906 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
907 (unsigned) rx_id, (unsigned) staterr,
908 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
910 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
912 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
913 "queue_id=%u\n", (unsigned) rxq->port_id,
914 (unsigned) rxq->queue_id);
915 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
920 rxe = &sw_ring[rx_id];
922 if (rx_id == rxq->nb_rx_desc)
925 /* Prefetch next mbuf while processing current one. */
926 rte_igb_prefetch(sw_ring[rx_id].mbuf);
929 * When next RX descriptor is on a cache-line boundary,
930 * prefetch the next 4 RX descriptors and the next 8 pointers
933 if ((rx_id & 0x3) == 0) {
934 rte_igb_prefetch(&rx_ring[rx_id]);
935 rte_igb_prefetch(&sw_ring[rx_id]);
939 * Update RX descriptor with the physical address of the new
940 * data buffer of the new allocated mbuf.
944 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
945 rxdp->read.pkt_addr = dma;
946 rxdp->read.hdr_addr = dma;
949 * Set data length & data buffer address of mbuf.
951 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
952 rxm->data_len = data_len;
953 rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
956 * If this is the first buffer of the received packet,
957 * set the pointer to the first mbuf of the packet and
958 * initialize its context.
959 * Otherwise, update the total length and the number of segments
960 * of the current scattered packet, and update the pointer to
961 * the last mbuf of the current packet.
963 if (first_seg == NULL) {
965 first_seg->pkt_len = data_len;
966 first_seg->nb_segs = 1;
968 first_seg->pkt_len += data_len;
969 first_seg->nb_segs++;
970 last_seg->next = rxm;
974 * If this is not the last buffer of the received packet,
975 * update the pointer to the last mbuf of the current scattered
976 * packet and continue to parse the RX ring.
978 if (! (staterr & E1000_RXD_STAT_EOP)) {
984 * This is the last buffer of the received packet.
985 * If the CRC is not stripped by the hardware:
986 * - Subtract the CRC length from the total packet length.
987 * - If the last buffer only contains the whole CRC or a part
988 * of it, free the mbuf associated to the last buffer.
989 * If part of the CRC is also contained in the previous
990 * mbuf, subtract the length of that CRC part from the
991 * data length of the previous mbuf.
994 if (unlikely(rxq->crc_len > 0)) {
995 first_seg->pkt_len -= ETHER_CRC_LEN;
996 if (data_len <= ETHER_CRC_LEN) {
997 rte_pktmbuf_free_seg(rxm);
998 first_seg->nb_segs--;
999 last_seg->data_len = (uint16_t)
1000 (last_seg->data_len -
1001 (ETHER_CRC_LEN - data_len));
1002 last_seg->next = NULL;
1005 (uint16_t) (data_len - ETHER_CRC_LEN);
1009 * Initialize the first mbuf of the returned packet:
1010 * - RX port identifier,
1011 * - hardware offload data, if any:
1012 * - RSS flag & hash,
1013 * - IP checksum flag,
1014 * - VLAN TCI, if any,
1017 first_seg->port = rxq->port_id;
1018 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1021 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1022 * set in the pkt_flags field.
1024 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1025 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1026 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
1027 pkt_flags = (uint16_t)(pkt_flags |
1028 rx_desc_status_to_pkt_flags(staterr));
1029 pkt_flags = (uint16_t)(pkt_flags |
1030 rx_desc_error_to_pkt_flags(staterr));
1031 first_seg->ol_flags = pkt_flags;
1033 /* Prefetch data of first segment, if configured to do so. */
1034 rte_packet_prefetch(first_seg->data);
1037 * Store the mbuf address into the next entry of the array
1038 * of returned packets.
1040 rx_pkts[nb_rx++] = first_seg;
1043 * Setup receipt context for a new packet.
1049 * Record index of the next RX descriptor to probe.
1051 rxq->rx_tail = rx_id;
1054 * Save receive context.
1056 rxq->pkt_first_seg = first_seg;
1057 rxq->pkt_last_seg = last_seg;
1060 * If the number of free RX descriptors is greater than the RX free
1061 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1063 * Update the RDT with the value of the last processed RX descriptor
1064 * minus 1, to guarantee that the RDT register is never equal to the
1065 * RDH register, which creates a "full" ring situation from the
1066 * hardware point of view...
1068 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1069 if (nb_hold > rxq->rx_free_thresh) {
1070 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1071 "nb_hold=%u nb_rx=%u\n",
1072 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1073 (unsigned) rx_id, (unsigned) nb_hold,
1075 rx_id = (uint16_t) ((rx_id == 0) ?
1076 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1077 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1080 rxq->nb_rx_hold = nb_hold;
1085 * Rings setup and release.
1087 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
1088 * multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte boundary instead.
1089 * This also makes better use of cache lines.
1090 * The hardware supports cache line sizes of up to 128 bytes.
1092 #define IGB_ALIGN 128
1095 * Maximum number of Ring Descriptors.
1097 * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1098 * descriptors must satisfy the following condition:
1099 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1101 #define IGB_MIN_RING_DESC 32
1102 #define IGB_MAX_RING_DESC 4096
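
/*
 * Illustrative sketch (hypothetical helper): the descriptor count constraint
 * stated above. The queue setup functions below perform the same check inline.
 */
static inline int
igb_example_nb_desc_is_valid(uint16_t nb_desc, size_t desc_size)
{
	if (nb_desc < IGB_MIN_RING_DESC || nb_desc > IGB_MAX_RING_DESC)
		return 0;
	/* ring byte size must be a multiple of IGB_ALIGN (128 bytes) */
	return ((nb_desc * desc_size) % IGB_ALIGN) == 0;
}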
1104 static const struct rte_memzone *
1105 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1106 uint16_t queue_id, uint32_t ring_size, int socket_id)
1108 char z_name[RTE_MEMZONE_NAMESIZE];
1109 const struct rte_memzone *mz;
1111 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1112 dev->driver->pci_drv.name, ring_name,
1113 dev->data->port_id, queue_id);
1114 mz = rte_memzone_lookup(z_name);
1118 #ifdef RTE_LIBRTE_XEN_DOM0
1119 return rte_memzone_reserve_bounded(z_name, ring_size,
1120 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1122 return rte_memzone_reserve_aligned(z_name, ring_size,
1123 socket_id, 0, IGB_ALIGN);
1128 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1132 if (txq->sw_ring != NULL) {
1133 for (i = 0; i < txq->nb_tx_desc; i++) {
1134 if (txq->sw_ring[i].mbuf != NULL) {
1135 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1136 txq->sw_ring[i].mbuf = NULL;
1143 igb_tx_queue_release(struct igb_tx_queue *txq)
1146 igb_tx_queue_release_mbufs(txq);
1147 rte_free(txq->sw_ring);
1153 eth_igb_tx_queue_release(void *txq)
1155 igb_tx_queue_release(txq);
1159 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1164 memset((void*)&txq->ctx_cache, 0,
1165 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1169 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1171 static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1173 struct igb_tx_entry *txe = txq->sw_ring;
1175 struct e1000_hw *hw;
1177 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1178 /* Zero out HW ring memory */
1179 for (i = 0; i < txq->nb_tx_desc; i++) {
1180 txq->tx_ring[i] = zeroed_desc;
1183 /* Initialize ring entries */
1184 prev = (uint16_t)(txq->nb_tx_desc - 1);
1185 for (i = 0; i < txq->nb_tx_desc; i++) {
1186 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1188 txd->wb.status = E1000_TXD_STAT_DD;
1191 txe[prev].next_id = i;
1195 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1196 /* 82575 specific, each tx queue will use 2 hw contexts */
1197 if (hw->mac.type == e1000_82575)
1198 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1200 igb_reset_tx_queue_stat(txq);
1204 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1207 unsigned int socket_id,
1208 const struct rte_eth_txconf *tx_conf)
1210 const struct rte_memzone *tz;
1211 struct igb_tx_queue *txq;
1212 struct e1000_hw *hw;
1215 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1218 * Validate number of transmit descriptors.
1219 * It must not exceed the hardware maximum, and must be a multiple
1222 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1223 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1228 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1231 if (tx_conf->tx_free_thresh != 0)
1232 RTE_LOG(WARNING, PMD,
1233 "The tx_free_thresh parameter is not "
1234 "used for the 1G driver.\n");
1235 if (tx_conf->tx_rs_thresh != 0)
1236 RTE_LOG(WARNING, PMD,
1237 "The tx_rs_thresh parameter is not "
1238 "used for the 1G driver.\n");
1239 if (tx_conf->tx_thresh.wthresh == 0)
1240 RTE_LOG(WARNING, PMD,
1241 "To improve 1G driver performance, consider setting "
1242 "the TX WTHRESH value to 4, 8, or 16.\n");
1244 /* Free memory prior to re-allocation if needed */
1245 if (dev->data->tx_queues[queue_idx] != NULL) {
1246 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1247 dev->data->tx_queues[queue_idx] = NULL;
1250 /* First allocate the tx queue data structure */
1251 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1257 * Allocate TX ring hardware descriptors. A memzone large enough to
1258 * handle the maximum ring size is allocated in order to allow for
1259 * resizing in later calls to the queue setup function.
1261 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1262 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1265 igb_tx_queue_release(txq);
1269 txq->nb_tx_desc = nb_desc;
1270 txq->pthresh = tx_conf->tx_thresh.pthresh;
1271 txq->hthresh = tx_conf->tx_thresh.hthresh;
1272 txq->wthresh = tx_conf->tx_thresh.wthresh;
1273 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1275 txq->queue_id = queue_idx;
1276 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1277 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1278 txq->port_id = dev->data->port_id;
1280 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1281 #ifndef RTE_LIBRTE_XEN_DOM0
1282 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1284 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1286 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1287 /* Allocate software ring */
1288 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1289 sizeof(struct igb_tx_entry) * nb_desc,
1291 if (txq->sw_ring == NULL) {
1292 igb_tx_queue_release(txq);
1295 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1296 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1298 igb_reset_tx_queue(txq, dev);
1299 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1300 dev->data->tx_queues[queue_idx] = txq;
1306 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1310 if (rxq->sw_ring != NULL) {
1311 for (i = 0; i < rxq->nb_rx_desc; i++) {
1312 if (rxq->sw_ring[i].mbuf != NULL) {
1313 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1314 rxq->sw_ring[i].mbuf = NULL;
1321 igb_rx_queue_release(struct igb_rx_queue *rxq)
1324 igb_rx_queue_release_mbufs(rxq);
1325 rte_free(rxq->sw_ring);
1331 eth_igb_rx_queue_release(void *rxq)
1333 igb_rx_queue_release(rxq);
1337 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1339 static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1343 /* Zero out HW ring memory */
1344 for (i = 0; i < rxq->nb_rx_desc; i++) {
1345 rxq->rx_ring[i] = zeroed_desc;
1349 rxq->pkt_first_seg = NULL;
1350 rxq->pkt_last_seg = NULL;
1354 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1357 unsigned int socket_id,
1358 const struct rte_eth_rxconf *rx_conf,
1359 struct rte_mempool *mp)
1361 const struct rte_memzone *rz;
1362 struct igb_rx_queue *rxq;
1363 struct e1000_hw *hw;
1366 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1369 * Validate number of receive descriptors.
1370 * It must not exceed the hardware maximum, and must be a multiple
1373 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1374 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1378 /* Free memory prior to re-allocation if needed */
1379 if (dev->data->rx_queues[queue_idx] != NULL) {
1380 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1381 dev->data->rx_queues[queue_idx] = NULL;
1384 /* First allocate the RX queue data structure. */
1385 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1390 rxq->nb_rx_desc = nb_desc;
1391 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1392 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1393 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1394 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1396 rxq->drop_en = rx_conf->rx_drop_en;
1397 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1398 rxq->queue_id = queue_idx;
1399 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1400 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1401 rxq->port_id = dev->data->port_id;
1402 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1406 * Allocate RX ring hardware descriptors. A memzone large enough to
1407 * handle the maximum ring size is allocated in order to allow for
1408 * resizing in later calls to the queue setup function.
1410 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1411 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1413 igb_rx_queue_release(rxq);
1416 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1417 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1418 #ifndef RTE_LIBRTE_XEN_DOM0
1419 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1421 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1423 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1425 /* Allocate software ring. */
1426 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1427 sizeof(struct igb_rx_entry) * nb_desc,
1429 if (rxq->sw_ring == NULL) {
1430 igb_rx_queue_release(rxq);
1433 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1434 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1436 dev->data->rx_queues[queue_idx] = rxq;
1437 igb_reset_rx_queue(rxq);
1443 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1445 #define IGB_RXQ_SCAN_INTERVAL 4
1446 volatile union e1000_adv_rx_desc *rxdp;
1447 struct igb_rx_queue *rxq;
1450 if (rx_queue_id >= dev->data->nb_rx_queues) {
1451 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1455 rxq = dev->data->rx_queues[rx_queue_id];
1456 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1458 while ((desc < rxq->nb_rx_desc) &&
1459 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1460 desc += IGB_RXQ_SCAN_INTERVAL;
1461 rxdp += IGB_RXQ_SCAN_INTERVAL;
1462 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1463 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1464 desc - rxq->nb_rx_desc]);
1471 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1473 volatile union e1000_adv_rx_desc *rxdp;
1474 struct igb_rx_queue *rxq = rx_queue;
1477 if (unlikely(offset >= rxq->nb_rx_desc))
1479 desc = rxq->rx_tail + offset;
1480 if (desc >= rxq->nb_rx_desc)
1481 desc -= rxq->nb_rx_desc;
1483 rxdp = &rxq->rx_ring[desc];
1484 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1488 igb_dev_clear_queues(struct rte_eth_dev *dev)
1491 struct igb_tx_queue *txq;
1492 struct igb_rx_queue *rxq;
1494 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1495 txq = dev->data->tx_queues[i];
1497 igb_tx_queue_release_mbufs(txq);
1498 igb_reset_tx_queue(txq, dev);
1502 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1503 rxq = dev->data->rx_queues[i];
1505 igb_rx_queue_release_mbufs(rxq);
1506 igb_reset_rx_queue(rxq);
1512 * Receive Side Scaling (RSS).
1513 * See section 7.1.1.7 in the following document:
1514 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1517 * The source and destination IP addresses of the IP header and the source and
1518 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1519 * against a configurable random key to compute a 32-bit RSS hash result.
1520 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1521 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1522 * RSS output index which is used as the RX queue index where to store the
1524 * The following output is supplied in the RX write-back descriptor:
1525 * - 32-bit result of the Microsoft RSS hash function,
1526 * - 4-bit RSS type field.
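
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how the
 * RSS hash described above selects an RX queue. The seven least significant
 * bits of the 32-bit hash index the 128-entry redirection table (RETA), and
 * the RETA entry yields the destination queue index.
 */
static inline uint8_t
igb_example_rss_queue(uint32_t rss_hash, const uint8_t reta[128])
{
	return reta[rss_hash & 0x7F]; /* 7 LSBs -> RETA index */
}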
1530 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1531 * Used as the default key.
1533 static uint8_t rss_intel_key[40] = {
1534 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1535 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1536 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1537 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1538 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1542 igb_rss_disable(struct rte_eth_dev *dev)
1544 struct e1000_hw *hw;
1547 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1548 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1549 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1550 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1554 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1562 hash_key = rss_conf->rss_key;
1563 if (hash_key != NULL) {
1564 /* Fill in RSS hash key */
1565 for (i = 0; i < 10; i++) {
1566 rss_key = hash_key[(i * 4)];
1567 rss_key |= hash_key[(i * 4) + 1] << 8;
1568 rss_key |= hash_key[(i * 4) + 2] << 16;
1569 rss_key |= hash_key[(i * 4) + 3] << 24;
1570 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1574 /* Set configured hashing protocols in MRQC register */
1575 rss_hf = rss_conf->rss_hf;
1576 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1577 if (rss_hf & ETH_RSS_IPV4)
1578 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1579 if (rss_hf & ETH_RSS_IPV4_TCP)
1580 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1581 if (rss_hf & ETH_RSS_IPV6)
1582 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1583 if (rss_hf & ETH_RSS_IPV6_EX)
1584 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1585 if (rss_hf & ETH_RSS_IPV6_TCP)
1586 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1587 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1588 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1589 if (rss_hf & ETH_RSS_IPV4_UDP)
1590 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1591 if (rss_hf & ETH_RSS_IPV6_UDP)
1592 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1593 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1594 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1595 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
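
/*
 * Illustrative sketch (hypothetical helper): byte packing used above when the
 * 40-byte RSS key is written into the ten 32-bit RSSRK registers, and unpacked
 * again in eth_igb_rss_hash_conf_get() below.
 */
static inline uint32_t
igb_example_rss_key_word(const uint8_t *key, unsigned int i)
{
	return (uint32_t)key[i * 4] |
	       ((uint32_t)key[i * 4 + 1] << 8) |
	       ((uint32_t)key[i * 4 + 2] << 16) |
	       ((uint32_t)key[i * 4 + 3] << 24);
}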
1599 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1600 struct rte_eth_rss_conf *rss_conf)
1602 struct e1000_hw *hw;
1606 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1609 * Before changing anything, first check that the update RSS operation
1610 * does not attempt to disable RSS, if RSS was enabled at
1611 * initialization time, or does not attempt to enable RSS, if RSS was
1612 * disabled at initialization time.
1614 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1615 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1616 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1617 if (rss_hf != 0) /* Enable RSS */
1619 return 0; /* Nothing to do */
1622 if (rss_hf == 0) /* Disable RSS */
1624 igb_hw_rss_hash_set(hw, rss_conf);
1628 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1629 struct rte_eth_rss_conf *rss_conf)
1631 struct e1000_hw *hw;
1638 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639 hash_key = rss_conf->rss_key;
1640 if (hash_key != NULL) {
1641 /* Return RSS hash key */
1642 for (i = 0; i < 10; i++) {
1643 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1644 hash_key[(i * 4)] = rss_key & 0x000000FF;
1645 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1646 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1647 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1651 /* Get RSS functions configured in MRQC register */
1652 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1653 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1654 rss_conf->rss_hf = 0;
1658 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1659 rss_hf |= ETH_RSS_IPV4;
1660 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1661 rss_hf |= ETH_RSS_IPV4_TCP;
1662 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1663 rss_hf |= ETH_RSS_IPV6;
1664 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1665 rss_hf |= ETH_RSS_IPV6_EX;
1666 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1667 rss_hf |= ETH_RSS_IPV6_TCP;
1668 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1669 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1670 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1671 rss_hf |= ETH_RSS_IPV4_UDP;
1672 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1673 rss_hf |= ETH_RSS_IPV6_UDP;
1674 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1675 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1676 rss_conf->rss_hf = rss_hf;
1681 igb_rss_configure(struct rte_eth_dev *dev)
1683 struct rte_eth_rss_conf rss_conf;
1684 struct e1000_hw *hw;
1688 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1690 /* Fill in redirection table. */
1691 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1692 for (i = 0; i < 128; i++) {
1699 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1700 i % dev->data->nb_rx_queues : 0);
1701 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1703 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1707 * Configure the RSS key and the RSS protocols used to compute
1708 * the RSS hash of input packets.
1710 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1711 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1712 igb_rss_disable(dev);
1715 if (rss_conf.rss_key == NULL)
1716 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1717 igb_hw_rss_hash_set(hw, &rss_conf);
1721 * Check whether the MAC type supports VMDq.
1722 * Return 1 if it is supported, otherwise return 0.
1725 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1727 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1729 switch (hw->mac.type) {
1750 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1756 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1758 struct rte_eth_vmdq_rx_conf *cfg;
1759 struct e1000_hw *hw;
1760 uint32_t mrqc, vt_ctl, vmolr, rctl;
1763 PMD_INIT_LOG(DEBUG, ">>");
1764 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1765 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1767 /* Check if the MAC type supports VMDq; a return value of 0 means it does not. */
1768 if (igb_is_vmdq_supported(dev) == 0)
1771 igb_rss_disable(dev);
1773 /* RCTL: enable VLAN filtering */
1774 rctl = E1000_READ_REG(hw, E1000_RCTL);
1775 rctl |= E1000_RCTL_VFE;
1776 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1778 /* MRQC: enable vmdq */
1779 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1780 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1781 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1783 /* VTCTL: pool selection according to VLAN tag */
1784 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1785 if (cfg->enable_default_pool)
1786 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1787 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1788 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1791 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1792 * Both 82576 and 82580 support it.
1794 if (hw->mac.type != e1000_i350) {
1795 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1796 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1797 vmolr |= E1000_VMOLR_STRVLAN;
1798 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1802 /* VFTA - enable all vlan filters */
1803 for (i = 0; i < IGB_VFTA_SIZE; i++)
1804 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1806 /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
1807 if (hw->mac.type != e1000_82580)
1808 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1811 * RAH/RAL - allow pools to read specific MAC addresses.
1812 * In this case, all pools should be able to read from MAC address 0.
1814 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1815 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1817 /* VLVF: set up filters for vlan tags as configured */
1818 for (i = 0; i < cfg->nb_pool_maps; i++) {
1819 /* set vlan id in VF register and set the valid bit */
1820 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1821 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1822 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1823 E1000_VLVF_POOLSEL_MASK)));
1826 E1000_WRITE_FLUSH(hw);
1832 /*********************************************************************
1834 * Enable receive unit.
1836 **********************************************************************/
1839 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1841 struct igb_rx_entry *rxe = rxq->sw_ring;
1845 /* Initialize software ring entries. */
1846 for (i = 0; i < rxq->nb_rx_desc; i++) {
1847 volatile union e1000_adv_rx_desc *rxd;
1848 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1851 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1852 "queue_id=%hu\n", rxq->queue_id);
1856 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1857 rxd = &rxq->rx_ring[i];
1858 rxd->read.hdr_addr = dma_addr;
1859 rxd->read.pkt_addr = dma_addr;
1866 #define E1000_MRQC_DEF_Q_SHIFT (3)
1868 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1870 struct e1000_hw *hw =
1871 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1874 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1876 * SR-IOV active scheme.
1877 * FIXME: add support for RSS together with VMDq & SR-IOV.
1879 mrqc = E1000_MRQC_ENABLE_VMDQ;
1880 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1881 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1882 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1883 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1885 * SRIOV inactive scheme
1887 switch (dev->data->dev_conf.rxmode.mq_mode) {
1889 igb_rss_configure(dev);
1891 case ETH_MQ_RX_VMDQ_ONLY:
1892 /*Configure general VMDQ only RX parameters*/
1893 igb_vmdq_rx_hw_configure(dev);
1895 case ETH_MQ_RX_NONE:
1896 /* If mq_mode is none, disable RSS. */
1898 igb_rss_disable(dev);
1907 eth_igb_rx_init(struct rte_eth_dev *dev)
1909 struct e1000_hw *hw;
1910 struct igb_rx_queue *rxq;
1911 struct rte_pktmbuf_pool_private *mbp_priv;
1916 uint16_t rctl_bsize;
1920 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924 * Make sure receives are disabled while setting
1925 * up the descriptor ring.
1927 rctl = E1000_READ_REG(hw, E1000_RCTL);
1928 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1931 * Configure support of jumbo frames, if any.
1933 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1934 rctl |= E1000_RCTL_LPE;
1937 * The maximum packet length is set here by default; it may be
1938 * updated later when dual VLAN is enabled or disabled.
1940 E1000_WRITE_REG(hw, E1000_RLPML,
1941 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1944 rctl &= ~E1000_RCTL_LPE;
1946 /* Configure and enable each RX queue. */
1948 dev->rx_pkt_burst = eth_igb_recv_pkts;
1949 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1953 rxq = dev->data->rx_queues[i];
1955 /* Allocate buffers for descriptor rings and set up queue */
1956 ret = igb_alloc_rx_queue_mbufs(rxq);
1961 * Reset crc_len in case it was changed after queue setup by a
1965 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1968 bus_addr = rxq->rx_ring_phys_addr;
1969 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1971 sizeof(union e1000_adv_rx_desc));
1972 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1973 (uint32_t)(bus_addr >> 32));
1974 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1976 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1979 * Configure RX buffer size.
1981 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1982 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1983 RTE_PKTMBUF_HEADROOM);
1984 if (buf_size >= 1024) {
1986 * Configure the BSIZEPACKET field of the SRRCTL
1987 * register of the queue.
1988 * Value is in 1 KB resolution, from 1 KB to 127 KB.
1989 * If this field is equal to 0b, then RCTL.BSIZE
1990 * determines the RX packet buffer size.
1992 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1993 E1000_SRRCTL_BSIZEPKT_MASK);
1994 buf_size = (uint16_t) ((srrctl &
1995 E1000_SRRCTL_BSIZEPKT_MASK) <<
1996 E1000_SRRCTL_BSIZEPKT_SHIFT);
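/*
 * Worked example (illustrative, assuming a 2048-byte mempool data room and
 * the default 128-byte RTE_PKTMBUF_HEADROOM): buf_size starts at 1920,
 * 1920 >> E1000_SRRCTL_BSIZEPKT_SHIFT (10) programs BSIZEPACKET to 1, and
 * the effective hardware buffer size read back above is 1 << 10 = 1024 bytes.
 */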
1998 /* Account for two VLAN tag lengths to support dual VLAN. */
1999 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2000 2 * VLAN_TAG_SIZE) > buf_size){
2001 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2002 dev->data->scattered_rx = 1;
2006 * Use BSIZE field of the device RCTL register.
2008 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2009 rctl_bsize = buf_size;
2010 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2011 dev->data->scattered_rx = 1;
2014 /* Set whether packets are dropped when no descriptors are available. */
2016 srrctl |= E1000_SRRCTL_DROP_EN;
2018 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2020 /* Enable this RX queue. */
2021 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2022 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2023 rxdctl &= 0xFFF00000;
2024 rxdctl |= (rxq->pthresh & 0x1F);
2025 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2026 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2027 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2030 if (dev->data->dev_conf.rxmode.enable_scatter) {
2031 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2032 dev->data->scattered_rx = 1;
2036 * Setup BSIZE field of RCTL register, if needed.
2037 * Buffer sizes >= 1024 are not supposed to be set up in the RCTL
2038 * register, since the code above configures the SRRCTL register of
2039 * the RX queue in such a case.
2040 * All configurable sizes are:
2041 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2042 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2043 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2044 * 2048: rctl |= E1000_RCTL_SZ_2048;
2045 * 1024: rctl |= E1000_RCTL_SZ_1024;
2046 * 512: rctl |= E1000_RCTL_SZ_512;
2047 * 256: rctl |= E1000_RCTL_SZ_256;
2049 if (rctl_bsize > 0) {
2050 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2051 rctl |= E1000_RCTL_SZ_512;
2052 else /* 256 <= buf_size < 512 - use 256 */
2053 rctl |= E1000_RCTL_SZ_256;
2057 * Configure RSS if the device is configured with multiple RX queues.
2059 igb_dev_mq_rx_configure(dev);
2061 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2062 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2065 * Setup the Checksum Register.
2066 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2068 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2069 rxcsum |= E1000_RXCSUM_PCSD;
2071 /* Enable both L3/L4 rx checksum offload */
2072 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2073 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2075 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2076 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2078 /* Setup the Receive Control Register. */
2079 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2080 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2082 /* set STRCRC bit in all queues */
2083 if (hw->mac.type == e1000_i350 ||
2084 hw->mac.type == e1000_i210 ||
2085 hw->mac.type == e1000_i211 ||
2086 hw->mac.type == e1000_i354) {
2087 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2088 rxq = dev->data->rx_queues[i];
2089 uint32_t dvmolr = E1000_READ_REG(hw,
2090 E1000_DVMOLR(rxq->reg_idx));
2091 dvmolr |= E1000_DVMOLR_STRCRC;
2092 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2096 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2098 /* clear STRCRC bit in all queues */
2099 if (hw->mac.type == e1000_i350 ||
2100 hw->mac.type == e1000_i210 ||
2101 hw->mac.type == e1000_i211 ||
2102 hw->mac.type == e1000_i354) {
2103 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2104 rxq = dev->data->rx_queues[i];
2105 uint32_t dvmolr = E1000_READ_REG(hw,
2106 E1000_DVMOLR(rxq->reg_idx));
2107 dvmolr &= ~E1000_DVMOLR_STRCRC;
2108 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
	}

	return 0;
}
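
/*
 * Note: the unit-enable routines in this file (eth_igb_rx_init() above,
 * eth_igb_tx_init() and the VF variants below) are expected to be called
 * from the device start path (e.g. eth_igb_start() / eth_igbvf_start())
 * once every RX/TX queue has been set up, since they assume that
 * dev->data->rx_queues[] and dev->data->tx_queues[] are fully populated.
 */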

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
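
			/*
			 * Example: a 1920-byte buffer yields
			 * BSIZEPACKET = 1920 >> E1000_SRRCTL_BSIZEPKT_SHIFT = 1
			 * (i.e. 1 KB), and buf_size is rounded down to the
			 * 1024 bytes the hardware will actually use.
			 */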

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
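		/*
		 * Note: WTHRESH occupies bits 20:16 of RXDCTL, so the erratum
		 * path above writes the value 1 as 0x10000.
		 */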
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}

/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw     *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
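		/* As on the RX side, 0x10000 above encodes WTHRESH = 1 (bits 20:16 of TXDCTL). */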
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);