4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "e1000/e1000_api.h"
74 #include "e1000_ethdev.h"
76 static inline struct rte_mbuf *
77 rte_rxmbuf_alloc(struct rte_mempool *mp)
81 m = __rte_mbuf_raw_alloc(mp);
82 __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
86 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
87 (uint64_t) ((mb)->buf_physaddr + \
88 (uint64_t) ((char *)((mb)->pkt.data) - \
89 (char *)(mb)->buf_addr))
91 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
92 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
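/*
 * Illustration (not part of the driver): RTE_MBUF_DATA_DMA_ADDR translates the
 * CPU-visible data pointer of an mbuf into the bus address programmed into a
 * descriptor, by adding the offset of the data inside the buffer to the
 * buffer's physical address. Assuming the default 128-byte RTE_PKTMBUF_HEADROOM,
 * a freshly allocated mbuf whose data pointer sits right after the headroom gives:
 *
 *   uint64_t dma = RTE_MBUF_DATA_DMA_ADDR(mb);
 *   // dma == mb->buf_physaddr + 128, i.e. the same value that
 *   // RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) computes without reading pkt.data.
 */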
95 * Structure associated with each descriptor of the RX ring of a RX queue.
98 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
102 * Structure associated with each descriptor of the TX ring of a TX queue.
104 struct igb_tx_entry {
105 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
106 uint16_t next_id; /**< Index of next descriptor in ring. */
107 uint16_t last_id; /**< Index of last scattered descriptor. */
111 * Structure associated with each RX queue.
113 struct igb_rx_queue {
114 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
115 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
116 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
117 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
118 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
119 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
120 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
121 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
122 uint16_t nb_rx_desc; /**< number of RX descriptors. */
123 uint16_t rx_tail; /**< current value of RDT register. */
124 uint16_t nb_rx_hold; /**< number of held free RX desc. */
125 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
126 uint16_t queue_id; /**< RX queue index. */
127 uint16_t reg_idx; /**< RX queue register index. */
128 uint8_t port_id; /**< Device port identifier. */
129 uint8_t pthresh; /**< Prefetch threshold register. */
130 uint8_t hthresh; /**< Host threshold register. */
131 uint8_t wthresh; /**< Write-back threshold register. */
132 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
133 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
137 * Hardware context number
139 enum igb_advctx_num {
140 IGB_CTX_0 = 0, /**< CTX0 */
141 IGB_CTX_1 = 1, /**< CTX1 */
142 IGB_CTX_NUM = 2, /**< CTX_NUM */
146 * Structure used to check whether a new context descriptor needs to be built.
148 struct igb_advctx_info {
149 uint16_t flags; /**< ol_flags related to context build. */
150 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
151 union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
155 * Structure associated with each TX queue.
157 struct igb_tx_queue {
158 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
159 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
160 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
161 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
162 uint32_t txd_type; /**< Device-specific TXD type */
163 uint16_t nb_tx_desc; /**< number of TX descriptors. */
164 uint16_t tx_tail; /**< Current value of TDT register. */
166 /**< Index of first used TX descriptor. */
167 uint16_t queue_id; /**< TX queue index. */
168 uint16_t reg_idx; /**< TX queue register index. */
169 uint8_t port_id; /**< Device port identifier. */
170 uint8_t pthresh; /**< Prefetch threshold register. */
171 uint8_t hthresh; /**< Host threshold register. */
172 uint8_t wthresh; /**< Write-back threshold register. */
174 /**< Current used hardware descriptor. */
176 /**< Start context position for transmit queue. */
177 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
178 /**< Hardware context history.*/
182 #define RTE_PMD_USE_PREFETCH
185 #ifdef RTE_PMD_USE_PREFETCH
186 #define rte_igb_prefetch(p) rte_prefetch0(p)
188 #define rte_igb_prefetch(p) do {} while(0)
191 #ifdef RTE_PMD_PACKET_PREFETCH
192 #define rte_packet_prefetch(p) rte_prefetch1(p)
194 #define rte_packet_prefetch(p) do {} while(0)
198 * Macro for VMDq feature for 1 GbE NIC.
200 #define E1000_VMOLR_SIZE (8)
202 /*********************************************************************
206 **********************************************************************/
209 * Advanced context descriptors are almost the same between igb and ixgbe.
210 * This is kept as a separate function; there may be an optimization opportunity here.
211 * Rework is required to use the pre-defined values.
215 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
216 volatile struct e1000_adv_tx_context_desc *ctx_txd,
217 uint16_t ol_flags, uint32_t vlan_macip_lens)
219 uint32_t type_tucmd_mlhl;
220 uint32_t mss_l4len_idx;
221 uint32_t ctx_idx, ctx_curr;
224 ctx_curr = txq->ctx_curr;
225 ctx_idx = ctx_curr + txq->ctx_start;
230 if (ol_flags & PKT_TX_VLAN_PKT) {
231 cmp_mask |= TX_VLAN_CMP_MASK;
234 if (ol_flags & PKT_TX_IP_CKSUM) {
235 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
236 cmp_mask |= TX_MAC_LEN_CMP_MASK;
239 /* Specify which HW CTX to upload. */
240 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
241 switch (ol_flags & PKT_TX_L4_MASK) {
242 case PKT_TX_UDP_CKSUM:
243 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
244 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
245 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
246 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
248 case PKT_TX_TCP_CKSUM:
249 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
250 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
251 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
252 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
254 case PKT_TX_SCTP_CKSUM:
255 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
256 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
257 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
258 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
261 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
262 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
266 txq->ctx_cache[ctx_curr].flags = ol_flags;
267 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
268 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
269 vlan_macip_lens & cmp_mask;
271 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
272 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
273 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
274 ctx_txd->seqnum_seed = 0;
278 * Check which hardware context can be used. Use the existing match
279 * or create a new context descriptor.
281 static inline uint32_t
282 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
283 uint32_t vlan_macip_lens)
285 /* If match with the current context */
286 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
287 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
288 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
289 return txq->ctx_curr;
292 /* If match with the second context */
293 txq->ctx_curr ^= 1;
294 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
295 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
296 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
297 return txq->ctx_curr;
300 /* Mismatch: a new context descriptor must be built */
301 return (IGB_CTX_NUM);
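/*
 * Usage sketch (illustrative only): the transmit path calls what_advctx_update()
 * with the per-packet offload flags and vlan/mac/ip lengths. A return value of
 * IGB_CTX_0 or IGB_CTX_1 means one of the two cached hardware contexts already
 * matches and can be referenced directly from the data descriptors; IGB_CTX_NUM
 * means no slot matches and a new context descriptor must be written, consuming
 * one extra ring descriptor, as done in eth_igb_xmit_pkts():
 *
 *   ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
 *   new_ctx = (ctx == IGB_CTX_NUM);   // one more descriptor for the HW context
 */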
304 static inline uint32_t
305 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
307 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
308 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
311 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
312 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
316 static inline uint32_t
317 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
319 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
320 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
324 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
327 struct igb_tx_queue *txq;
328 struct igb_tx_entry *sw_ring;
329 struct igb_tx_entry *txe, *txn;
330 volatile union e1000_adv_tx_desc *txr;
331 volatile union e1000_adv_tx_desc *txd;
332 struct rte_mbuf *tx_pkt;
333 struct rte_mbuf *m_seg;
334 uint64_t buf_dma_addr;
335 uint32_t olinfo_status;
336 uint32_t cmd_type_len;
345 uint32_t new_ctx = 0;
347 uint32_t vlan_macip_lens;
350 sw_ring = txq->sw_ring;
352 tx_id = txq->tx_tail;
353 txe = &sw_ring[tx_id];
355 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
357 pkt_len = tx_pkt->pkt.pkt_len;
359 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
362 * The number of descriptors that must be allocated for a
363 * packet is the number of segments of that packet, plus 1
364 * Context Descriptor for the VLAN Tag Identifier, if any.
365 * Determine the last TX descriptor to allocate in the TX ring
366 * for the packet, starting from the current position (tx_id)
369 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
371 ol_flags = tx_pkt->ol_flags;
372 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
373 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
375 /* Check whether a Context Descriptor needs to be built. */
377 ctx = what_advctx_update(txq, tx_ol_req,
379 /* Only allocate a context descriptor if required. */
380 new_ctx = (ctx == IGB_CTX_NUM);
382 tx_last = (uint16_t) (tx_last + new_ctx);
384 if (tx_last >= txq->nb_tx_desc)
385 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
387 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
388 " tx_first=%u tx_last=%u\n",
389 (unsigned) txq->port_id,
390 (unsigned) txq->queue_id,
396 * Check if there are enough free descriptors in the TX ring
397 * to transmit the next packet.
398 * This operation is based on the two following rules:
400 * 1- Only check that the last needed TX descriptor can be
401 * allocated (by construction, if that descriptor is free,
402 * all intermediate ones are also free).
404 * For this purpose, the index of the last TX descriptor
405 * used for a packet (the "last descriptor" of a packet)
406 * is recorded in the TX entries (the last one included)
407 * that are associated with all TX descriptors allocated
410 * 2- Avoid allocating the last free TX descriptor of the
411 * ring, in order to never set the TDT register with the
412 * same value stored in parallel by the NIC in the TDH
413 * register, which would make the TX engine of the NIC enter
414 * a deadlock situation.
416 * By extension, avoid allocating a free descriptor that
417 * belongs to the last set of free descriptors allocated
418 * to the same packet previously transmitted.
422 * The "last descriptor" of the packet that previously used the
423 * descriptor we want to allocate last (tx_last), if any.
425 tx_end = sw_ring[tx_last].last_id;
428 * The next descriptor following that "last descriptor" in the
431 tx_end = sw_ring[tx_end].next_id;
434 * The "last descriptor" associated with that next descriptor.
436 tx_end = sw_ring[tx_end].last_id;
439 * Check that this descriptor is free.
441 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
448 * Set common flags of all TX Data Descriptors.
450 * The following bits must be set in all Data Descriptors:
451 * - E1000_ADVTXD_DTYP_DATA
452 * - E1000_ADVTXD_DCMD_DEXT
454 * The following bits must be set in the first Data Descriptor
455 * and are ignored in the other ones:
456 * - E1000_ADVTXD_DCMD_IFCS
457 * - E1000_ADVTXD_MAC_1588
458 * - E1000_ADVTXD_DCMD_VLE
460 * The following bits must only be set in the last Data
462 * - E1000_TXD_CMD_EOP
464 * The following bits can be set in any Data Descriptor, but
465 * are only set in the last Data Descriptor:
468 cmd_type_len = txq->txd_type |
469 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
470 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
471 #if defined(RTE_LIBRTE_IEEE1588)
472 if (ol_flags & PKT_TX_IEEE1588_TMST)
473 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
476 /* Setup TX Advanced context descriptor if required */
478 volatile struct e1000_adv_tx_context_desc *
481 ctx_txd = (volatile struct
482 e1000_adv_tx_context_desc *)
485 txn = &sw_ring[txe->next_id];
486 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
488 if (txe->mbuf != NULL) {
489 rte_pktmbuf_free_seg(txe->mbuf);
493 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
496 txe->last_id = tx_last;
497 tx_id = txe->next_id;
501 /* Setup the TX Advanced Data Descriptor */
502 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
503 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
504 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
509 txn = &sw_ring[txe->next_id];
512 if (txe->mbuf != NULL)
513 rte_pktmbuf_free_seg(txe->mbuf);
517 * Set up transmit descriptor.
519 slen = (uint16_t) m_seg->pkt.data_len;
520 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
521 txd->read.buffer_addr =
522 rte_cpu_to_le_64(buf_dma_addr);
523 txd->read.cmd_type_len =
524 rte_cpu_to_le_32(cmd_type_len | slen);
525 txd->read.olinfo_status =
526 rte_cpu_to_le_32(olinfo_status);
527 txe->last_id = tx_last;
528 tx_id = txe->next_id;
530 m_seg = m_seg->pkt.next;
531 } while (m_seg != NULL);
534 * The last packet data descriptor needs End Of Packet (EOP)
535 * and Report Status (RS).
537 txd->read.cmd_type_len |=
538 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
544 * Set the Transmit Descriptor Tail (TDT).
546 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
547 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
548 (unsigned) txq->port_id, (unsigned) txq->queue_id,
549 (unsigned) tx_id, (unsigned) nb_tx);
550 txq->tx_tail = tx_id;
555 /*********************************************************************
559 **********************************************************************/
560 static inline uint16_t
561 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
565 static uint16_t ip_pkt_types_map[16] = {
566 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
567 PKT_RX_IPV6_HDR, 0, 0, 0,
568 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
569 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
572 #if defined(RTE_LIBRTE_IEEE1588)
573 static uint32_t ip_pkt_etqf_map[8] = {
574 0, 0, 0, PKT_RX_IEEE1588_PTP,
578 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
579 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
580 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
582 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
583 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
585 return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
586 0 : PKT_RX_RSS_HASH));
589 static inline uint16_t
590 rx_desc_status_to_pkt_flags(uint32_t rx_status)
594 /* Check if VLAN present */
595 pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
596 PKT_RX_VLAN_PKT : 0);
598 #if defined(RTE_LIBRTE_IEEE1588)
599 if (rx_status & E1000_RXD_STAT_TMST)
600 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
605 static inline uint16_t
606 rx_desc_error_to_pkt_flags(uint32_t rx_status)
609 * Bit 30: IPE, IPv4 checksum error
610 * Bit 29: L4I, L4I integrity error
613 static uint16_t error_to_pkt_flags_map[4] = {
614 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
615 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
617 return error_to_pkt_flags_map[(rx_status >>
618 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
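/*
 * Worked example (illustrative only), using the bit layout described above
 * (bit 30 = IPE, bit 29 = L4 integrity error): for a write-back status word
 * with IPE set and the L4 error bit clear, the two error bits shifted down by
 * E1000_RXD_ERR_CKSUM_BIT form the index 0x2, so the lookup returns
 * PKT_RX_IP_CKSUM_BAD only; with both bits set the index is 0x3 and both
 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are reported in ol_flags.
 */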
622 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
625 struct igb_rx_queue *rxq;
626 volatile union e1000_adv_rx_desc *rx_ring;
627 volatile union e1000_adv_rx_desc *rxdp;
628 struct igb_rx_entry *sw_ring;
629 struct igb_rx_entry *rxe;
630 struct rte_mbuf *rxm;
631 struct rte_mbuf *nmb;
632 union e1000_adv_rx_desc rxd;
635 uint32_t hlen_type_rss;
645 rx_id = rxq->rx_tail;
646 rx_ring = rxq->rx_ring;
647 sw_ring = rxq->sw_ring;
648 while (nb_rx < nb_pkts) {
650 * The order of operations here is important as the DD status
651 * bit must not be read after any other descriptor fields.
652 * rx_ring and rxdp are pointing to volatile data so the order
653 * of accesses cannot be reordered by the compiler. If they were
654 * not volatile, they could be reordered which could lead to
655 * using invalid descriptor fields when read from rxd.
657 rxdp = &rx_ring[rx_id];
658 staterr = rxdp->wb.upper.status_error;
659 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
666 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
667 * likely to be invalid and to be dropped by the various
668 * validation checks performed by the network stack.
670 * Allocate a new mbuf to replenish the RX ring descriptor.
671 * If the allocation fails:
672 * - arrange for that RX descriptor to be the first one
673 * being parsed the next time the receive function is
674 * invoked [on the same queue].
676 * - Stop parsing the RX ring and return immediately.
678 * This policy does not drop the packet received in the RX
679 * descriptor for which the allocation of a new mbuf failed.
680 * Thus, it allows that packet to be later retrieved if
681 * mbufs have been freed in the meantime.
682 * As a side effect, holding RX descriptors instead of
683 * systematically giving them back to the NIC may lead to
684 * RX ring exhaustion situations.
685 * However, the NIC can gracefully prevent such situations
686 * from happening by sending specific "back-pressure" flow control
687 * frames to its peer(s).
689 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
690 "staterr=0x%x pkt_len=%u\n",
691 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
692 (unsigned) rx_id, (unsigned) staterr,
693 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
695 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
697 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
698 "queue_id=%u\n", (unsigned) rxq->port_id,
699 (unsigned) rxq->queue_id);
700 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
705 rxe = &sw_ring[rx_id];
707 if (rx_id == rxq->nb_rx_desc)
710 /* Prefetch next mbuf while processing current one. */
711 rte_igb_prefetch(sw_ring[rx_id].mbuf);
714 * When the next RX descriptor is on a cache-line boundary,
715 * prefetch the next 4 RX descriptors and the next 8 pointers
718 if ((rx_id & 0x3) == 0) {
719 rte_igb_prefetch(&rx_ring[rx_id]);
720 rte_igb_prefetch(&sw_ring[rx_id]);
726 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
727 rxdp->read.hdr_addr = dma_addr;
728 rxdp->read.pkt_addr = dma_addr;
731 * Initialize the returned mbuf.
732 * 1) setup generic mbuf fields:
733 * - number of segments,
736 * - RX port identifier.
737 * 2) integrate hardware offload data, if any:
739 * - IP checksum flag,
740 * - VLAN TCI, if any,
743 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
745 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
746 rte_packet_prefetch(rxm->pkt.data);
747 rxm->pkt.nb_segs = 1;
748 rxm->pkt.next = NULL;
749 rxm->pkt.pkt_len = pkt_len;
750 rxm->pkt.data_len = pkt_len;
751 rxm->pkt.in_port = rxq->port_id;
753 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
754 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
755 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
756 rxm->pkt.vlan_macip.f.vlan_tci =
757 rte_le_to_cpu_16(rxd.wb.upper.vlan);
759 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
760 pkt_flags = (uint16_t)(pkt_flags |
761 rx_desc_status_to_pkt_flags(staterr));
762 pkt_flags = (uint16_t)(pkt_flags |
763 rx_desc_error_to_pkt_flags(staterr));
764 rxm->ol_flags = pkt_flags;
767 * Store the mbuf address into the next entry of the array
768 * of returned packets.
770 rx_pkts[nb_rx++] = rxm;
772 rxq->rx_tail = rx_id;
775 * If the number of free RX descriptors is greater than the RX free
776 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
778 * Update the RDT with the value of the last processed RX descriptor
779 * minus 1, to guarantee that the RDT register is never equal to the
780 * RDH register, which creates a "full" ring situation from the
781 * hardware point of view...
783 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
784 if (nb_hold > rxq->rx_free_thresh) {
785 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
786 "nb_hold=%u nb_rx=%u\n",
787 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
788 (unsigned) rx_id, (unsigned) nb_hold,
790 rx_id = (uint16_t) ((rx_id == 0) ?
791 (rxq->nb_rx_desc - 1) : (rx_id - 1));
792 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
795 rxq->nb_rx_hold = nb_hold;
800 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
803 struct igb_rx_queue *rxq;
804 volatile union e1000_adv_rx_desc *rx_ring;
805 volatile union e1000_adv_rx_desc *rxdp;
806 struct igb_rx_entry *sw_ring;
807 struct igb_rx_entry *rxe;
808 struct rte_mbuf *first_seg;
809 struct rte_mbuf *last_seg;
810 struct rte_mbuf *rxm;
811 struct rte_mbuf *nmb;
812 union e1000_adv_rx_desc rxd;
813 uint64_t dma; /* Physical address of mbuf data buffer */
815 uint32_t hlen_type_rss;
825 rx_id = rxq->rx_tail;
826 rx_ring = rxq->rx_ring;
827 sw_ring = rxq->sw_ring;
830 * Retrieve RX context of current packet, if any.
832 first_seg = rxq->pkt_first_seg;
833 last_seg = rxq->pkt_last_seg;
835 while (nb_rx < nb_pkts) {
838 * The order of operations here is important as the DD status
839 * bit must not be read after any other descriptor fields.
840 * rx_ring and rxdp are pointing to volatile data so the order
841 * of accesses cannot be reordered by the compiler. If they were
842 * not volatile, they could be reordered which could lead to
843 * using invalid descriptor fields when read from rxd.
845 rxdp = &rx_ring[rx_id];
846 staterr = rxdp->wb.upper.status_error;
847 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
854 * Allocate a new mbuf to replenish the RX ring descriptor.
855 * If the allocation fails:
856 * - arrange for that RX descriptor to be the first one
857 * being parsed the next time the receive function is
858 * invoked [on the same queue].
860 * - Stop parsing the RX ring and return immediately.
862 * This policy does not drop the packet received in the RX
863 * descriptor for which the allocation of a new mbuf failed.
864 * Thus, it allows that packet to be later retrieved if
865 * mbufs have been freed in the meantime.
866 * As a side effect, holding RX descriptors instead of
867 * systematically giving them back to the NIC may lead to
868 * RX ring exhaustion situations.
869 * However, the NIC can gracefully prevent such situations
870 * from happening by sending specific "back-pressure" flow control
871 * frames to its peer(s).
873 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
874 "staterr=0x%x data_len=%u\n",
875 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876 (unsigned) rx_id, (unsigned) staterr,
877 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
879 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
881 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882 "queue_id=%u\n", (unsigned) rxq->port_id,
883 (unsigned) rxq->queue_id);
884 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
889 rxe = &sw_ring[rx_id];
891 if (rx_id == rxq->nb_rx_desc)
894 /* Prefetch next mbuf while processing current one. */
895 rte_igb_prefetch(sw_ring[rx_id].mbuf);
898 * When the next RX descriptor is on a cache-line boundary,
899 * prefetch the next 4 RX descriptors and the next 8 pointers
902 if ((rx_id & 0x3) == 0) {
903 rte_igb_prefetch(&rx_ring[rx_id]);
904 rte_igb_prefetch(&sw_ring[rx_id]);
908 * Update RX descriptor with the physical address of the new
909 * data buffer of the new allocated mbuf.
913 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
914 rxdp->read.pkt_addr = dma;
915 rxdp->read.hdr_addr = dma;
918 * Set data length & data buffer address of mbuf.
920 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
921 rxm->pkt.data_len = data_len;
922 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
925 * If this is the first buffer of the received packet,
926 * set the pointer to the first mbuf of the packet and
927 * initialize its context.
928 * Otherwise, update the total length and the number of segments
929 * of the current scattered packet, and update the pointer to
930 * the last mbuf of the current packet.
932 if (first_seg == NULL) {
934 first_seg->pkt.pkt_len = data_len;
935 first_seg->pkt.nb_segs = 1;
937 first_seg->pkt.pkt_len += data_len;
938 first_seg->pkt.nb_segs++;
939 last_seg->pkt.next = rxm;
943 * If this is not the last buffer of the received packet,
944 * update the pointer to the last mbuf of the current scattered
945 * packet and continue to parse the RX ring.
947 if (! (staterr & E1000_RXD_STAT_EOP)) {
953 * This is the last buffer of the received packet.
954 * If the CRC is not stripped by the hardware:
955 * - Subtract the CRC length from the total packet length.
956 * - If the last buffer only contains the whole CRC or a part
957 * of it, free the mbuf associated to the last buffer.
958 * If part of the CRC is also contained in the previous
959 * mbuf, subtract the length of that CRC part from the
960 * data length of the previous mbuf.
962 rxm->pkt.next = NULL;
963 if (unlikely(rxq->crc_len > 0)) {
964 first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
965 if (data_len <= ETHER_CRC_LEN) {
966 rte_pktmbuf_free_seg(rxm);
967 first_seg->pkt.nb_segs--;
968 last_seg->pkt.data_len = (uint16_t)
969 (last_seg->pkt.data_len -
970 (ETHER_CRC_LEN - data_len));
971 last_seg->pkt.next = NULL;
974 (uint16_t) (data_len - ETHER_CRC_LEN);
978 * Initialize the first mbuf of the returned packet:
979 * - RX port identifier,
980 * - hardware offload data, if any:
982 * - IP checksum flag,
983 * - VLAN TCI, if any,
986 first_seg->pkt.in_port = rxq->port_id;
987 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
990 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
991 * set in the pkt_flags field.
993 first_seg->pkt.vlan_macip.f.vlan_tci =
994 rte_le_to_cpu_16(rxd.wb.upper.vlan);
995 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
996 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
997 pkt_flags = (uint16_t)(pkt_flags |
998 rx_desc_status_to_pkt_flags(staterr));
999 pkt_flags = (uint16_t)(pkt_flags |
1000 rx_desc_error_to_pkt_flags(staterr));
1001 first_seg->ol_flags = pkt_flags;
1003 /* Prefetch data of first segment, if configured to do so. */
1004 rte_packet_prefetch(first_seg->pkt.data);
1007 * Store the mbuf address into the next entry of the array
1008 * of returned packets.
1010 rx_pkts[nb_rx++] = first_seg;
1013 * Setup receipt context for a new packet.
1019 * Record index of the next RX descriptor to probe.
1021 rxq->rx_tail = rx_id;
1024 * Save receive context.
1026 rxq->pkt_first_seg = first_seg;
1027 rxq->pkt_last_seg = last_seg;
1030 * If the number of free RX descriptors is greater than the RX free
1031 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1033 * Update the RDT with the value of the last processed RX descriptor
1034 * minus 1, to guarantee that the RDT register is never equal to the
1035 * RDH register, which creates a "full" ring situation from the
1036 * hardware point of view...
1038 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1039 if (nb_hold > rxq->rx_free_thresh) {
1040 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1041 "nb_hold=%u nb_rx=%u\n",
1042 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1043 (unsigned) rx_id, (unsigned) nb_hold,
1045 rx_id = (uint16_t) ((rx_id == 0) ?
1046 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1047 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1050 rxq->nb_rx_hold = nb_hold;
1055 * Rings setup and release.
1057 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
1058 * multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1059 * This also optimizes the cache-line size effect.
1060 * H/W supports cache line sizes of up to 128 bytes.
1062 #define IGB_ALIGN 128
1065 * Maximum number of Ring Descriptors.
1067 * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
1068 * descriptors must meet the following condition:
1069 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1071 #define IGB_MIN_RING_DESC 32
1072 #define IGB_MAX_RING_DESC 4096
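/*
 * Worked example (illustrative only): advanced RX/TX descriptors are 16 bytes
 * each, so the 128-byte RDLEN/TDLEN constraint means the descriptor count must
 * be a multiple of 8 within [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC]:
 *
 *   nb_desc = 512;   // 512 * 16 = 8192 bytes, 8192 % 128 == 0  -> accepted
 *   nb_desc = 500;   // 500 * 16 = 8000 bytes, 8000 % 128 != 0  -> rejected
 *                    //   by eth_igb_rx/tx_queue_setup()
 */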
1074 static const struct rte_memzone *
1075 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1076 uint16_t queue_id, uint32_t ring_size, int socket_id)
1078 char z_name[RTE_MEMZONE_NAMESIZE];
1079 const struct rte_memzone *mz;
1081 rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1082 dev->driver->pci_drv.name, ring_name,
1083 dev->data->port_id, queue_id);
1084 mz = rte_memzone_lookup(z_name);
1088 #ifdef RTE_LIBRTE_XEN_DOM0
1089 return rte_memzone_reserve_bounded(z_name, ring_size,
1090 socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
1092 return rte_memzone_reserve_aligned(z_name, ring_size,
1093 socket_id, 0, IGB_ALIGN);
1098 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1102 if (txq->sw_ring != NULL) {
1103 for (i = 0; i < txq->nb_tx_desc; i++) {
1104 if (txq->sw_ring[i].mbuf != NULL) {
1105 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1106 txq->sw_ring[i].mbuf = NULL;
1113 igb_tx_queue_release(struct igb_tx_queue *txq)
1116 igb_tx_queue_release_mbufs(txq);
1117 rte_free(txq->sw_ring);
1123 eth_igb_tx_queue_release(void *txq)
1125 igb_tx_queue_release(txq);
1129 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1134 memset((void*)&txq->ctx_cache, 0,
1135 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1139 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1141 static const union e1000_adv_tx_desc zeroed_desc = { .read = {
1143 struct igb_tx_entry *txe = txq->sw_ring;
1145 struct e1000_hw *hw;
1147 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148 /* Zero out HW ring memory */
1149 for (i = 0; i < txq->nb_tx_desc; i++) {
1150 txq->tx_ring[i] = zeroed_desc;
1153 /* Initialize ring entries */
1154 prev = (uint16_t)(txq->nb_tx_desc - 1);
1155 for (i = 0; i < txq->nb_tx_desc; i++) {
1156 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1158 txd->wb.status = E1000_TXD_STAT_DD;
1161 txe[prev].next_id = i;
1165 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1166 /* 82575 specific, each tx queue will use 2 hw contexts */
1167 if (hw->mac.type == e1000_82575)
1168 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1170 igb_reset_tx_queue_stat(txq);
1174 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1177 unsigned int socket_id,
1178 const struct rte_eth_txconf *tx_conf)
1180 const struct rte_memzone *tz;
1181 struct igb_tx_queue *txq;
1182 struct e1000_hw *hw;
1185 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1188 * Validate number of transmit descriptors.
1189 * It must not exceed hardware maximum, and must be a multiple
1192 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1193 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1198 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1201 if (tx_conf->tx_free_thresh != 0)
1202 RTE_LOG(WARNING, PMD,
1203 "The tx_free_thresh parameter is not "
1204 "used for the 1G driver.\n");
1205 if (tx_conf->tx_rs_thresh != 0)
1206 RTE_LOG(WARNING, PMD,
1207 "The tx_rs_thresh parameter is not "
1208 "used for the 1G driver.\n");
1209 if (tx_conf->tx_thresh.wthresh == 0)
1210 RTE_LOG(WARNING, PMD,
1211 "To improve 1G driver performance, consider setting "
1212 "the TX WTHRESH value to 4, 8, or 16.\n");
1214 /* Free memory prior to re-allocation if needed */
1215 if (dev->data->tx_queues[queue_idx] != NULL)
1216 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1218 /* First allocate the tx queue data structure */
1219 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1225 * Allocate TX ring hardware descriptors. A memzone large enough to
1226 * handle the maximum ring size is allocated in order to allow for
1227 * resizing in later calls to the queue setup function.
1229 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1230 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1233 igb_tx_queue_release(txq);
1237 txq->nb_tx_desc = nb_desc;
1238 txq->pthresh = tx_conf->tx_thresh.pthresh;
1239 txq->hthresh = tx_conf->tx_thresh.hthresh;
1240 txq->wthresh = tx_conf->tx_thresh.wthresh;
1241 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1243 txq->queue_id = queue_idx;
1244 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1245 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1246 txq->port_id = dev->data->port_id;
1248 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1249 #ifndef RTE_LIBRTE_XEN_DOM0
1250 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1252 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1254 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1255 /* Allocate software ring */
1256 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1257 sizeof(struct igb_tx_entry) * nb_desc,
1259 if (txq->sw_ring == NULL) {
1260 igb_tx_queue_release(txq);
1263 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1264 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1266 igb_reset_tx_queue(txq, dev);
1267 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1268 dev->data->tx_queues[queue_idx] = txq;
1274 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1278 if (rxq->sw_ring != NULL) {
1279 for (i = 0; i < rxq->nb_rx_desc; i++) {
1280 if (rxq->sw_ring[i].mbuf != NULL) {
1281 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1282 rxq->sw_ring[i].mbuf = NULL;
1289 igb_rx_queue_release(struct igb_rx_queue *rxq)
1292 igb_rx_queue_release_mbufs(rxq);
1293 rte_free(rxq->sw_ring);
1299 eth_igb_rx_queue_release(void *rxq)
1301 igb_rx_queue_release(rxq);
1305 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1307 static const union e1000_adv_rx_desc zeroed_desc = { .read = {
1311 /* Zero out HW ring memory */
1312 for (i = 0; i < rxq->nb_rx_desc; i++) {
1313 rxq->rx_ring[i] = zeroed_desc;
1317 rxq->pkt_first_seg = NULL;
1318 rxq->pkt_last_seg = NULL;
1322 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1325 unsigned int socket_id,
1326 const struct rte_eth_rxconf *rx_conf,
1327 struct rte_mempool *mp)
1329 const struct rte_memzone *rz;
1330 struct igb_rx_queue *rxq;
1331 struct e1000_hw *hw;
1334 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337 * Validate number of receive descriptors.
1338 * It must not exceed hardware maximum, and must be a multiple
1341 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1342 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1346 /* Free memory prior to re-allocation if needed */
1347 if (dev->data->rx_queues[queue_idx] != NULL) {
1348 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1349 dev->data->rx_queues[queue_idx] = NULL;
1352 /* First allocate the RX queue data structure. */
1353 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1358 rxq->nb_rx_desc = nb_desc;
1359 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1360 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1361 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1362 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1364 rxq->drop_en = rx_conf->rx_drop_en;
1365 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1366 rxq->queue_id = queue_idx;
1367 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1368 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1369 rxq->port_id = dev->data->port_id;
1370 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1374 * Allocate RX ring hardware descriptors. A memzone large enough to
1375 * handle the maximum ring size is allocated in order to allow for
1376 * resizing in later calls to the queue setup function.
1378 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1379 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1381 igb_rx_queue_release(rxq);
1384 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1385 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1386 #ifndef RTE_LIBRTE_XEN_DOM0
1387 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1389 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1391 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1393 /* Allocate software ring. */
1394 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1395 sizeof(struct igb_rx_entry) * nb_desc,
1397 if (rxq->sw_ring == NULL) {
1398 igb_rx_queue_release(rxq);
1401 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1402 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1404 dev->data->rx_queues[queue_idx] = rxq;
1405 igb_reset_rx_queue(rxq);
1411 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1413 #define IGB_RXQ_SCAN_INTERVAL 4
1414 volatile union e1000_adv_rx_desc *rxdp;
1415 struct igb_rx_queue *rxq;
1418 if (rx_queue_id >= dev->data->nb_rx_queues) {
1419 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
1423 rxq = dev->data->rx_queues[rx_queue_id];
1424 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1426 while ((desc < rxq->nb_rx_desc) &&
1427 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1428 desc += IGB_RXQ_SCAN_INTERVAL;
1429 rxdp += IGB_RXQ_SCAN_INTERVAL;
1430 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1431 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1432 desc - rxq->nb_rx_desc]);
1439 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1441 volatile union e1000_adv_rx_desc *rxdp;
1442 struct igb_rx_queue *rxq = rx_queue;
1445 if (unlikely(offset >= rxq->nb_rx_desc))
1447 desc = rxq->rx_tail + offset;
1448 if (desc >= rxq->nb_rx_desc)
1449 desc -= rxq->nb_rx_desc;
1451 rxdp = &rxq->rx_ring[desc];
1452 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
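/*
 * Usage sketch (illustrative only, hypothetical application code): applications
 * normally reach this handler through the generic ethdev API rather than calling
 * it directly, e.g. to peek whether a descriptor has been written back before
 * issuing a burst (port_id, queue_id, pkts and MAX_BURST are placeholders):
 *
 *   if (rte_eth_rx_descriptor_done(port_id, queue_id, 0))
 *       nb = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST);
 */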
1456 igb_dev_clear_queues(struct rte_eth_dev *dev)
1459 struct igb_tx_queue *txq;
1460 struct igb_rx_queue *rxq;
1462 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1463 txq = dev->data->tx_queues[i];
1465 igb_tx_queue_release_mbufs(txq);
1466 igb_reset_tx_queue(txq, dev);
1470 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1471 rxq = dev->data->rx_queues[i];
1473 igb_rx_queue_release_mbufs(rxq);
1474 igb_reset_rx_queue(rxq);
1480 * Receive Side Scaling (RSS).
1481 * See section 7.1.1.7 in the following document:
1482 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1485 * The source and destination IP addresses of the IP header and the source and
1486 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1487 * against a configurable random key to compute a 32-bit RSS hash result.
1488 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1489 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1490 * RSS output index which is used as the RX queue index where to store the
1492 * The following output is supplied in the RX write-back descriptor:
1493 * - 32-bit result of the Microsoft RSS hash function,
1494 * - 4-bit RSS type field.
1498 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1499 * Used as the default key.
1501 static uint8_t rss_intel_key[40] = {
1502 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1503 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1504 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1505 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1506 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1510 igb_rss_disable(struct rte_eth_dev *dev)
1512 struct e1000_hw *hw;
1515 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1517 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1518 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1522 igb_rss_configure(struct rte_eth_dev *dev)
1524 struct e1000_hw *hw;
1532 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1534 rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1535 if (rss_hf == 0) /* Disable RSS. */ {
1536 igb_rss_disable(dev);
1539 hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1540 if (hash_key == NULL)
1541 hash_key = rss_intel_key; /* Default hash key. */
1543 /* Fill in RSS hash key. */
1544 for (i = 0; i < 10; i++) {
1545 rss_key = hash_key[(i * 4)];
1546 rss_key |= hash_key[(i * 4) + 1] << 8;
1547 rss_key |= hash_key[(i * 4) + 2] << 16;
1548 rss_key |= hash_key[(i * 4) + 3] << 24;
1549 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1552 /* Fill in redirection table. */
1553 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1554 for (i = 0; i < 128; i++) {
1561 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1562 i % dev->data->nb_rx_queues : 0);
1563 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1565 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1568 /* Set configured hashing functions in MRQC register. */
1569 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1570 if (rss_hf & ETH_RSS_IPV4)
1571 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1572 if (rss_hf & ETH_RSS_IPV4_TCP)
1573 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1574 if (rss_hf & ETH_RSS_IPV6)
1575 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1576 if (rss_hf & ETH_RSS_IPV6_EX)
1577 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1578 if (rss_hf & ETH_RSS_IPV6_TCP)
1579 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1580 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1581 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1582 if (rss_hf & ETH_RSS_IPV4_UDP)
1583 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1584 if (rss_hf & ETH_RSS_IPV6_UDP)
1585 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1586 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1587 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1588 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
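/*
 * Illustration (not part of the driver): with the redirection table filled as
 * above, the hardware uses the 7 least-significant bits of the 32-bit RSS hash
 * to index the 128-entry RETA, and the selected entry gives the destination
 * queue (ignoring the 82575-specific shift). Assuming 4 configured RX queues,
 * the table holds the repeating pattern 0,1,2,3, so a packet whose hash has
 * low 7 bits equal to 42 is steered to:
 *
 *   queue = reta[42];        // 42 % 4 == 2  ->  RX queue 2
 */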
1592 * Check whether the MAC type supports VMDq.
1593 * Return 1 if it does; otherwise return 0.
1596 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1598 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1600 switch (hw->mac.type) {
1621 PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
1627 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1629 struct rte_eth_vmdq_rx_conf *cfg;
1630 struct e1000_hw *hw;
1631 uint32_t mrqc, vt_ctl, vmolr, rctl;
1634 PMD_INIT_LOG(DEBUG, ">>");
1635 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1638 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1639 if (igb_is_vmdq_supported(dev) == 0)
1642 igb_rss_disable(dev);
1644 /* RCTL: enable VLAN filter */
1645 rctl = E1000_READ_REG(hw, E1000_RCTL);
1646 rctl |= E1000_RCTL_VFE;
1647 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1649 /* MRQC: enable vmdq */
1650 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1651 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1652 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1654 /* VTCTL: pool selection according to VLAN tag */
1655 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1656 if (cfg->enable_default_pool)
1657 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1658 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1659 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1662 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1663 * Both 82576 and 82580 support it.
1665 if (hw->mac.type != e1000_i350) {
1666 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1667 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1668 vmolr |= E1000_VMOLR_STRVLAN;
1669 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1673 /* VFTA - enable all vlan filters */
1674 for (i = 0; i < IGB_VFTA_SIZE; i++)
1675 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1677 /* VFRE: enable RX for 8 pools; both 82576 and i350 support it */
1678 if (hw->mac.type != e1000_82580)
1679 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1682 * RAH/RAL - allow pools to read specific mac addresses
1683 * In this case, all pools should be able to read from mac addr 0
1685 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1686 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1688 /* VLVF: set up filters for vlan tags as configured */
1689 for (i = 0; i < cfg->nb_pool_maps; i++) {
1690 /* set vlan id in VF register and set the valid bit */
1691 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1692 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1693 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1694 E1000_VLVF_POOLSEL_MASK)));
1697 E1000_WRITE_FLUSH(hw);
1703 /*********************************************************************
1705 * Enable receive unit.
1707 **********************************************************************/
1710 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1712 struct igb_rx_entry *rxe = rxq->sw_ring;
1716 /* Initialize software ring entries. */
1717 for (i = 0; i < rxq->nb_rx_desc; i++) {
1718 volatile union e1000_adv_rx_desc *rxd;
1719 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1722 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1723 "queue_id=%hu\n", rxq->queue_id);
1724 igb_rx_queue_release(rxq);
1728 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1729 rxd = &rxq->rx_ring[i];
1730 rxd->read.hdr_addr = dma_addr;
1731 rxd->read.pkt_addr = dma_addr;
1738 #define E1000_MRQC_DEF_Q_SHIFT (3)
1740 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1742 struct e1000_hw *hw =
1743 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1746 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1748 * SRIOV active scheme
1749 * FIXME if support RSS together with VMDq & SRIOV
1751 mrqc = E1000_MRQC_ENABLE_VMDQ;
1752 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1753 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1754 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1755 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1757 * SRIOV inactive scheme
1759 switch (dev->data->dev_conf.rxmode.mq_mode) {
1761 igb_rss_configure(dev);
1763 case ETH_MQ_RX_VMDQ_ONLY:
1764 /*Configure general VMDQ only RX parameters*/
1765 igb_vmdq_rx_hw_configure(dev);
1767 case ETH_MQ_RX_NONE:
1768 /* If mq_mode is none, disable RSS. */
1770 igb_rss_disable(dev);
1779 eth_igb_rx_init(struct rte_eth_dev *dev)
1781 struct e1000_hw *hw;
1782 struct igb_rx_queue *rxq;
1783 struct rte_pktmbuf_pool_private *mbp_priv;
1788 uint16_t rctl_bsize;
1792 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1796 * Make sure receives are disabled while setting
1797 * up the descriptor ring.
1799 rctl = E1000_READ_REG(hw, E1000_RCTL);
1800 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1803 * Configure support of jumbo frames, if any.
1805 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1806 rctl |= E1000_RCTL_LPE;
1809 * Set the maximum packet length by default; it may be updated
1810 * later when dual VLAN is enabled or disabled.
1812 E1000_WRITE_REG(hw, E1000_RLPML,
1813 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1816 rctl &= ~E1000_RCTL_LPE;
1818 /* Configure and enable each RX queue. */
1820 dev->rx_pkt_burst = eth_igb_recv_pkts;
1821 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1825 rxq = dev->data->rx_queues[i];
1827 /* Allocate buffers for descriptor rings and set up queue */
1828 ret = igb_alloc_rx_queue_mbufs(rxq);
1833 * Reset crc_len in case it was changed after queue setup by a
1837 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1840 bus_addr = rxq->rx_ring_phys_addr;
1841 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
1843 sizeof(union e1000_adv_rx_desc));
1844 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
1845 (uint32_t)(bus_addr >> 32));
1846 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
1848 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1851 * Configure RX buffer size.
1853 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1854 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1855 RTE_PKTMBUF_HEADROOM);
1856 if (buf_size >= 1024) {
1858 * Configure the BSIZEPACKET field of the SRRCTL
1859 * register of the queue.
1860 * Value is in 1 KB resolution, from 1 KB to 127 KB.
1861 * If this field is equal to 0b, then RCTL.BSIZE
1862 * determines the RX packet buffer size.
1864 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1865 E1000_SRRCTL_BSIZEPKT_MASK);
1866 buf_size = (uint16_t) ((srrctl &
1867 E1000_SRRCTL_BSIZEPKT_MASK) <<
1868 E1000_SRRCTL_BSIZEPKT_SHIFT);
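/*
 * Worked example (illustrative only): SRRCTL.BSIZEPACKET is expressed in 1 KB
 * units (E1000_SRRCTL_BSIZEPKT_SHIFT). Assuming a mempool whose data room is
 * 2048 + RTE_PKTMBUF_HEADROOM bytes, buf_size starts at 2048, the field is set
 * to 2, and the rounded-back buf_size used for the scatter decision below is
 * 2 * 1024 = 2048 bytes. A 1522-byte max_rx_pkt_len plus two VLAN tag lengths
 * then fits in a single buffer, so the non-scattered receive function is kept.
 */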
1870 /* Add dual VLAN tag length to support dual VLAN */
1871 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
1872 2 * VLAN_TAG_SIZE) > buf_size){
1873 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1874 dev->data->scattered_rx = 1;
1878 * Use BSIZE field of the device RCTL register.
1880 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1881 rctl_bsize = buf_size;
1882 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1883 dev->data->scattered_rx = 1;
1886 /* Drop packets when no descriptors are available, if enabled */
1888 srrctl |= E1000_SRRCTL_DROP_EN;
1890 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
1892 /* Enable this RX queue. */
1893 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
1894 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1895 rxdctl &= 0xFFF00000;
1896 rxdctl |= (rxq->pthresh & 0x1F);
1897 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1898 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1899 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
1903 * Setup BSIZE field of RCTL register, if needed.
1904 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
1905 * register, since the code above configures the SRRCTL register of
1906 * the RX queue in such a case.
1907 * All configurable sizes are:
1908 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1909 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1910 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1911 * 2048: rctl |= E1000_RCTL_SZ_2048;
1912 * 1024: rctl |= E1000_RCTL_SZ_1024;
1913 * 512: rctl |= E1000_RCTL_SZ_512;
1914 * 256: rctl |= E1000_RCTL_SZ_256;
1916 if (rctl_bsize > 0) {
1917 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1918 rctl |= E1000_RCTL_SZ_512;
1919 else /* 256 <= buf_size < 512 - use 256 */
1920 rctl |= E1000_RCTL_SZ_256;
1924 * Configure RSS if device configured with multiple RX queues.
1926 igb_dev_mq_rx_configure(dev);
1928 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
1929 rctl |= E1000_READ_REG(hw, E1000_RCTL);
1932 * Setup the Checksum Register.
1933 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1935 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1936 rxcsum |= E1000_RXCSUM_PCSD;
1938 /* Enable both L3/L4 rx checksum offload */
1939 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1940 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1942 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1943 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1945 /* Setup the Receive Control Register. */
1946 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1947 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1949 /* set STRCRC bit in all queues */
1950 if (hw->mac.type == e1000_i350 ||
1951 hw->mac.type == e1000_i210 ||
1952 hw->mac.type == e1000_i211 ||
1953 hw->mac.type == e1000_i354) {
1954 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1955 rxq = dev->data->rx_queues[i];
1956 uint32_t dvmolr = E1000_READ_REG(hw,
1957 E1000_DVMOLR(rxq->reg_idx));
1958 dvmolr |= E1000_DVMOLR_STRCRC;
1959 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
1963 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1965 /* clear STRCRC bit in all queues */
1966 if (hw->mac.type == e1000_i350 ||
1967 hw->mac.type == e1000_i210 ||
1968 hw->mac.type == e1000_i211 ||
1969 hw->mac.type == e1000_i354) {
1970 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1971 rxq = dev->data->rx_queues[i];
1972 uint32_t dvmolr = E1000_READ_REG(hw,
1973 E1000_DVMOLR(rxq->reg_idx));
1974 dvmolr &= ~E1000_DVMOLR_STRCRC;
1975 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
1980 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1981 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1982 E1000_RCTL_RDMTS_HALF |
1983 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1985 /* Make sure VLAN Filters are off. */
1986 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
1987 rctl &= ~E1000_RCTL_VFE;
1988 /* Don't store bad packets. */
1989 rctl &= ~E1000_RCTL_SBP;
1991 /* Enable Receives. */
1992 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1995 * Setup the HW Rx Head and Tail Descriptor Pointers.
1996 * This needs to be done after the receive unit is enabled.
1998 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1999 rxq = dev->data->rx_queues[i];
2000 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2001 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2007 /*********************************************************************
2009 * Enable transmit unit.
2011 **********************************************************************/
2013 eth_igb_tx_init(struct rte_eth_dev *dev)
2015 struct e1000_hw *hw;
2016 struct igb_tx_queue *txq;
2021 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023 /* Setup the Base and Length of the Tx Descriptor Rings. */
2024 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2026 txq = dev->data->tx_queues[i];
2027 bus_addr = txq->tx_ring_phys_addr;
2029 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2031 sizeof(union e1000_adv_tx_desc));
2032 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2033 (uint32_t)(bus_addr >> 32));
2034 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2036 /* Setup the HW Tx Head and Tail descriptor pointers. */
2037 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2038 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2040 /* Setup Transmit threshold registers. */
2041 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2042 txdctl |= txq->pthresh & 0x1F;
2043 txdctl |= ((txq->hthresh & 0x1F) << 8);
2044 txdctl |= ((txq->wthresh & 0x1F) << 16);
2045 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2046 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2049 /* Program the Transmit Control Register. */
2050 tctl = E1000_READ_REG(hw, E1000_TCTL);
2051 tctl &= ~E1000_TCTL_CT;
2052 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2053 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2055 e1000_config_collision_dist(hw);
2057 /* This write will effectively turn on the transmit unit. */
2058 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2061 /*********************************************************************
2063 * Enable VF receive unit.
2065 **********************************************************************/
2067 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2069 struct e1000_hw *hw;
2070 struct igb_rx_queue *rxq;
2071 struct rte_pktmbuf_pool_private *mbp_priv;
2074 uint16_t rctl_bsize;
2078 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2081 e1000_rlpml_set_vf(hw,
2082 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2085 /* Configure and enable each RX queue. */
2087 dev->rx_pkt_burst = eth_igb_recv_pkts;
2088 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2092 rxq = dev->data->rx_queues[i];
2094 /* Allocate buffers for descriptor rings and set up queue */
2095 ret = igb_alloc_rx_queue_mbufs(rxq);
2099 bus_addr = rxq->rx_ring_phys_addr;
2100 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2102 sizeof(union e1000_adv_rx_desc));
2103 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2104 (uint32_t)(bus_addr >> 32));
2105 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2107 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2110 * Configure RX buffer size.
2112 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
2113 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
2114 RTE_PKTMBUF_HEADROOM);
2115 if (buf_size >= 1024) {
2117 * Configure the BSIZEPACKET field of the SRRCTL
2118 * register of the queue.
2119 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2120 * If this field is equal to 0b, then RCTL.BSIZE
2121 * determines the RX packet buffer size.
2123 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2124 E1000_SRRCTL_BSIZEPKT_MASK);
2125 buf_size = (uint16_t) ((srrctl &
2126 E1000_SRRCTL_BSIZEPKT_MASK) <<
2127 E1000_SRRCTL_BSIZEPKT_SHIFT);
2129 /* Add dual VLAN tag length to support dual VLAN */
2130 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2131 2 * VLAN_TAG_SIZE) > buf_size){
2132 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2133 dev->data->scattered_rx = 1;
2137 * Use BSIZE field of the device RCTL register.
2139 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2140 rctl_bsize = buf_size;
2141 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2142 dev->data->scattered_rx = 1;
2145 /* Drop packets when no descriptors are available, if enabled */
2147 srrctl |= E1000_SRRCTL_DROP_EN;
2149 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2151 /* Enable this RX queue. */
2152 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2153 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2154 rxdctl &= 0xFFF00000;
2155 rxdctl |= (rxq->pthresh & 0x1F);
2156 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2157 if (hw->mac.type == e1000_82576) {
2159 * Workaround for the 82576 VF erratum:
2160 * force WTHRESH to 1
2161 * to avoid write-back sometimes not being triggered.
2164 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
2167 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2168 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2172 * Setup the HW Rx Head and Tail Descriptor Pointers.
2173 * This needs to be done after the receive unit is enabled.
2175 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2176 rxq = dev->data->rx_queues[i];
2177 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2178 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2184 /*********************************************************************
2186 * Enable VF transmit unit.
2188 **********************************************************************/
2190 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2192 struct e1000_hw *hw;
2193 struct igb_tx_queue *txq;
2197 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2199 /* Setup the Base and Length of the Tx Descriptor Rings. */
2200 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2203 txq = dev->data->tx_queues[i];
2204 bus_addr = txq->tx_ring_phys_addr;
2205 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2207 sizeof(union e1000_adv_tx_desc));
2208 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2209 (uint32_t)(bus_addr >> 32));
2210 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2212 /* Setup the HW Tx Head and Tail descriptor pointers. */
2213 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2214 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2216 /* Setup Transmit threshold registers. */
2217 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2218 txdctl |= txq->pthresh & 0x1F;
2219 txdctl |= ((txq->hthresh & 0x1F) << 8);
2220 if (hw->mac.type == e1000_82576) {
2222 * Workaround for the 82576 VF erratum:
2223 * force WTHRESH to 1
2224 * to avoid write-back sometimes not being triggered.
2227 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
2230 txdctl |= ((txq->wthresh & 0x1F) << 16);
2231 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2232 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);