/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
	return (m);
}

#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	(uint64_t) ((mb)->buf_physaddr + \
			(uint64_t) ((char *)((mb)->pkt.data) - \
				(char *)(mb)->buf_addr))

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
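
/*
 * Illustrative sketch (not part of the original driver): the macros above
 * compute the DMA address handed to the NIC. buf_physaddr is the physical
 * address of the mbuf data buffer; adding the offset of pkt.data within
 * that buffer (or the fixed headroom, for a freshly reset mbuf) yields the
 * physical address of the packet data. Hypothetical helper, equivalent to
 * RTE_MBUF_DATA_DMA_ADDR(mb):
 */
static inline uint64_t
example_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	uint64_t off = (uint64_t)((const char *)mb->pkt.data -
				  (const char *)mb->buf_addr);

	return (uint64_t)mb->buf_physaddr + off;
}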
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct igb_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct igb_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each RX queue.
 */
struct igb_rx_queue {
	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
	volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
	struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
	uint16_t            rx_tail;    /**< current value of RDT register. */
	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t            queue_id;   /**< RX queue index. */
	uint16_t            reg_idx;    /**< RX queue register index. */
	uint8_t             port_id;    /**< Device port identifier. */
	uint8_t             pthresh;    /**< Prefetch threshold register. */
	uint8_t             hthresh;    /**< Host threshold register. */
	uint8_t             wthresh;    /**< Write-back threshold register. */
	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
};
/**
 * Hardware context number
 */
enum igb_advctx_num {
	IGB_CTX_0   = 0, /**< CTX0    */
	IGB_CTX_1   = 1, /**< CTX1    */
	IGB_CTX_NUM = 2, /**< CTX_NUM */
};

/**
 * Structure to check if a new context descriptor needs to be built.
 */
struct igb_advctx_info {
	uint16_t flags;     /**< ol_flags related to context build. */
	uint32_t cmp_mask;  /**< compare mask for vlan_macip_lens */
	union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};
/**
 * Structure associated with each TX queue.
 */
struct igb_tx_queue {
	volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
	struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
	uint32_t               txd_type;   /**< Device-specific TXD type */
	uint16_t               nb_tx_desc; /**< number of TX descriptors. */
	uint16_t               tx_tail;    /**< Current value of TDT register. */
	uint16_t               tx_head;
	/**< Index of first used TX descriptor. */
	uint16_t               queue_id;   /**< TX queue index. */
	uint16_t               reg_idx;    /**< TX queue register index. */
	uint8_t                port_id;    /**< Device port identifier. */
	uint8_t                pthresh;    /**< Prefetch threshold register. */
	uint8_t                hthresh;    /**< Host threshold register. */
	uint8_t                wthresh;    /**< Write-back threshold register. */
	uint32_t               ctx_curr;
	/**< Current used hardware descriptor. */
	uint32_t               ctx_start;
	/**< Start context position for transmit queue. */
	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
	/**< Hardware context history.*/
};
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_igb_prefetch(p)	rte_prefetch0(p)
#else
#define rte_igb_prefetch(p)	do {} while(0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while(0)
#endif

/*
 * Macro for VMDq feature for 1 GbE NIC.
 */
#define E1000_VMOLR_SIZE			(8)
/*********************************************************************
 *
 *  TX function
 *
 **********************************************************************/

/*
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is kept as a separate function, as there may be optimization
 * opportunities here. Rework would be required to use pre-defined values.
 */
static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
		volatile struct e1000_adv_tx_context_desc *ctx_txd,
		uint16_t ol_flags, uint32_t vlan_macip_lens)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	uint32_t ctx_idx, ctx_curr;
	uint32_t cmp_mask;

	ctx_curr = txq->ctx_curr;
	ctx_idx = ctx_curr + txq->ctx_start;

	cmp_mask = 0;
	type_tucmd_mlhl = 0;

	if (ol_flags & PKT_TX_VLAN_PKT) {
		cmp_mask |= TX_VLAN_CMP_MASK;
	}

	if (ol_flags & PKT_TX_IP_CKSUM) {
		type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
		cmp_mask |= TX_MAC_LEN_CMP_MASK;
	}

	/* Specify which HW CTX to upload. */
	mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_TCP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_SCTP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	default:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		break;
	}

	txq->ctx_cache[ctx_curr].flags = ol_flags;
	txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
	txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
		vlan_macip_lens & cmp_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
		uint32_t vlan_macip_lens)
{
	/* If match with the current context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* If match with the second context */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* Mismatch, use the previous context */
	return (IGB_CTX_NUM);
}
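
/*
 * Illustrative sketch (not part of the original driver): how the transmit
 * path is expected to use what_advctx_update(). A return value of
 * IGB_CTX_NUM means that no cached context matched, so a fresh context
 * descriptor must be written with igbe_set_xmit_ctx() before the data
 * descriptors. Hypothetical helper for clarity only:
 */
static inline int
example_need_new_ctx(struct igb_tx_queue *txq, uint16_t tx_ol_req,
		uint32_t vlan_macip_lens)
{
	/* Non-zero when eth_igb_xmit_pkts() must emit a context descriptor. */
	return what_advctx_update(txq, tx_ol_req, vlan_macip_lens) ==
		IGB_CTX_NUM;
}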
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
	static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
	uint32_t tmp;

	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return tmp;
}

static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
{
	static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
	return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}
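
/*
 * Note on the two helpers above: they exploit the fact that a flag test
 * yields 0 or 1 in C, and use that value as an index into a two-entry
 * table, avoiding a conditional branch in the hot transmit path. A minimal
 * standalone sketch of the same idiom (hypothetical names, for clarity):
 */
static inline uint32_t
example_branchless_flag_map(uint16_t ol_flags, uint16_t flag_mask,
		uint32_t hw_bit)
{
	const uint32_t map[2] = {0, hw_bit};

	/* map[0] when the flag is clear, map[1] (== hw_bit) when it is set */
	return map[(ol_flags & flag_mask) != 0];
}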
uint16_t
eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct igb_tx_queue *txq;
	struct igb_tx_entry *sw_ring;
	struct igb_tx_entry *txe, *txn;
	volatile union e1000_adv_tx_desc *txr;
	volatile union e1000_adv_tx_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	uint32_t olinfo_status;
	uint32_t cmd_type_len;
	uint32_t pkt_len;
	uint16_t slen, ol_flags, tx_ol_req;
	uint16_t tx_id, tx_last, tx_end, nb_tx;
	uint32_t new_ctx = 0;
	uint32_t ctx = 0;
	uint32_t vlan_macip_lens;

	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt.pkt_len;

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the VLAN Tag Identifier, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);

		ol_flags = tx_pkt->ol_flags;
		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);

		/* If a context descriptor needs to be built, use one more slot. */
		if (tx_ol_req) {
			ctx = what_advctx_update(txq, tx_ol_req,
				vlan_macip_lens);
			/* Only allocate context descriptor if required. */
			new_ctx = (ctx == IGB_CTX_NUM);
			ctx = txq->ctx_curr;
			tx_last = (uint16_t) (tx_last + new_ctx);
		}
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u\n",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);
		/*
		 * Check if there are enough free descriptors in the TX ring
		 * to transmit the next packet.
		 * This operation is based on the two following rules:
		 *
		 *   1- Only check that the last needed TX descriptor can be
		 *      allocated (by construction, if that descriptor is free,
		 *      all intermediate ones are also free).
		 *
		 *      For this purpose, the index of the last TX descriptor
		 *      used for a packet (the "last descriptor" of a packet)
		 *      is recorded in the TX entries (the last one included)
		 *      that are associated with all TX descriptors allocated
		 *      for that packet.
		 *
		 *   2- Avoid allocating the last free TX descriptor of the
		 *      ring, in order to never set the TDT register with the
		 *      same value stored in parallel by the NIC in the TDH
		 *      register, which makes the TX engine of the NIC enter
		 *      a deadlock situation.
		 *
		 *      By extension, avoid allocating a free descriptor that
		 *      belongs to the last set of free descriptors allocated
		 *      to the same packet previously transmitted.
		 */

		/*
		 * The "last descriptor" of the previously sent packet, if any,
		 * which used the last descriptor to allocate.
		 */
		tx_end = sw_ring[tx_last].last_id;

		/*
		 * The next descriptor following that "last descriptor" in the
		 * ring.
		 */
		tx_end = sw_ring[tx_end].next_id;

		/*
		 * The "last descriptor" associated with that next descriptor.
		 */
		tx_end = sw_ring[tx_end].last_id;

		/*
		 * Check that this descriptor is free.
		 */
		if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
			if (nb_tx == 0)
				return (0);
			goto end_of_tx;
		}
		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - E1000_ADVTXD_DTYP_DATA
		 *   - E1000_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - E1000_ADVTXD_DCMD_IFCS
		 *   - E1000_ADVTXD_MAC_1588
		 *   - E1000_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - E1000_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - E1000_TXD_CMD_RS
		 */
		cmd_type_len = txq->txd_type |
			E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
		olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
		if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
		if (tx_ol_req) {
			/* Setup TX Advanced context descriptor if required */
			if (new_ctx) {
				volatile struct e1000_adv_tx_context_desc *
					ctx_txd;

				ctx_txd = (volatile struct
					e1000_adv_tx_context_desc *)
					&txr[tx_id];

				txn = &sw_ring[txe->next_id];
				RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

				if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}

				igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
					vlan_macip_lens);

				txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}

			/* Setup the TX Advanced Data Descriptor */
			cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
			olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
		}
		m_seg = tx_pkt;
		do {
			txn = &sw_ring[txe->next_id];
			txd = &txr[tx_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/*
			 * Set up transmit descriptor.
			 */
			slen = (uint16_t) m_seg->pkt.data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			txd->read.buffer_addr =
				rte_cpu_to_le_64(buf_dma_addr);
			txd->read.cmd_type_len =
				rte_cpu_to_le_32(cmd_type_len | slen);
			txd->read.olinfo_status =
				rte_cpu_to_le_32(olinfo_status);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->pkt.next;
		} while (m_seg != NULL);
		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 * and Report Status (RS).
		 */
		txd->read.cmd_type_len |=
			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	}
 end_of_tx:
	rte_wmb();

	/*
	 * Set the Transmit Descriptor Tail (TDT).
	 */
	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
		   (unsigned) tx_id, (unsigned) nb_tx);
	txq->tx_tail = tx_id;

	return (nb_tx);
}
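
/*
 * Illustrative usage (not part of the driver): applications reach
 * eth_igb_xmit_pkts() through the generic burst API. A minimal,
 * hypothetical calling sketch:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_prep;    // packets prepared by the application
 *	uint16_t nb_sent;
 *
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *	// mbufs pkts[nb_sent..nb_prep-1] were not queued and remain
 *	// owned by the caller, which may retry or free them.
 */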
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint16_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
	uint16_t pkt_flags;

	static uint16_t ip_pkt_types_map[16] = {
		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
	};

#if defined(RTE_LIBRTE_IEEE1588)
	static uint32_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

	pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
				ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#else
	pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif
	return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
					0 : PKT_RX_RSS_HASH));
}
static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint16_t pkt_flags;

	/* Check if VLAN present */
	pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
				PKT_RX_VLAN_PKT : 0);

#if defined(RTE_LIBRTE_IEEE1588)
	if (rx_status & E1000_RXD_STAT_TMST)
		pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
	return pkt_flags;
}
static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	/*
	 * Bit 30: IPE, IPv4 checksum error
	 * Bit 29: L4I, L4I integrity error
	 */
	static uint16_t error_to_pkt_flags_map[4] = {
		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
	return error_to_pkt_flags_map[(rx_status >>
		E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
}
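
/*
 * Worked example for rx_desc_error_to_pkt_flags(): assuming
 * E1000_RXD_ERR_CKSUM_BIT == 29 and E1000_RXD_ERR_CKSUM_MSK == 3, bit 0 of
 * the resulting index is L4I and bit 1 is IPE:
 *
 *	rx_status = (1u << 30);          // IPE set, L4I clear
 *	idx = (rx_status >> 29) & 3;     // idx == 2
 *	=> PKT_RX_IP_CKSUM_BAD
 *
 * An index of 3 (both error bits set) maps to both checksum-bad flags.
 */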
uint16_t
eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma_addr;
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t pkt_len, pkt_flags;
	uint16_t rx_id, nb_rx, nb_hold;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * End of packet.
		 *
		 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
		 * likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x pkt_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma_addr;
		rxdp->read.pkt_addr = dma_addr;
		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
				      rxq->crc_len);
		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch(rxm->pkt.data);
		rxm->pkt.nb_segs = 1;
		rxm->pkt.next = NULL;
		rxm->pkt.pkt_len = pkt_len;
		rxm->pkt.data_len = pkt_len;
		rxm->pkt.in_port = rxq->port_id;

		rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->pkt.vlan_macip.f.vlan_tci =
			rte_le_to_cpu_16(rxd.wb.upper.vlan);

		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		rxm->ol_flags = pkt_flags;
		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;
	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
				    (rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
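
/*
 * Worked example of the RDT update policy above: with a 512-descriptor
 * ring, if the last processed descriptor is rx_id == 0, the RDT register
 * is written with nb_rx_desc - 1 == 511 rather than 0. RDT therefore
 * always lags the last replenished descriptor by one, so it can never
 * become equal to RDH and the hardware never sees a "full" ring.
 */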
uint16_t
eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma; /* Physical address of mbuf data buffer */
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t data_len, pkt_flags;
	uint16_t rx_id, nb_rx, nb_hold;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;

	/*
	 * Retrieve RX context of current packet, if any.
	 */
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;
	while (nb_rx < nb_pkts) {
 next_desc:
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * Descriptor done.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x data_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		/*
		 * Update RX descriptor with the physical address of the new
		 * data buffer of the new allocated mbuf.
		 */
		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.pkt_addr = dma;
		rxdp->read.hdr_addr = dma;
		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
		rxm->pkt.data_len = data_len;
		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->pkt.pkt_len = data_len;
			first_seg->pkt.nb_segs = 1;
		} else {
			first_seg->pkt.pkt_len += data_len;
			first_seg->pkt.nb_segs++;
			last_seg->pkt.next = rxm;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (! (staterr & E1000_RXD_STAT_EOP)) {
			last_seg = rxm;
			goto next_desc;
		}
		/*
		 * This is the last buffer of the received packet.
		 * If the CRC is not stripped by the hardware:
		 *   - Subtract the CRC length from the total packet length.
		 *   - If the last buffer only contains the whole CRC or a part
		 *     of it, free the mbuf associated to the last buffer.
		 *     If part of the CRC is also contained in the previous
		 *     mbuf, subtract the length of that CRC part from the
		 *     data length of the previous mbuf.
		 */
		rxm->pkt.next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
			if (data_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->pkt.nb_segs--;
				last_seg->pkt.data_len = (uint16_t)
					(last_seg->pkt.data_len -
					 (ETHER_CRC_LEN - data_len));
				last_seg->pkt.next = NULL;
			} else
				rxm->pkt.data_len =
					(uint16_t) (data_len - ETHER_CRC_LEN);
		}
		/*
		 * Initialize the first mbuf of the returned packet:
		 *    - RX port identifier,
		 *    - hardware offload data, if any:
		 *      - RSS flag & hash,
		 *      - IP checksum flag,
		 *      - VLAN TCI, if any,
		 *      - error flags.
		 */
		first_seg->pkt.in_port = rxq->port_id;
		first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;

		/*
		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
		 * set in the pkt_flags field.
		 */
		first_seg->pkt.vlan_macip.f.vlan_tci =
			rte_le_to_cpu_16(rxd.wb.upper.vlan);
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		first_seg->ol_flags = pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch(first_seg->pkt.data);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;
		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * Save receive context.
	 */
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;
	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
				    (rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define IGB_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define IGB_MIN_RING_DESC 32
#define IGB_MAX_RING_DESC 4096
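
/*
 * Illustrative sketch (not part of the original driver) of the ring-size
 * constraint enforced by the queue setup functions below: the ring byte
 * size must be a multiple of IGB_ALIGN and the descriptor count must lie
 * in [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC]. Hypothetical helper:
 */
static inline int
example_ring_desc_count_valid(uint16_t nb_desc, size_t desc_size)
{
	return ((nb_desc * desc_size) % IGB_ALIGN) == 0 &&
		nb_desc >= IGB_MIN_RING_DESC &&
		nb_desc <= IGB_MAX_RING_DESC;
}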
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		     dev->driver->pci_drv.name, ring_name,
		     dev->data->port_id, queue_id);
	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(z_name, ring_size,
			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(z_name, ring_size,
			socket_id, 0, IGB_ALIGN);
#endif
}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
	if (txq != NULL) {
		igb_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
eth_igb_tx_queue_release(void *txq)
{
	igb_tx_queue_release(txq);
}
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
	txq->tx_head = 0;
	txq->tx_tail = 0;
	txq->ctx_curr = 0;
	memset((void*)&txq->ctx_cache, 0,
		IGB_CTX_NUM * sizeof(struct igb_advctx_info));
}
static void
igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
{
	static const union e1000_adv_tx_desc zeroed_desc = { .read = {
			.buffer_addr = 0}};
	struct igb_tx_entry *txe = txq->sw_ring;
	uint16_t i, prev;
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i] = zeroed_desc;
	}

	/* Initialize ring entries */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);

		txd->wb.status = E1000_TXD_STAT_DD;
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->txd_type = E1000_ADVTXD_DTYP_DATA;
	/* 82575 specific, each tx queue will use 2 hw contexts */
	if (hw->mac.type == e1000_82575)
		txq->ctx_start = txq->queue_id * IGB_CTX_NUM;

	igb_reset_tx_queue_stat(txq);
}
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct igb_tx_queue *txq;
	struct e1000_hw *hw;
	uint32_t size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/*
	 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
	 * driver.
	 */
	if (tx_conf->tx_free_thresh != 0)
		RTE_LOG(WARNING, PMD,
			"The tx_free_thresh parameter is not "
			"used for the 1G driver.\n");
	if (tx_conf->tx_rs_thresh != 0)
		RTE_LOG(WARNING, PMD,
			"The tx_rs_thresh parameter is not "
			"used for the 1G driver.\n");
	if (tx_conf->tx_thresh.wthresh == 0)
		RTE_LOG(WARNING, PMD,
			"To improve 1G driver performance, consider setting "
			"the TX WTHRESH value to 4, 8, or 16.\n");

	/* Free memory prior to re-allocation if needed */
	if (dev->data->tx_queues[queue_idx] != NULL)
		igb_tx_queue_release(dev->data->tx_queues[queue_idx]);

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
			  CACHE_LINE_SIZE);
	if (txq == NULL)
		return (-ENOMEM);

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
				   size, socket_id);
	if (tz == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}

	txq->nb_tx_desc = nb_desc;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	txq->port_id = dev->data->port_id;

	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
#else
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
#endif
	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc("txq->sw_ring",
				   sizeof(struct igb_tx_entry) * nb_desc,
				   CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	igb_reset_tx_queue(txq, dev);
	dev->tx_pkt_burst = eth_igb_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;

	return (0);
}
static void
igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
	if (rxq != NULL) {
		igb_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

void
eth_igb_rx_queue_release(void *rxq)
{
	igb_rx_queue_release(rxq);
}
static void
igb_reset_rx_queue(struct igb_rx_queue *rxq)
{
	static const union e1000_adv_rx_desc zeroed_desc = { .read = {
			.pkt_addr = 0}};
	unsigned i;

	/* Zero out HW ring memory */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rxq->rx_ring[i] = zeroed_desc;
	}

	rxq->rx_tail = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct igb_rx_queue *rxq;
	struct e1000_hw *hw;
	unsigned int size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the RX queue data structure. */
	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
			  CACHE_LINE_SIZE);
	if (rxq == NULL)
		return (-ENOMEM);
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->pthresh = rx_conf->rx_thresh.pthresh;
	rxq->hthresh = rx_conf->rx_thresh.hthresh;
	rxq->wthresh = rx_conf->rx_thresh.wthresh;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
				  ETHER_CRC_LEN);

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
	if (rz == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
#else
	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
#endif
	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;

	/* Allocate software ring. */
	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
				   sizeof(struct igb_rx_entry) * nb_desc,
				   CACHE_LINE_SIZE);
	if (rxq->sw_ring == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;
	igb_reset_rx_queue(rxq);

	return 0;
}
uint32_t
eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define IGB_RXQ_SCAN_INTERVAL 4
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq;
	uint32_t desc = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
		return 0;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxdp = &(rxq->rx_ring[rxq->rx_tail]);

	while ((desc < rxq->nb_rx_desc) &&
		(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
		desc += IGB_RXQ_SCAN_INTERVAL;
		rxdp += IGB_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}
int
eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq = rx_queue;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return 0;
	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	rxdp = &rxq->rx_ring[desc];
	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct igb_tx_queue *txq;
	struct igb_rx_queue *rxq;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			igb_tx_queue_release_mbufs(txq);
			igb_reset_tx_queue(txq, dev);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			igb_rx_queue_release_mbufs(rxq);
			igb_reset_rx_queue(rxq);
		}
	}
}
/*
 * Receive Side Scaling (RSS).
 * See section 7.1.1.7 in the following document:
 *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
 *
 * Principles:
 * The source and destination IP addresses of the IP header and the source and
 * destination ports of TCP/UDP headers, if any, of received packets are hashed
 * against a configurable random key to compute a 32-bit RSS hash result.
 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
 * RSS output index, which is used as the RX queue index in which to store the
 * received packets.
 * The following output is supplied in the RX write-back descriptor:
 *     - 32-bit result of the Microsoft RSS hash function,
 *     - 4-bit RSS type field.
 */

/*
 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
 * Used as the default key.
 */
static uint8_t rss_intel_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
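
/*
 * Illustrative sketch (not part of the original driver) of how the
 * hardware consumes the RSS configuration described above: the seven
 * least-significant bits of the 32-bit hash select one of the 128 RETA
 * entries, and that entry supplies the destination queue index.
 * Hypothetical helper:
 */
static inline uint8_t
example_rss_queue_from_hash(uint32_t rss_hash, const uint8_t reta[128])
{
	return reta[rss_hash & 0x7F]; /* 7 LSBs index the 128-entry RETA */
}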
static void
igb_rss_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	uint32_t mrqc;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc &= ~E1000_MRQC_ENABLE_MASK;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
static void
igb_rss_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	uint8_t *hash_key;
	uint32_t rss_key;
	uint32_t mrqc;
	uint32_t shift;
	uint16_t rss_hf;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_hf == 0) /* Disable RSS. */ {
		igb_rss_disable(dev);
		return;
	}
	hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	if (hash_key == NULL)
		hash_key = rss_intel_key; /* Default hash key. */

	/* Fill in RSS hash key. */
	for (i = 0; i < 10; i++) {
		rss_key  = hash_key[(i * 4)];
		rss_key |= hash_key[(i * 4) + 1] << 8;
		rss_key |= hash_key[(i * 4) + 2] << 16;
		rss_key |= hash_key[(i * 4) + 3] << 24;
		E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
	}

	/* Fill in redirection table. */
	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
	for (i = 0; i < 128; i++) {
		union igb_reta {
			uint32_t dword;
			uint8_t  bytes[4];
		} reta;
		uint8_t q_idx;

		q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
				   i % dev->data->nb_rx_queues : 0);
		reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
		if ((i & 3) == 3)
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
	}

	/* Set configured hashing functions in MRQC register. */
	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
	if (rss_hf & ETH_RSS_IPV4)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
	if (rss_hf & ETH_RSS_IPV4_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hf & ETH_RSS_IPV6)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
	if (rss_hf & ETH_RSS_IPV6_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hf & ETH_RSS_IPV6_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
	if (rss_hf & ETH_RSS_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
/*
 * Check whether the MAC type supports VMDq.
 * Return 1 if it is supported, otherwise return 0.
 */
static int
igb_is_vmdq_supported(const struct rte_eth_dev *dev)
{
	const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		return 1;
	default:
		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
		return 0;
	}
}
static int
igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_vmdq_rx_conf *cfg;
	struct e1000_hw *hw;
	uint32_t mrqc, vt_ctl, vmolr, rctl;
	int i;

	PMD_INIT_LOG(DEBUG, ">>");
	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	/* Check if mac type can support VMDq, return value of 0 means NOT support */
	if (igb_is_vmdq_supported(dev) == 0)
		return -1;

	igb_rss_disable(dev);

	/* RCTL: enable VLAN filter */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* MRQC: enable VMDq */
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc |= E1000_MRQC_ENABLE_VMDQ;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);

	/* VTCTL: pool selection according to VLAN tag */
	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
	if (cfg->enable_default_pool)
		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);

	/*
	 * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1.
	 * Both 82576 and 82580 support it.
	 */
	if (hw->mac.type != e1000_i350) {
		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
			vmolr |= E1000_VMOLR_STRVLAN;
			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
		}
	}

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG(hw, (E1000_VFTA + (i * 4)), UINT32_MAX);

	/* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
	if (hw->mac.type != e1000_82580)
		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);

	/*
	 * RAH/RAL - allow pools to read specific mac addresses.
	 * In this case, all pools should be able to read from mac addr 0.
	 */
	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);

	/* VLVF: set up filters for vlan tags as configured */
	for (i = 0; i < cfg->nb_pool_maps; i++) {
		/* set vlan id in VF register and set the valid bit */
		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) |
			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
			E1000_VLVF_POOLSEL_MASK)));
	}

	E1000_WRITE_FLUSH(hw);

	return 0;
}
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/

static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
	struct igb_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned i;

	/* Initialize software ring entries. */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union e1000_adv_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				"queue_id=%hu\n", rxq->queue_id);
			igb_rx_queue_release(rxq);
			return (-ENOMEM);
		}
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
		rxd = &rxq->rx_ring[i];
		rxd->read.hdr_addr = dma_addr;
		rxd->read.pkt_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
#define E1000_MRQC_DEF_Q_SHIFT	(3)

static void
igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mrqc;

	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
		/*
		 * SRIOV active scheme
		 * FIXME if support RSS together with VMDq & SRIOV
		 */
		mrqc = E1000_MRQC_ENABLE_VMDQ;
		/* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
		mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
	} else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * SRIOV inactive scheme
		 */
		switch (dev->data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			igb_rss_configure(dev);
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
			/* Configure general VMDq-only RX parameters */
			igb_vmdq_rx_hw_configure(dev);
			break;
		case ETH_MQ_RX_NONE:
			/* if mq_mode is none, disable RSS mode. */
		default:
			igb_rss_disable(dev);
			break;
		}
	}
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	srrctl = 0;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Configure support of jumbo frames, if any.
	 */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		rctl |= E1000_RCTL_LPE;

		/*
		 * Set maximum packet length by default, and might be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
					VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];
		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
			0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE) > buf_size){
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
	}
	/*
	 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
	 * register, since the code above configures the SRRCTL register of
	 * the RX queue in such a case.
	 * All configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	if (rctl_bsize > 0) {
		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
			rctl |= E1000_RCTL_SZ_512;
		else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
	}

	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
	igb_dev_mq_rx_configure(dev);

	/* Update the rctl since igb_dev_mq_rx_configure may change its value */
	rctl |= E1000_READ_REG(hw, E1000_RCTL);
	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	/* Enable both L3/L4 rx checksum offload */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint64_t bus_addr;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
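
/*
 * Worked example of the TXDCTL packing used above: PTHRESH occupies bits
 * 4:0, HTHRESH bits 12:8 and WTHRESH bits 20:16, so pthresh=8, hthresh=1
 * and wthresh=16 yield:
 *
 *	txdctl = 8 | (1 << 8) | (16 << 16) | E1000_TXDCTL_QUEUE_ENABLE;
 */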
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE) > buf_size){
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum:
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum:
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}