4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_interrupts.h>
47 #include <rte_byteorder.h>
48 #include <rte_common.h>
50 #include <rte_debug.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_lcore.h>
60 #include <rte_atomic.h>
61 #include <rte_branch_prediction.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
68 #include <rte_prefetch.h>
72 #include <rte_string_fns.h>
74 #include "e1000_logs.h"
75 #include "e1000/e1000_api.h"
76 #include "e1000_ethdev.h"
78 static inline struct rte_mbuf *
79 rte_rxmbuf_alloc(struct rte_mempool *mp)
83 m = __rte_mbuf_raw_alloc(mp);
84 __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
88 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
89 (uint64_t) ((mb)->buf_physaddr + \
90 (uint64_t) ((char *)((mb)->pkt.data) - \
91 (char *)(mb)->buf_addr))
93 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
94 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
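/*
 * Editorial sketch (not part of the original driver): how the two DMA
 * address macros above are typically consumed when programming an RX
 * descriptor. The helper name below is hypothetical; the real refill code
 * lives in the RX burst functions further down, which do exactly this.
 */
static inline void
igb_example_refill_rxd(volatile union e1000_adv_rx_desc *rxdp,
	struct rte_mbuf *mb)
{
	/* DMA address of the mbuf data area, with the headroom skipped. */
	uint64_t dma_addr =
		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));

	rxdp->read.hdr_addr = dma_addr;
	rxdp->read.pkt_addr = dma_addr;
}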
97 * Structure associated with each descriptor of the RX ring of an RX queue.
100 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
104 * Structure associated with each descriptor of the TX ring of a TX queue.
106 struct igb_tx_entry {
107 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
108 uint16_t next_id; /**< Index of next descriptor in ring. */
109 uint16_t last_id; /**< Index of last scattered descriptor. */
113 * Structure associated with each RX queue.
115 struct igb_rx_queue {
116 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
117 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
118 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
119 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
120 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
121 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
122 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
123 uint16_t nb_rx_desc; /**< number of RX descriptors. */
124 uint16_t rx_tail; /**< current value of RDT register. */
125 uint16_t nb_rx_hold; /**< number of held free RX desc. */
126 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
127 uint16_t queue_id; /**< RX queue index. */
128 uint8_t port_id; /**< Device port identifier. */
129 uint8_t pthresh; /**< Prefetch threshold register. */
130 uint8_t hthresh; /**< Host threshold register. */
131 uint8_t wthresh; /**< Write-back threshold register. */
132 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
133 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
137 * Hardware context number
139 enum igb_advctx_num {
140 IGB_CTX_0 = 0, /**< CTX0 */
141 IGB_CTX_1 = 1, /**< CTX1 */
142 IGB_CTX_NUM = 2, /**< CTX_NUM */
146 * Structure to check whether a new context needs to be built
148 struct igb_advctx_info {
149 uint16_t flags; /**< ol_flags related to context build. */
150 uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
151 union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
155 * Structure associated with each TX queue.
157 struct igb_tx_queue {
158 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
159 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
160 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
161 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
162 uint32_t txd_type; /**< Device-specific TXD type */
163 uint16_t nb_tx_desc; /**< number of TX descriptors. */
164 uint16_t tx_tail; /**< Current value of TDT register. */
166 /**< Index of first used TX descriptor. */
167 uint16_t queue_id; /**< TX queue index. */
168 uint8_t port_id; /**< Device port identifier. */
169 uint8_t pthresh; /**< Prefetch threshold register. */
170 uint8_t hthresh; /**< Host threshold register. */
171 uint8_t wthresh; /**< Write-back threshold register. */
173 /**< Current used hardware descriptor. */
175 /**< Start context position for transmit queue. */
176 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
177 /**< Hardware context history.*/
181 #define RTE_PMD_USE_PREFETCH
184 #ifdef RTE_PMD_USE_PREFETCH
185 #define rte_igb_prefetch(p) rte_prefetch0(p)
187 #define rte_igb_prefetch(p) do {} while(0)
190 #ifdef RTE_PMD_PACKET_PREFETCH
191 #define rte_packet_prefetch(p) rte_prefetch1(p)
193 #define rte_packet_prefetch(p) do {} while(0)
196 /*********************************************************************
200 **********************************************************************/
203 * Advanced context descriptors are almost the same between igb/ixgbe.
204 * This is kept as a separate function, leaving room for optimization here.
205 * Rework is required to use the pre-defined values.
209 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
210 volatile struct e1000_adv_tx_context_desc *ctx_txd,
211 uint16_t ol_flags, uint32_t vlan_macip_lens)
213 uint32_t type_tucmd_mlhl;
214 uint32_t mss_l4len_idx;
215 uint32_t ctx_idx, ctx_curr;
218 ctx_curr = txq->ctx_curr;
219 ctx_idx = ctx_curr + txq->ctx_start;
224 if (ol_flags & PKT_TX_VLAN_PKT) {
225 cmp_mask |= TX_VLAN_CMP_MASK;
228 if (ol_flags & PKT_TX_IP_CKSUM) {
229 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
230 cmp_mask |= TX_MAC_LEN_CMP_MASK;
233 /* Specify which HW CTX to upload. */
234 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
235 switch (ol_flags & PKT_TX_L4_MASK) {
236 case PKT_TX_UDP_CKSUM:
237 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
238 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
239 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
240 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
242 case PKT_TX_TCP_CKSUM:
243 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
244 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
245 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
246 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
248 case PKT_TX_SCTP_CKSUM:
249 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
250 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
251 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
252 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
255 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
256 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
260 txq->ctx_cache[ctx_curr].flags = ol_flags;
261 txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
262 txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
263 vlan_macip_lens & cmp_mask;
265 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
266 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
267 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
268 ctx_txd->seqnum_seed = 0;
272 * Check which hardware context can be used. Use the existing match
273 * or create a new context descriptor.
275 static inline uint32_t
276 what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
277 uint32_t vlan_macip_lens)
279 /* If match with the current context */
280 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
281 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
282 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
283 return txq->ctx_curr;
286 /* Otherwise, check whether the second cached context matches */
287 txq->ctx_curr ^= 1;
288 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
289 (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
290 (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
291 return txq->ctx_curr;
294 /* Mismatch with both cached contexts: a new one must be built */
295 return (IGB_CTX_NUM);
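/*
 * Editorial note: a return value of IGB_CTX_NUM means that neither cached
 * context matched, so the transmit path must reserve one extra descriptor
 * and rebuild the hardware context, roughly (see eth_igb_xmit_pkts below):
 *
 *	ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
 *	new_ctx = (ctx == IGB_CTX_NUM);
 *	tx_last = (uint16_t)(tx_last + new_ctx);
 */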
298 static inline uint32_t
299 tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
301 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
302 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
305 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
306 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
310 static inline uint32_t
311 tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
313 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
314 return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
318 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
321 struct igb_tx_queue *txq;
322 struct igb_tx_entry *sw_ring;
323 struct igb_tx_entry *txe, *txn;
324 volatile union e1000_adv_tx_desc *txr;
325 volatile union e1000_adv_tx_desc *txd;
326 struct rte_mbuf *tx_pkt;
327 struct rte_mbuf *m_seg;
328 uint64_t buf_dma_addr;
329 uint32_t olinfo_status;
330 uint32_t cmd_type_len;
339 uint32_t new_ctx = 0;
341 uint32_t vlan_macip_lens;
344 sw_ring = txq->sw_ring;
346 tx_id = txq->tx_tail;
347 txe = &sw_ring[tx_id];
349 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
351 pkt_len = tx_pkt->pkt.pkt_len;
353 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
356 * The number of descriptors that must be allocated for a
357 * packet is the number of segments of that packet, plus 1
358 * Context Descriptor for the VLAN Tag Identifier, if any.
359 * Determine the last TX descriptor to allocate in the TX ring
360 * for the packet, starting from the current position (tx_id)
363 tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
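/*
 * Worked example (editorial): a packet made of 2 mbuf segments whose
 * offload flags require building a new context descriptor occupies
 * 3 TX descriptors in total: one context descriptor plus one data
 * descriptor per segment.
 */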
365 ol_flags = tx_pkt->ol_flags;
366 vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
367 tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
369 /* If a Context Descriptor needs to be built. */
371 ctx = what_advctx_update(txq, tx_ol_req,
373 /* Only allocate a context descriptor if required */
374 new_ctx = (ctx == IGB_CTX_NUM);
376 tx_last = (uint16_t) (tx_last + new_ctx);
378 if (tx_last >= txq->nb_tx_desc)
379 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
381 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
382 " tx_first=%u tx_last=%u\n",
383 (unsigned) txq->port_id,
384 (unsigned) txq->queue_id,
390 * Check if there are enough free descriptors in the TX ring
391 * to transmit the next packet.
392 * This operation is based on the two following rules:
394 * 1- Only check that the last needed TX descriptor can be
395 * allocated (by construction, if that descriptor is free,
396 * all intermediate ones are also free).
398 * For this purpose, the index of the last TX descriptor
399 * used for a packet (the "last descriptor" of a packet)
400 * is recorded in the TX entries (the last one included)
401 * that are associated with all TX descriptors allocated
404 * 2- Avoid allocating the last free TX descriptor of the
405 * ring, in order to never set the TDT register with the
406 * same value stored in parallel by the NIC in the TDH
407 * register, which would make the TX engine of the NIC
408 * enter a deadlock situation.
410 * By extension, avoid allocating a free descriptor that
411 * belongs to the last set of free descriptors allocated
412 * to the same packet previously transmitted.
416 * The entry at tx_last records the "last descriptor" of the packet,
417 * if any, that previously used that same descriptor slot.
419 tx_end = sw_ring[tx_last].last_id;
422 * The next descriptor following that "last descriptor" in the
425 tx_end = sw_ring[tx_end].next_id;
428 * The "last descriptor" associated with that next descriptor.
430 tx_end = sw_ring[tx_end].last_id;
433 * Check that this descriptor is free.
435 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
442 * Set common flags of all TX Data Descriptors.
444 * The following bits must be set in all Data Descriptors:
445 * - E1000_ADVTXD_DTYP_DATA
446 * - E1000_ADVTXD_DCMD_DEXT
448 * The following bits must be set in the first Data Descriptor
449 * and are ignored in the other ones:
450 * - E1000_ADVTXD_DCMD_IFCS
451 * - E1000_ADVTXD_MAC_1588
452 * - E1000_ADVTXD_DCMD_VLE
454 * The following bits must only be set in the last Data
456 * - E1000_TXD_CMD_EOP
458 * The following bits can be set in any Data Descriptor, but
459 * are only set in the last Data Descriptor:
462 cmd_type_len = txq->txd_type |
463 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
464 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
465 #if defined(RTE_LIBRTE_IEEE1588)
466 if (ol_flags & PKT_TX_IEEE1588_TMST)
467 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
470 /* Setup TX Advanced context descriptor if required */
472 volatile struct e1000_adv_tx_context_desc *
475 ctx_txd = (volatile struct
476 e1000_adv_tx_context_desc *)
479 txn = &sw_ring[txe->next_id];
480 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
482 if (txe->mbuf != NULL) {
483 rte_pktmbuf_free_seg(txe->mbuf);
487 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
490 txe->last_id = tx_last;
491 tx_id = txe->next_id;
495 /* Setup the TX Advanced Data Descriptor */
496 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
497 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
498 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
503 txn = &sw_ring[txe->next_id];
506 if (txe->mbuf != NULL)
507 rte_pktmbuf_free_seg(txe->mbuf);
511 * Set up transmit descriptor.
513 slen = (uint16_t) m_seg->pkt.data_len;
514 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
515 txd->read.buffer_addr =
516 rte_cpu_to_le_64(buf_dma_addr);
517 txd->read.cmd_type_len =
518 rte_cpu_to_le_32(cmd_type_len | slen);
519 txd->read.olinfo_status =
520 rte_cpu_to_le_32(olinfo_status);
521 txe->last_id = tx_last;
522 tx_id = txe->next_id;
524 m_seg = m_seg->pkt.next;
525 } while (m_seg != NULL);
528 * The last packet data descriptor needs End Of Packet (EOP)
529 * and Report Status (RS).
531 txd->read.cmd_type_len |=
532 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
538 * Set the Transmit Descriptor Tail (TDT).
540 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
541 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
542 (unsigned) txq->port_id, (unsigned) txq->queue_id,
543 (unsigned) tx_id, (unsigned) nb_tx);
544 txq->tx_tail = tx_id;
549 /*********************************************************************
553 **********************************************************************/
554 static inline uint16_t
555 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
559 static uint16_t ip_pkt_types_map[16] = {
560 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
561 PKT_RX_IPV6_HDR, 0, 0, 0,
562 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
563 PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
566 #if defined(RTE_LIBRTE_IEEE1588)
567 static uint32_t ip_pkt_etqf_map[8] = {
568 0, 0, 0, PKT_RX_IEEE1588_PTP,
572 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
573 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
574 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
576 pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
577 ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
579 return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
580 0 : PKT_RX_RSS_HASH));
583 static inline uint16_t
584 rx_desc_status_to_pkt_flags(uint32_t rx_status)
588 /* Check if VLAN present */
589 pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
590 PKT_RX_VLAN_PKT : 0);
592 #if defined(RTE_LIBRTE_IEEE1588)
593 if (rx_status & E1000_RXD_STAT_TMST)
594 pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
599 static inline uint16_t
600 rx_desc_error_to_pkt_flags(uint32_t rx_status)
603 * Bit 30: IPE, IPv4 checksum error
604 * Bit 29: L4I, L4 integrity error
607 static uint16_t error_to_pkt_flags_map[4] = {
608 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
609 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
611 return error_to_pkt_flags_map[(rx_status >>
612 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
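/*
 * Example (editorial): a status word with both the IPE and L4I error bits
 * set indexes entry 3 of the map above, so the returned flags are
 * PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD.
 */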
616 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
619 struct igb_rx_queue *rxq;
620 volatile union e1000_adv_rx_desc *rx_ring;
621 volatile union e1000_adv_rx_desc *rxdp;
622 struct igb_rx_entry *sw_ring;
623 struct igb_rx_entry *rxe;
624 struct rte_mbuf *rxm;
625 struct rte_mbuf *nmb;
626 union e1000_adv_rx_desc rxd;
629 uint32_t hlen_type_rss;
639 rx_id = rxq->rx_tail;
640 rx_ring = rxq->rx_ring;
641 sw_ring = rxq->sw_ring;
642 while (nb_rx < nb_pkts) {
644 * The order of operations here is important as the DD status
645 * bit must not be read after any other descriptor fields.
646 * rx_ring and rxdp are pointing to volatile data so the order
647 * of accesses cannot be reordered by the compiler. If they were
648 * not volatile, they could be reordered which could lead to
649 * using invalid descriptor fields when read from rxd.
651 rxdp = &rx_ring[rx_id];
652 staterr = rxdp->wb.upper.status_error;
653 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
660 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
661 * likely to be invalid and to be dropped by the various
662 * validation checks performed by the network stack.
664 * Allocate a new mbuf to replenish the RX ring descriptor.
665 * If the allocation fails:
666 * - arrange for that RX descriptor to be the first one
667 * being parsed the next time the receive function is
668 * invoked [on the same queue].
670 * - Stop parsing the RX ring and return immediately.
672 * This policy does not drop the packet received in the RX
673 * descriptor for which the allocation of a new mbuf failed.
674 * Thus, it allows that packet to be retrieved later, once
675 * mbufs have been freed in the meantime.
676 * As a side effect, holding RX descriptors instead of
677 * systematically giving them back to the NIC may lead to
678 * RX ring exhaustion situations.
679 * However, the NIC can gracefully prevent such situations
680 * from happening by sending specific "back-pressure" flow
681 * control frames to its peer(s).
683 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
684 "staterr=0x%x pkt_len=%u\n",
685 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
686 (unsigned) rx_id, (unsigned) staterr,
687 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
689 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
691 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
692 "queue_id=%u\n", (unsigned) rxq->port_id,
693 (unsigned) rxq->queue_id);
694 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
699 rxe = &sw_ring[rx_id];
701 if (rx_id == rxq->nb_rx_desc)
704 /* Prefetch next mbuf while processing current one. */
705 rte_igb_prefetch(sw_ring[rx_id].mbuf);
708 * When the next RX descriptor is on a cache-line boundary,
709 * prefetch the next 4 RX descriptors and the next 8 pointers
712 if ((rx_id & 0x3) == 0) {
713 rte_igb_prefetch(&rx_ring[rx_id]);
714 rte_igb_prefetch(&sw_ring[rx_id]);
720 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
721 rxdp->read.hdr_addr = dma_addr;
722 rxdp->read.pkt_addr = dma_addr;
725 * Initialize the returned mbuf.
726 * 1) setup generic mbuf fields:
727 * - number of segments,
730 * - RX port identifier.
731 * 2) integrate hardware offload data, if any:
733 * - IP checksum flag,
734 * - VLAN TCI, if any,
737 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
739 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
740 rte_packet_prefetch(rxm->pkt.data);
741 rxm->pkt.nb_segs = 1;
742 rxm->pkt.next = NULL;
743 rxm->pkt.pkt_len = pkt_len;
744 rxm->pkt.data_len = pkt_len;
745 rxm->pkt.in_port = rxq->port_id;
747 rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
748 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
749 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
750 rxm->pkt.vlan_macip.f.vlan_tci =
751 rte_le_to_cpu_16(rxd.wb.upper.vlan);
753 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
754 pkt_flags = (uint16_t)(pkt_flags |
755 rx_desc_status_to_pkt_flags(staterr));
756 pkt_flags = (uint16_t)(pkt_flags |
757 rx_desc_error_to_pkt_flags(staterr));
758 rxm->ol_flags = pkt_flags;
761 * Store the mbuf address into the next entry of the array
762 * of returned packets.
764 rx_pkts[nb_rx++] = rxm;
766 rxq->rx_tail = rx_id;
769 * If the number of free RX descriptors is greater than the RX free
770 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
772 * Update the RDT with the value of the last processed RX descriptor
773 * minus 1, to guarantee that the RDT register is never equal to the
774 * RDH register, which creates a "full" ring situation from the
775 * hardware point of view...
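/*
 * Worked example (editorial): with a 512-descriptor ring, if the next
 * descriptor to be processed (rx_id) is 0, the code below writes 511 into
 * RDT, keeping the tail one descriptor behind the software position and
 * therefore never equal to RDH.
 */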
777 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
778 if (nb_hold > rxq->rx_free_thresh) {
779 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
780 "nb_hold=%u nb_rx=%u\n",
781 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
782 (unsigned) rx_id, (unsigned) nb_hold,
784 rx_id = (uint16_t) ((rx_id == 0) ?
785 (rxq->nb_rx_desc - 1) : (rx_id - 1));
786 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
789 rxq->nb_rx_hold = nb_hold;
794 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
797 struct igb_rx_queue *rxq;
798 volatile union e1000_adv_rx_desc *rx_ring;
799 volatile union e1000_adv_rx_desc *rxdp;
800 struct igb_rx_entry *sw_ring;
801 struct igb_rx_entry *rxe;
802 struct rte_mbuf *first_seg;
803 struct rte_mbuf *last_seg;
804 struct rte_mbuf *rxm;
805 struct rte_mbuf *nmb;
806 union e1000_adv_rx_desc rxd;
807 uint64_t dma; /* Physical address of mbuf data buffer */
809 uint32_t hlen_type_rss;
819 rx_id = rxq->rx_tail;
820 rx_ring = rxq->rx_ring;
821 sw_ring = rxq->sw_ring;
824 * Retrieve RX context of current packet, if any.
826 first_seg = rxq->pkt_first_seg;
827 last_seg = rxq->pkt_last_seg;
829 while (nb_rx < nb_pkts) {
832 * The order of operations here is important as the DD status
833 * bit must not be read after any other descriptor fields.
834 * rx_ring and rxdp are pointing to volatile data so the order
835 * of accesses cannot be reordered by the compiler. If they were
836 * not volatile, they could be reordered which could lead to
837 * using invalid descriptor fields when read from rxd.
839 rxdp = &rx_ring[rx_id];
840 staterr = rxdp->wb.upper.status_error;
841 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
848 * Allocate a new mbuf to replenish the RX ring descriptor.
849 * If the allocation fails:
850 * - arrange for that RX descriptor to be the first one
851 * being parsed the next time the receive function is
852 * invoked [on the same queue].
854 * - Stop parsing the RX ring and return immediately.
856 * This policy does not drop the packet received in the RX
857 * descriptor for which the allocation of a new mbuf failed.
858 * Thus, it allows that packet to be retrieved later, once
859 * mbufs have been freed in the meantime.
860 * As a side effect, holding RX descriptors instead of
861 * systematically giving them back to the NIC may lead to
862 * RX ring exhaustion situations.
863 * However, the NIC can gracefully prevent such situations
864 * from happening by sending specific "back-pressure" flow
865 * control frames to its peer(s).
867 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
868 "staterr=0x%x data_len=%u\n",
869 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
870 (unsigned) rx_id, (unsigned) staterr,
871 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
873 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
875 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
876 "queue_id=%u\n", (unsigned) rxq->port_id,
877 (unsigned) rxq->queue_id);
878 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
883 rxe = &sw_ring[rx_id];
885 if (rx_id == rxq->nb_rx_desc)
888 /* Prefetch next mbuf while processing current one. */
889 rte_igb_prefetch(sw_ring[rx_id].mbuf);
892 * When the next RX descriptor is on a cache-line boundary,
893 * prefetch the next 4 RX descriptors and the next 8 pointers
896 if ((rx_id & 0x3) == 0) {
897 rte_igb_prefetch(&rx_ring[rx_id]);
898 rte_igb_prefetch(&sw_ring[rx_id]);
902 * Update RX descriptor with the physical address of the new
903 * data buffer of the newly allocated mbuf.
907 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
908 rxdp->read.pkt_addr = dma;
909 rxdp->read.hdr_addr = dma;
912 * Set data length & data buffer address of mbuf.
914 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
915 rxm->pkt.data_len = data_len;
916 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
919 * If this is the first buffer of the received packet,
920 * set the pointer to the first mbuf of the packet and
921 * initialize its context.
922 * Otherwise, update the total length and the number of segments
923 * of the current scattered packet, and update the pointer to
924 * the last mbuf of the current packet.
926 if (first_seg == NULL) {
928 first_seg->pkt.pkt_len = data_len;
929 first_seg->pkt.nb_segs = 1;
931 first_seg->pkt.pkt_len += data_len;
932 first_seg->pkt.nb_segs++;
933 last_seg->pkt.next = rxm;
937 * If this is not the last buffer of the received packet,
938 * update the pointer to the last mbuf of the current scattered
939 * packet and continue to parse the RX ring.
941 if (! (staterr & E1000_RXD_STAT_EOP)) {
947 * This is the last buffer of the received packet.
948 * If the CRC is not stripped by the hardware:
949 * - Subtract the CRC length from the total packet length.
950 * - If the last buffer only contains the whole CRC or a part
951 * of it, free the mbuf associated with the last buffer.
952 * If part of the CRC is also contained in the previous
953 * mbuf, subtract the length of that CRC part from the
954 * data length of the previous mbuf.
956 rxm->pkt.next = NULL;
957 if (unlikely(rxq->crc_len > 0)) {
958 first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
959 if (data_len <= ETHER_CRC_LEN) {
960 rte_pktmbuf_free_seg(rxm);
961 first_seg->pkt.nb_segs--;
962 last_seg->pkt.data_len = (uint16_t)
963 (last_seg->pkt.data_len -
964 (ETHER_CRC_LEN - data_len));
965 last_seg->pkt.next = NULL;
968 (uint16_t) (data_len - ETHER_CRC_LEN);
972 * Initialize the first mbuf of the returned packet:
973 * - RX port identifier,
974 * - hardware offload data, if any:
976 * - IP checksum flag,
977 * - VLAN TCI, if any,
980 first_seg->pkt.in_port = rxq->port_id;
981 first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
984 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
985 * set in the pkt_flags field.
987 first_seg->pkt.vlan_macip.f.vlan_tci =
988 rte_le_to_cpu_16(rxd.wb.upper.vlan);
989 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
990 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
991 pkt_flags = (uint16_t)(pkt_flags |
992 rx_desc_status_to_pkt_flags(staterr));
993 pkt_flags = (uint16_t)(pkt_flags |
994 rx_desc_error_to_pkt_flags(staterr));
995 first_seg->ol_flags = pkt_flags;
997 /* Prefetch data of first segment, if configured to do so. */
998 rte_packet_prefetch(first_seg->pkt.data);
1001 * Store the mbuf address into the next entry of the array
1002 * of returned packets.
1004 rx_pkts[nb_rx++] = first_seg;
1007 * Setup receipt context for a new packet.
1013 * Record index of the next RX descriptor to probe.
1015 rxq->rx_tail = rx_id;
1018 * Save receive context.
1020 rxq->pkt_first_seg = first_seg;
1021 rxq->pkt_last_seg = last_seg;
1024 * If the number of free RX descriptors is greater than the RX free
1025 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1027 * Update the RDT with the value of the last processed RX descriptor
1028 * minus 1, to guarantee that the RDT register is never equal to the
1029 * RDH register, which creates a "full" ring situation from the
1030 * hardware point of view...
1032 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1033 if (nb_hold > rxq->rx_free_thresh) {
1034 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1035 "nb_hold=%u nb_rx=%u\n",
1036 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1037 (unsigned) rx_id, (unsigned) nb_hold,
1039 rx_id = (uint16_t) ((rx_id == 0) ?
1040 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1041 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1044 rxq->nb_rx_hold = nb_hold;
1049 * Rings setup and release.
1051 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
1052 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1053 * This also makes good use of the cache line size:
1054 * the H/W supports cache line sizes of up to 128 bytes.
1056 #define IGB_ALIGN 128
1059 * Maximum number of Ring Descriptors.
1061 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1062 * descriptors must meet the following condition:
1063 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1065 #define IGB_MIN_RING_DESC 32
1066 #define IGB_MAX_RING_DESC 4096
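/*
 * Editorial note: advanced RX/TX descriptors are 16 bytes each, so the
 * 128-byte RDLEN/TDLEN granularity translates into a descriptor count that
 * must be a multiple of IGB_ALIGN / 16 = 8; the queue setup functions below
 * enforce exactly this condition.
 */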
1068 static const struct rte_memzone *
1069 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1070 uint16_t queue_id, uint32_t ring_size, int socket_id)
1072 char z_name[RTE_MEMZONE_NAMESIZE];
1073 const struct rte_memzone *mz;
1075 rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1076 dev->driver->pci_drv.name, ring_name,
1077 dev->data->port_id, queue_id);
1078 mz = rte_memzone_lookup(z_name);
1082 return rte_memzone_reserve_aligned(z_name, ring_size,
1083 socket_id, 0, IGB_ALIGN);
1087 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1091 if (txq->sw_ring != NULL) {
1092 for (i = 0; i < txq->nb_tx_desc; i++) {
1093 if (txq->sw_ring[i].mbuf != NULL) {
1094 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1095 txq->sw_ring[i].mbuf = NULL;
1102 igb_tx_queue_release(struct igb_tx_queue *txq)
1105 igb_tx_queue_release_mbufs(txq);
1106 rte_free(txq->sw_ring);
1112 eth_igb_tx_queue_release(void *txq)
1114 igb_tx_queue_release(txq);
1118 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1123 memset((void*)&txq->ctx_cache, 0,
1124 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1128 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1130 struct igb_tx_entry *txe = txq->sw_ring;
1133 struct e1000_hw *hw;
1135 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1136 size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
1137 /* Zero out HW ring memory */
1138 for (i = 0; i < size; i++) {
1139 ((volatile char *)txq->tx_ring)[i] = 0;
1142 /* Initialize ring entries */
1143 prev = (uint16_t)(txq->nb_tx_desc - 1);
1144 for (i = 0; i < txq->nb_tx_desc; i++) {
1145 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1147 txd->wb.status = E1000_TXD_STAT_DD;
1150 txe[prev].next_id = i;
1154 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1155 /* 82575 specific, each tx queue will use 2 hw contexts */
1156 if (hw->mac.type == e1000_82575)
1157 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1159 igb_reset_tx_queue_stat(txq);
1163 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1166 unsigned int socket_id,
1167 const struct rte_eth_txconf *tx_conf)
1169 const struct rte_memzone *tz;
1170 struct igb_tx_queue *txq;
1171 struct e1000_hw *hw;
1174 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1177 * Validate number of transmit descriptors.
1178 * It must not exceed hardware maximum, and must be multiple
1181 if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
1182 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1187 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1190 if (tx_conf->tx_free_thresh != 0)
1191 RTE_LOG(WARNING, PMD,
1192 "The tx_free_thresh parameter is not "
1193 "used for the 1G driver.\n");
1194 if (tx_conf->tx_rs_thresh != 0)
1195 RTE_LOG(WARNING, PMD,
1196 "The tx_rs_thresh parameter is not "
1197 "used for the 1G driver.\n");
1198 if (tx_conf->tx_thresh.wthresh == 0)
1199 RTE_LOG(WARNING, PMD,
1200 "To improve 1G driver performance, consider setting "
1201 "the TX WTHRESH value to 4, 8, or 16.\n");
1203 /* Free memory prior to re-allocation if needed */
1204 if (dev->data->tx_queues[queue_idx] != NULL)
1205 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1207 /* First allocate the tx queue data structure */
1208 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1214 * Allocate TX ring hardware descriptors. A memzone large enough to
1215 * handle the maximum ring size is allocated in order to allow for
1216 * resizing in later calls to the queue setup function.
1218 size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
1219 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1222 igb_tx_queue_release(txq);
1226 txq->nb_tx_desc = nb_desc;
1227 txq->pthresh = tx_conf->tx_thresh.pthresh;
1228 txq->hthresh = tx_conf->tx_thresh.hthresh;
1229 txq->wthresh = tx_conf->tx_thresh.wthresh;
1230 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1232 txq->queue_id = queue_idx;
1233 txq->port_id = dev->data->port_id;
1235 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1236 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1237 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1239 /* Allocate software ring */
1240 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1241 sizeof(struct igb_tx_entry) * nb_desc,
1243 if (txq->sw_ring == NULL) {
1244 igb_tx_queue_release(txq);
1247 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1248 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1250 igb_reset_tx_queue(txq, dev);
1251 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1252 dev->data->tx_queues[queue_idx] = txq;
1258 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1262 if (rxq->sw_ring != NULL) {
1263 for (i = 0; i < rxq->nb_rx_desc; i++) {
1264 if (rxq->sw_ring[i].mbuf != NULL) {
1265 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1266 rxq->sw_ring[i].mbuf = NULL;
1273 igb_rx_queue_release(struct igb_rx_queue *rxq)
1276 igb_rx_queue_release_mbufs(rxq);
1277 rte_free(rxq->sw_ring);
1283 eth_igb_rx_queue_release(void *rxq)
1285 igb_rx_queue_release(rxq);
1289 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1294 /* Zero out HW ring memory */
1295 size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
1296 for (i = 0; i < size; i++) {
1297 ((volatile char *)rxq->rx_ring)[i] = 0;
1301 rxq->pkt_first_seg = NULL;
1302 rxq->pkt_last_seg = NULL;
1306 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1309 unsigned int socket_id,
1310 const struct rte_eth_rxconf *rx_conf,
1311 struct rte_mempool *mp)
1313 const struct rte_memzone *rz;
1314 struct igb_rx_queue *rxq;
1315 struct e1000_hw *hw;
1318 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1321 * Validate number of receive descriptors.
1322 * It must not exceed hardware maximum, and must be multiple
1325 if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
1326 (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
1330 /* Free memory prior to re-allocation if needed */
1331 if (dev->data->rx_queues[queue_idx] != NULL) {
1332 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1333 dev->data->rx_queues[queue_idx] = NULL;
1336 /* First allocate the RX queue data structure. */
1337 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1342 rxq->nb_rx_desc = nb_desc;
1343 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1344 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1345 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1346 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1348 rxq->drop_en = rx_conf->rx_drop_en;
1349 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1350 rxq->queue_id = queue_idx;
1351 rxq->port_id = dev->data->port_id;
1352 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1356 * Allocate RX ring hardware descriptors. A memzone large enough to
1357 * handle the maximum ring size is allocated in order to allow for
1358 * resizing in later calls to the queue setup function.
1360 size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
1361 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1363 igb_rx_queue_release(rxq);
1366 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1367 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1368 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1370 /* Allocate software ring. */
1371 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1372 sizeof(struct igb_rx_entry) * nb_desc,
1374 if (rxq->sw_ring == NULL) {
1375 igb_rx_queue_release(rxq);
1378 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1379 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1381 dev->data->rx_queues[queue_idx] = rxq;
1382 igb_reset_rx_queue(rxq);
1388 igb_dev_clear_queues(struct rte_eth_dev *dev)
1391 struct igb_tx_queue *txq;
1392 struct igb_rx_queue *rxq;
1394 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1395 txq = dev->data->tx_queues[i];
1397 igb_tx_queue_release_mbufs(txq);
1398 igb_reset_tx_queue(txq, dev);
1402 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1403 rxq = dev->data->rx_queues[i];
1405 igb_rx_queue_release_mbufs(rxq);
1406 igb_reset_rx_queue(rxq);
1412 * Receive Side Scaling (RSS).
1413 * See section 7.1.1.7 in the following document:
1414 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1417 * The source and destination IP addresses of the IP header and the source and
1418 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1419 * against a configurable random key to compute a 32-bit RSS hash result.
1420 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1421 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1422 * RSS output index, which is used as the index of the RX queue where the
1423 * received packet is stored.
1424 * The following output is supplied in the RX write-back descriptor:
1425 * - 32-bit result of the Microsoft RSS hash function,
1426 * - 4-bit RSS type field.
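/*
 * Editorial sketch (not used by the driver): how the hardware conceptually
 * derives the destination queue from the hash described above. The helper
 * name is hypothetical, and it ignores the 82575-specific shift that
 * igb_rss_configure() applies when programming the RETA below.
 */
static inline uint8_t
igb_example_rss_queue(uint32_t rss_hash, const uint8_t reta[128])
{
	/*
	 * The 7 LSBs of the 32-bit hash select one of the 128 RETA entries;
	 * the low 3 bits of that entry give the RX queue index.
	 */
	return (uint8_t)(reta[rss_hash & 0x7F] & 0x07);
}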
1430 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1431 * Used as the default key.
1433 static uint8_t rss_intel_key[40] = {
1434 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1435 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1436 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1437 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1438 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1442 igb_rss_disable(struct rte_eth_dev *dev)
1444 struct e1000_hw *hw;
1447 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1448 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1449 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1450 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1454 igb_rss_configure(struct rte_eth_dev *dev)
1456 struct e1000_hw *hw;
1464 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1466 rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1467 if (rss_hf == 0) /* Disable RSS. */ {
1468 igb_rss_disable(dev);
1471 hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1472 if (hash_key == NULL)
1473 hash_key = rss_intel_key; /* Default hash key. */
1475 /* Fill in RSS hash key. */
1476 for (i = 0; i < 10; i++) {
1477 rss_key = hash_key[(i * 4)];
1478 rss_key |= hash_key[(i * 4) + 1] << 8;
1479 rss_key |= hash_key[(i * 4) + 2] << 16;
1480 rss_key |= hash_key[(i * 4) + 3] << 24;
1481 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1484 /* Fill in redirection table. */
1485 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1486 for (i = 0; i < 128; i++) {
1493 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1494 i % dev->data->nb_rx_queues : 0);
1495 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1497 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1500 /* Set configured hashing functions in MRQC register. */
1501 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1502 if (rss_hf & ETH_RSS_IPV4)
1503 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1504 if (rss_hf & ETH_RSS_IPV4_TCP)
1505 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1506 if (rss_hf & ETH_RSS_IPV6)
1507 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1508 if (rss_hf & ETH_RSS_IPV6_EX)
1509 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1510 if (rss_hf & ETH_RSS_IPV6_TCP)
1511 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1512 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1513 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1514 if (rss_hf & ETH_RSS_IPV4_UDP)
1515 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1516 if (rss_hf & ETH_RSS_IPV6_UDP)
1517 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1518 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1519 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1520 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1523 /*********************************************************************
1525 * Enable receive unit.
1527 **********************************************************************/
1530 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1532 struct igb_rx_entry *rxe = rxq->sw_ring;
1536 /* Initialize software ring entries. */
1537 for (i = 0; i < rxq->nb_rx_desc; i++) {
1538 volatile union e1000_adv_rx_desc *rxd;
1539 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1542 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1543 "queue_id=%hu\n", rxq->queue_id);
1544 igb_rx_queue_release(rxq);
1548 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1549 rxd = &rxq->rx_ring[i];
1550 rxd->read.hdr_addr = dma_addr;
1551 rxd->read.pkt_addr = dma_addr;
1559 eth_igb_rx_init(struct rte_eth_dev *dev)
1561 struct e1000_hw *hw;
1562 struct igb_rx_queue *rxq;
1563 struct rte_pktmbuf_pool_private *mbp_priv;
1568 uint16_t rctl_bsize;
1572 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1576 * Make sure receives are disabled while setting
1577 * up the descriptor ring.
1579 rctl = E1000_READ_REG(hw, E1000_RCTL);
1580 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1583 * Configure support of jumbo frames, if any.
1585 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1586 rctl |= E1000_RCTL_LPE;
1588 /* Set maximum packet length. */
1589 E1000_WRITE_REG(hw, E1000_RLPML,
1590 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1592 rctl &= ~E1000_RCTL_LPE;
1594 /* Configure and enable each RX queue. */
1596 dev->rx_pkt_burst = eth_igb_recv_pkts;
1597 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1601 rxq = dev->data->rx_queues[i];
1603 /* Allocate buffers for descriptor rings and set up queue */
1604 ret = igb_alloc_rx_queue_mbufs(rxq);
1609 * Reset crc_len in case it was changed after queue setup by a
1613 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1616 bus_addr = rxq->rx_ring_phys_addr;
1617 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1619 sizeof(union e1000_adv_rx_desc));
1620 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1621 (uint32_t)(bus_addr >> 32));
1622 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1624 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1627 * Configure RX buffer size.
1629 mbp_priv = (struct rte_pktmbuf_pool_private *)
1630 ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
1631 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1632 RTE_PKTMBUF_HEADROOM);
1633 if (buf_size >= 1024) {
1635 * Configure the BSIZEPACKET field of the SRRCTL
1636 * register of the queue.
1637 * Value is in 1 KB resolution, from 1 KB to 127 KB.
1638 * If this field is equal to 0b, then RCTL.BSIZE
1639 * determines the RX packet buffer size.
1641 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1642 E1000_SRRCTL_BSIZEPKT_MASK);
1643 buf_size = (uint16_t) ((srrctl &
1644 E1000_SRRCTL_BSIZEPKT_MASK) <<
1645 E1000_SRRCTL_BSIZEPKT_SHIFT);
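/*
 * Worked example (editorial, assuming the default 128-byte headroom): a
 * 2176-byte mbuf data room gives buf_size = 2048; 2048 >> 10 = 2 is
 * programmed into BSIZEPACKET, and the effective buffer size is rounded
 * back to 2 KB by the computation above.
 */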
1647 if (dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE
1649 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1650 dev->data->scattered_rx = 1;
1654 * Use BSIZE field of the device RCTL register.
1656 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1657 rctl_bsize = buf_size;
1658 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1659 dev->data->scattered_rx = 1;
1662 /* Set whether packets are dropped when no descriptors are available */
1664 srrctl |= E1000_SRRCTL_DROP_EN;
1666 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
1668 /* Enable this RX queue. */
1669 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
1670 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1671 rxdctl &= 0xFFF00000;
1672 rxdctl |= (rxq->pthresh & 0x1F);
1673 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1674 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1675 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1679 * Setup BSIZE field of RCTL register, if needed.
1680 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
1681 * register, since the code above configures the SRRCTL register of
1682 * the RX queue in such a case.
1683 * All configurable sizes are:
1684 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1685 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1686 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1687 * 2048: rctl |= E1000_RCTL_SZ_2048;
1688 * 1024: rctl |= E1000_RCTL_SZ_1024;
1689 * 512: rctl |= E1000_RCTL_SZ_512;
1690 * 256: rctl |= E1000_RCTL_SZ_256;
1692 if (rctl_bsize > 0) {
1693 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1694 rctl |= E1000_RCTL_SZ_512;
1695 else /* 256 <= buf_size < 512 - use 256 */
1696 rctl |= E1000_RCTL_SZ_256;
1700 * Configure RSS if device configured with multiple RX queues.
1702 if (dev->data->nb_rx_queues > 1)
1703 igb_rss_configure(dev);
1705 igb_rss_disable(dev);
1708 * Setup the Checksum Register.
1709 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1711 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1712 rxcsum |= E1000_RXCSUM_PCSD;
1714 /* Enable both L3/L4 rx checksum offload */
1715 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1716 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1718 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
1719 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1721 /* Setup the Receive Control Register. */
1722 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1723 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1725 /* set STRCRC bit in all queues for Powerville/Springville */
1726 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
1727 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1728 uint32_t dvmolr = E1000_READ_REG(hw,
1730 dvmolr |= E1000_DVMOLR_STRCRC;
1731 E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
1735 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1737 /* clear STRCRC bit in all queues for Powerville/Springville */
1738 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
1739 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1740 uint32_t dvmolr = E1000_READ_REG(hw,
1742 dvmolr &= ~E1000_DVMOLR_STRCRC;
1743 E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
1748 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1749 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1750 E1000_RCTL_RDMTS_HALF |
1751 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1753 /* Make sure VLAN Filters are off. */
1754 rctl &= ~E1000_RCTL_VFE;
1755 /* Don't store bad packets. */
1756 rctl &= ~E1000_RCTL_SBP;
1758 /* Enable Receives. */
1759 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1762 * Setup the HW Rx Head and Tail Descriptor Pointers.
1763 * This needs to be done after enable.
1765 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1766 rxq = dev->data->rx_queues[i];
1767 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1768 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
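/*
 * Editorial note: the RDT value just written (nb_rx_desc - 1) hands all
 * but one descriptor to the hardware up front, while keeping RDT from ever
 * matching RDH (the RX burst functions above maintain the same rule at
 * run time).
 */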
1774 /*********************************************************************
1776 * Enable transmit unit.
1778 **********************************************************************/
1780 eth_igb_tx_init(struct rte_eth_dev *dev)
1782 struct e1000_hw *hw;
1783 struct igb_tx_queue *txq;
1788 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1790 /* Setup the Base and Length of the Tx Descriptor Rings. */
1791 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1793 txq = dev->data->tx_queues[i];
1794 bus_addr = txq->tx_ring_phys_addr;
1796 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1798 sizeof(union e1000_adv_tx_desc));
1799 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1800 (uint32_t)(bus_addr >> 32));
1801 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1803 /* Setup the HW Tx Head and Tail descriptor pointers. */
1804 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1805 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1807 /* Setup Transmit threshold registers. */
1808 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1809 txdctl |= txq->pthresh & 0x1F;
1810 txdctl |= ((txq->hthresh & 0x1F) << 8);
1811 txdctl |= ((txq->wthresh & 0x1F) << 16);
1812 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1813 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1816 /* Program the Transmit Control Register. */
1817 tctl = E1000_READ_REG(hw, E1000_TCTL);
1818 tctl &= ~E1000_TCTL_CT;
1819 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1820 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1822 e1000_config_collision_dist(hw);
1824 /* This write will effectively turn on the transmit unit. */
1825 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1828 /*********************************************************************
1830 * Enable VF receive unit.
1832 **********************************************************************/
1834 eth_igbvf_rx_init(struct rte_eth_dev *dev)
1836 struct e1000_hw *hw;
1837 struct igb_rx_queue *rxq;
1838 struct rte_pktmbuf_pool_private *mbp_priv;
1841 uint16_t rctl_bsize;
1845 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847 /* Configure and enable each RX queue. */
1849 dev->rx_pkt_burst = eth_igb_recv_pkts;
1850 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1854 rxq = dev->data->rx_queues[i];
1856 /* Allocate buffers for descriptor rings and set up queue */
1857 ret = igb_alloc_rx_queue_mbufs(rxq);
1861 bus_addr = rxq->rx_ring_phys_addr;
1862 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1864 sizeof(union e1000_adv_rx_desc));
1865 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1866 (uint32_t)(bus_addr >> 32));
1867 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1869 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1872 * Configure RX buffer size.
1874 mbp_priv = (struct rte_pktmbuf_pool_private *)
1875 ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
1876 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
1877 RTE_PKTMBUF_HEADROOM);
1878 if (buf_size >= 1024) {
1880 * Configure the BSIZEPACKET field of the SRRCTL
1881 * register of the queue.
1882 * Value is in 1 KB resolution, from 1 KB to 127 KB.
1883 * If this field is equal to 0b, then RCTL.BSIZE
1884 * determines the RX packet buffer size.
1886 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
1887 E1000_SRRCTL_BSIZEPKT_MASK);
1888 buf_size = (uint16_t) ((srrctl &
1889 E1000_SRRCTL_BSIZEPKT_MASK) <<
1890 E1000_SRRCTL_BSIZEPKT_SHIFT);
1892 if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
1893 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1894 dev->data->scattered_rx = 1;
1898 * Use BSIZE field of the device RCTL register.
1900 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
1901 rctl_bsize = buf_size;
1902 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
1903 dev->data->scattered_rx = 1;
1906 /* Set whether packets are dropped when no descriptors are available */
1908 srrctl |= E1000_SRRCTL_DROP_EN;
1910 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
1912 /* Enable this RX queue. */
1913 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
1914 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1915 rxdctl &= 0xFFF00000;
1916 rxdctl |= (rxq->pthresh & 0x1F);
1917 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
1918 if (hw->mac.type == e1000_82576) {
1920 * Workaround for the 82576 VF erratum:
1921 * force WTHRESH to 1
1922 * to avoid write-back sometimes not being triggered.
1925 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
1928 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
1929 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1933 * Setup the HW Rx Head and Tail Descriptor Pointers.
1934 * This needs to be done after enable.
1936 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1937 rxq = dev->data->rx_queues[i];
1938 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1939 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1945 /*********************************************************************
1947 * Enable VF transmit unit.
1949 **********************************************************************/
1951 eth_igbvf_tx_init(struct rte_eth_dev *dev)
1953 struct e1000_hw *hw;
1954 struct igb_tx_queue *txq;
1958 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1960 /* Setup the Base and Length of the Tx Descriptor Rings. */
1961 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1964 txq = dev->data->tx_queues[i];
1965 bus_addr = txq->tx_ring_phys_addr;
1966 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1968 sizeof(union e1000_adv_tx_desc));
1969 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1970 (uint32_t)(bus_addr >> 32));
1971 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1973 /* Setup the HW Tx Head and Tail descriptor pointers. */
1974 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1975 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1977 /* Setup Transmit threshold registers. */
1978 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1979 txdctl |= txq->pthresh & 0x1F;
1980 txdctl |= ((txq->hthresh & 0x1F) << 8);
1981 if (hw->mac.type == e1000_82576) {
1983 * Workaround for the 82576 VF erratum:
1984 * force WTHRESH to 1
1985 * to avoid write-back sometimes not being triggered.
1988 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
1991 txdctl |= ((txq->wthresh & 0x1F) << 16);
1992 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1993 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);