/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
	return (m);
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	(uint64_t) ((mb)->buf_physaddr + \
	(uint64_t) ((char *)((mb)->pkt.data) - \
	(char *)(mb)->buf_addr))

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
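
/*
 * Illustrative sketch (not part of the original driver): for a freshly
 * allocated mbuf, whose data pointer sits exactly RTE_PKTMBUF_HEADROOM
 * bytes past buf_addr, the two macros above yield the same DMA address.
 * The helper below is a hypothetical usage example, not an existing API.
 */
static inline int
igb_dma_addr_macros_agree(struct rte_mbuf *mb)
{
	/*
	 * Holds whenever (char *)mb->pkt.data - (char *)mb->buf_addr
	 * == RTE_PKTMBUF_HEADROOM, i.e. right after allocation.
	 */
	return RTE_MBUF_DATA_DMA_ADDR(mb) ==
			RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb);
}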
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct igb_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct igb_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each RX queue.
 */
struct igb_rx_queue {
	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
	volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
	struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
	uint16_t            rx_tail;    /**< current value of RDT register. */
	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t            queue_id;   /**< RX queue index. */
	uint8_t             port_id;    /**< Device port identifier. */
	uint8_t             pthresh;    /**< Prefetch threshold register. */
	uint8_t             hthresh;    /**< Host threshold register. */
	uint8_t             wthresh;    /**< Write-back threshold register. */
	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
};
/**
 * Hardware context number.
 */
enum igb_advctx_num {
	IGB_CTX_0   = 0, /**< CTX0    */
	IGB_CTX_1   = 1, /**< CTX1    */
	IGB_CTX_NUM = 2, /**< CTX_NUM */
};
/**
 * Structure to check if a new context needs to be built.
 */
struct igb_advctx_info {
	uint16_t flags;    /**< ol_flags related to context build. */
	uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
	union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};
/**
 * Structure associated with each TX queue.
 */
struct igb_tx_queue {
	volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
	struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
	uint32_t               txd_type;   /**< Device-specific TXD type */
	uint16_t               nb_tx_desc; /**< number of TX descriptors. */
	uint16_t               tx_tail;  /**< Current value of TDT register. */
	uint16_t               tx_head;
	/**< Index of first used TX descriptor. */
	uint16_t               queue_id; /**< TX queue index. */
	uint8_t                port_id;  /**< Device port identifier. */
	uint8_t                pthresh;  /**< Prefetch threshold register. */
	uint8_t                hthresh;  /**< Host threshold register. */
	uint8_t                wthresh;  /**< Write-back threshold register. */
	uint32_t               ctx_curr;
	/**< Current used hardware descriptor. */
	uint32_t               ctx_start;
	/**< Start context position for transmit queue. */
	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
	/**< Hardware context history.*/
};
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_igb_prefetch(p)	rte_prefetch0(p)
#else
#define rte_igb_prefetch(p)	do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

/*
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is kept as a separate function so it is easier to look for
 * optimization opportunities here; rework is required to go with the
 * pre-defined values.
 */
static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
		volatile struct e1000_adv_tx_context_desc *ctx_txd,
		uint16_t ol_flags, uint32_t vlan_macip_lens)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	uint32_t ctx_idx, ctx_curr;
	uint32_t cmp_mask;

	ctx_curr = txq->ctx_curr;
	ctx_idx = ctx_curr + txq->ctx_start;

	cmp_mask = 0;
	type_tucmd_mlhl = 0;

	if (ol_flags & PKT_TX_VLAN_PKT) {
		cmp_mask |= TX_VLAN_CMP_MASK;
	}

	if (ol_flags & PKT_TX_IP_CKSUM) {
		type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
		cmp_mask |= TX_MAC_LEN_CMP_MASK;
	}

	/* Specify which HW CTX to upload. */
	mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_TCP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_SCTP_CKSUM:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	default:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		break;
	}

	txq->ctx_cache[ctx_curr].flags = ol_flags;
	txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
	txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
		vlan_macip_lens & cmp_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
		uint32_t vlan_macip_lens)
{
	/* If match with the current context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* If match with the second context */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* Mismatch, use the previous context */
	return (IGB_CTX_NUM);
}
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
	static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
	uint32_t tmp;

	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return tmp;
}
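
/*
 * Worked example (illustrative): ol_flags carrying both PKT_TX_IP_CKSUM
 * and PKT_TX_TCP_CKSUM select, via the branchless table lookups above,
 * E1000_ADVTXD_POPTS_IXSM | E1000_ADVTXD_POPTS_TXSM for olinfo_status.
 */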
static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
{
	static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
	return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}
uint16_t
eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct igb_tx_queue *txq;
	struct igb_tx_entry *sw_ring;
	struct igb_tx_entry *txe, *txn;
	volatile union e1000_adv_tx_desc *txr;
	volatile union e1000_adv_tx_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	uint32_t olinfo_status;
	uint32_t cmd_type_len;
	uint32_t pkt_len;
	uint16_t slen;
	uint16_t ol_flags;
	uint16_t tx_end;
	uint16_t tx_id;
	uint16_t tx_last;
	uint16_t nb_tx;
	uint16_t tx_ol_req;
	uint32_t new_ctx = 0;
	uint32_t ctx = 0;
	uint32_t vlan_macip_lens;

	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt.pkt_len;

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the VLAN Tag Identifier, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);

		ol_flags = tx_pkt->ol_flags;
		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);

		/* If a Context Descriptor needs to be built. */
		if (tx_ol_req) {
			ctx = what_advctx_update(txq, tx_ol_req,
				vlan_macip_lens);
			/* Only allocate context descriptor if required. */
			new_ctx = (ctx == IGB_CTX_NUM);
			ctx = txq->ctx_curr;
			tx_last = (uint16_t) (tx_last + new_ctx);
		}
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u\n",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);

		/*
		 * Check if there are enough free descriptors in the TX ring
		 * to transmit the next packet.
		 * This operation is based on the two following rules:
		 *
		 *   1- Only check that the last needed TX descriptor can be
		 *      allocated (by construction, if that descriptor is free,
		 *      all intermediate ones are also free).
		 *
		 *      For this purpose, the index of the last TX descriptor
		 *      used for a packet (the "last descriptor" of a packet)
		 *      is recorded in the TX entries (the last one included)
		 *      that are associated with all TX descriptors allocated
		 *      for that packet.
		 *
		 *   2- Avoid allocating the last free TX descriptor of the
		 *      ring, in order to never set the TDT register with the
		 *      same value stored in parallel by the NIC in the TDH
		 *      register, which would make the TX engine of the NIC
		 *      enter a deadlock situation.
		 *
		 *      By extension, avoid allocating a free descriptor that
		 *      belongs to the last set of free descriptors allocated
		 *      to the same packet previously transmitted.
		 */
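
		/*
		 * Worked example (illustrative): with a 512-descriptor ring,
		 * tx_id == 510 and a 4-segment packet give tx_last == 513,
		 * wrapped to 1. Rule 2 then follows the "last descriptor"
		 * chain starting at sw_ring[1] so that the TDT value written
		 * below can never catch up with TDH.
		 */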
		/*
		 * The "last descriptor" of the previously sent packet, if any,
		 * which used the last descriptor to allocate.
		 */
		tx_end = sw_ring[tx_last].last_id;

		/*
		 * The next descriptor following that "last descriptor" in the
		 * ring.
		 */
		tx_end = sw_ring[tx_end].next_id;

		/*
		 * The "last descriptor" associated with that next descriptor.
		 */
		tx_end = sw_ring[tx_end].last_id;

		/*
		 * Check that this descriptor is free.
		 */
		if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
			if (nb_tx == 0)
				return (0);
			goto end_of_tx;
		}

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - E1000_ADVTXD_DTYP_DATA
		 *   - E1000_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - E1000_ADVTXD_DCMD_IFCS
		 *   - E1000_ADVTXD_MAC_1588
		 *   - E1000_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - E1000_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - E1000_TXD_CMD_RS
		 */
		cmd_type_len = txq->txd_type |
			E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
		olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
		if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
		if (tx_ol_req) {
			/* Setup TX Advanced context descriptor if required */
			if (new_ctx) {
				volatile struct e1000_adv_tx_context_desc *
				    ctx_txd;

				ctx_txd = (volatile struct
				    e1000_adv_tx_context_desc *)
				    &txr[tx_id];

				txn = &sw_ring[txe->next_id];
				RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

				if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}

				igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
				    vlan_macip_lens);

				txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}

			/* Setup the TX Advanced Data Descriptor */
			cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
			olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
		}

		m_seg = tx_pkt;
		do {
			txn = &sw_ring[txe->next_id];
			txd = &txr[tx_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/*
			 * Set up transmit descriptor.
			 */
			slen = (uint16_t) m_seg->pkt.data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			txd->read.buffer_addr =
				rte_cpu_to_le_64(buf_dma_addr);
			txd->read.cmd_type_len =
				rte_cpu_to_le_32(cmd_type_len | slen);
			txd->read.olinfo_status =
				rte_cpu_to_le_32(olinfo_status);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->pkt.next;
		} while (m_seg != NULL);

		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 * and Report Status (RS).
		 */
		txd->read.cmd_type_len |=
			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	}
 end_of_tx:
	rte_wmb();

	/*
	 * Set the Transmit Descriptor Tail (TDT).
	 */
	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
		   (unsigned) tx_id, (unsigned) nb_tx);
	txq->tx_tail = tx_id;

	return (nb_tx);
}
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint16_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
	uint16_t pkt_flags;

	static uint16_t ip_pkt_types_map[16] = {
		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
	};

#if defined(RTE_LIBRTE_IEEE1588)
	static uint32_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

	pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
				ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#else
	pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif
	return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
					0 : PKT_RX_RSS_HASH));
}
static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint16_t pkt_flags;

	/* Check if VLAN present */
	pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
				PKT_RX_VLAN_PKT : 0);

#if defined(RTE_LIBRTE_IEEE1588)
	if (rx_status & E1000_RXD_STAT_TMST)
		pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
	return pkt_flags;
}
static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	/*
	 * Bit 30: IPE, IPv4 checksum error
	 * Bit 29: L4I, L4 integrity error
	 */
	static uint16_t error_to_pkt_flags_map[4] = {
		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
	return error_to_pkt_flags_map[(rx_status >>
		E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
}
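
/*
 * Worked example (illustrative): a descriptor with only the IPE bit set
 * yields index 2 into the table above, i.e. PKT_RX_IP_CKSUM_BAD; with
 * both IPE and L4I set, index 3 reports both checksum errors at once.
 */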
uint16_t
eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma_addr;
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint16_t pkt_flags;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * End of packet.
		 *
		 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
		 * likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x pkt_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma_addr;
		rxdp->read.pkt_addr = dma_addr;

		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
				      rxq->crc_len);
		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch(rxm->pkt.data);
		rxm->pkt.nb_segs = 1;
		rxm->pkt.next = NULL;
		rxm->pkt.pkt_len = pkt_len;
		rxm->pkt.data_len = pkt_len;
		rxm->pkt.in_port = rxq->port_id;

		rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->pkt.vlan_macip.f.vlan_tci =
			rte_le_to_cpu_16(rxd.wb.upper.vlan);

		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		rxm->ol_flags = pkt_flags;

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
uint16_t
eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union e1000_adv_rx_desc *rx_ring;
	volatile union e1000_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union e1000_adv_rx_desc rxd;
	uint64_t dma; /* Physical address of mbuf data buffer */
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint16_t data_len;
	uint16_t pkt_flags;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;

	/*
	 * Retrieve RX context of current packet, if any.
	 */
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;

	while (nb_rx < nb_pkts) {
	next_desc:
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * Descriptor done.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x data_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_igb_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_igb_prefetch(&rx_ring[rx_id]);
			rte_igb_prefetch(&sw_ring[rx_id]);
		}

		/*
		 * Update RX descriptor with the physical address of the new
		 * data buffer of the new allocated mbuf.
		 */
		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.pkt_addr = dma;
		rxdp->read.hdr_addr = dma;

		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
		rxm->pkt.data_len = data_len;
		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->pkt.pkt_len = data_len;
			first_seg->pkt.nb_segs = 1;
		} else {
			first_seg->pkt.pkt_len += data_len;
			first_seg->pkt.nb_segs++;
			last_seg->pkt.next = rxm;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (! (staterr & E1000_RXD_STAT_EOP)) {
			last_seg = rxm;
			goto next_desc;
		}

		/*
		 * This is the last buffer of the received packet.
		 * If the CRC is not stripped by the hardware:
		 *   - Subtract the CRC length from the total packet length.
		 *   - If the last buffer only contains the whole CRC or a part
		 *     of it, free the mbuf associated to the last buffer.
		 *     If part of the CRC is also contained in the previous
		 *     mbuf, subtract the length of that CRC part from the
		 *     data length of the previous mbuf.
		 */
		rxm->pkt.next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
			if (data_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->pkt.nb_segs--;
				last_seg->pkt.data_len = (uint16_t)
					(last_seg->pkt.data_len -
					 (ETHER_CRC_LEN - data_len));
				last_seg->pkt.next = NULL;
			} else
				rxm->pkt.data_len =
					(uint16_t) (data_len - ETHER_CRC_LEN);
		}

		/*
		 * Initialize the first mbuf of the returned packet:
		 *    - RX port identifier,
		 *    - hardware offload data, if any:
		 *      - RSS flag & hash,
		 *      - IP checksum flag,
		 *      - VLAN TCI, if any,
		 *      - error flags.
		 */
		first_seg->pkt.in_port = rxq->port_id;
		first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;

		/*
		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
		 * set in the pkt_flags field.
		 */
		first_seg->pkt.vlan_macip.f.vlan_tci =
			rte_le_to_cpu_16(rxd.wb.upper.vlan);
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		first_seg->ol_flags = pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch(first_seg->pkt.data);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * Save receive context.
	 */
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on a 16-byte boundary. But TDLEN/RDLEN should
 * be a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define IGB_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define IGB_MIN_RING_DESC 32
#define IGB_MAX_RING_DESC 4096
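
/*
 * Worked example (illustrative): an advanced RX/TX descriptor is 16 bytes
 * wide, so a 128-byte-aligned RDLEN/TDLEN requires the descriptor count to
 * be a multiple of 128 / 16 = 8; e.g. 1024 descriptors occupy 16 KB, a
 * valid multiple of 128 bytes between IGB_MIN_RING_DESC and
 * IGB_MAX_RING_DESC.
 */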
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			dev->driver->pci_drv.name, ring_name,
			dev->data->port_id, queue_id);
	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size,
			socket_id, 0, IGB_ALIGN);
}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
	if (txq != NULL) {
		igb_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
eth_igb_tx_queue_release(void *txq)
{
	igb_tx_queue_release(txq);
}

static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
	txq->tx_head = 0;
	txq->tx_tail = 0;
	txq->ctx_curr = 0;
	memset((void*)&txq->ctx_cache, 0,
		IGB_CTX_NUM * sizeof(struct igb_advctx_info));
}
static void
igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
{
	struct igb_tx_entry *txe = txq->sw_ring;
	uint32_t size;
	uint16_t i, prev;
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
	/* Zero out HW ring memory */
	for (i = 0; i < size; i++) {
		((volatile char *)txq->tx_ring)[i] = 0;
	}

	/* Initialize ring entries */
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);

		txd->wb.status = E1000_TXD_STAT_DD;
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->txd_type = E1000_ADVTXD_DTYP_DATA;
	/* 82575 specific, each tx queue will use 2 hw contexts */
	if (hw->mac.type == e1000_82575)
		txq->ctx_start = txq->queue_id * IGB_CTX_NUM;

	igb_reset_tx_queue_stat(txq);
}
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct igb_tx_queue *txq;
	struct e1000_hw *hw;
	uint32_t size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/*
	 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
	 * driver.
	 */
	if (tx_conf->tx_free_thresh != 0)
		RTE_LOG(WARNING, PMD,
			"The tx_free_thresh parameter is not "
			"used for the 1G driver.\n");
	if (tx_conf->tx_rs_thresh != 0)
		RTE_LOG(WARNING, PMD,
			"The tx_rs_thresh parameter is not "
			"used for the 1G driver.\n");
	if (tx_conf->tx_thresh.wthresh == 0)
		RTE_LOG(WARNING, PMD,
			"To improve 1G driver performance, consider setting "
			"the TX WTHRESH value to 4, 8, or 16.\n");

	/* Free memory prior to re-allocation if needed */
	if (dev->data->tx_queues[queue_idx] != NULL)
		igb_tx_queue_release(dev->data->tx_queues[queue_idx]);

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
			  CACHE_LINE_SIZE);
	if (txq == NULL)
		return (-ENOMEM);

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
					size, socket_id);
	if (tz == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}

	txq->nb_tx_desc = nb_desc;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;

	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;

	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc("txq->sw_ring",
				   sizeof(struct igb_tx_entry) * nb_desc,
				   CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		igb_tx_queue_release(txq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	igb_reset_tx_queue(txq, dev);
	dev->tx_pkt_burst = eth_igb_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;

	return (0);
}
static void
igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
	if (rxq != NULL) {
		igb_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

void
eth_igb_rx_queue_release(void *rxq)
{
	igb_rx_queue_release(rxq);
}

static void
igb_reset_rx_queue(struct igb_rx_queue *rxq)
{
	unsigned size;
	unsigned i;

	/* Zero out HW ring memory */
	size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
	for (i = 0; i < size; i++) {
		((volatile char *)rxq->rx_ring)[i] = 0;
	}

	rxq->rx_tail = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct igb_rx_queue *rxq;
	struct e1000_hw *hw;
	unsigned int size;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of IGB_ALIGN.
	 */
	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the RX queue data structure. */
	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
			  CACHE_LINE_SIZE);
	if (rxq == NULL)
		return (-ENOMEM);
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->pthresh = rx_conf->rx_thresh.pthresh;
	rxq->hthresh = rx_conf->rx_thresh.hthresh;
	rxq->wthresh = rx_conf->rx_thresh.wthresh;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
				  ETHER_CRC_LEN);

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
	if (rz == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;

	/* Allocate software ring. */
	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
				   sizeof(struct igb_rx_entry) * nb_desc,
				   CACHE_LINE_SIZE);
	if (rxq->sw_ring == NULL) {
		igb_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;
	igb_reset_rx_queue(rxq);

	return 0;
}
uint32_t
eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct igb_rx_queue *rxq;
	uint32_t nb_pkts_available;
	uint32_t rx_rdh;
	uint32_t rx_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d\n", rx_queue_id);
		return 0;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rx_id = (uint16_t) ((rxq->rx_tail == 0) ? (rxq->nb_rx_desc - 1) :
		(rxq->rx_tail - 1));
	rx_rdh = E1000_PCI_REG(rxq->rdh_reg_addr);
	if (rx_rdh > rx_id)
		nb_pkts_available = rx_rdh - rx_id;
	else
		nb_pkts_available = rx_rdh - rx_id + rxq->nb_rx_desc;

	return (nb_pkts_available);
}
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct igb_tx_queue *txq;
	struct igb_rx_queue *rxq;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			igb_tx_queue_release_mbufs(txq);
			igb_reset_tx_queue(txq, dev);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			igb_rx_queue_release_mbufs(rxq);
			igb_reset_rx_queue(rxq);
		}
	}
}
/*
 * Receive Side Scaling (RSS).
 * See section 7.1.1.7 in the following document:
 *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
 *
 * Principles:
 * The source and destination IP addresses of the IP header and the source and
 * destination ports of TCP/UDP headers, if any, of received packets are hashed
 * against a configurable random key to compute a 32-bit RSS hash result.
 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
 * RSS output index which is used as the RX queue index where to store the
 * received packets.
 * The following output is supplied in the RX write-back descriptor:
 *     - 32-bit result of the Microsoft RSS hash function,
 *     - 4-bit RSS type field.
 */
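
/*
 * Illustrative sketch (not part of the original driver): a software model
 * of the RETA lookup described above. The helper and its reta[] parameter
 * are hypothetical; the hardware performs this mapping internally.
 */
static inline uint8_t
igb_rss_queue_from_hash(const uint8_t reta[128], uint32_t rss_hash)
{
	/*
	 * The seven LSBs of the 32-bit hash index the 128-entry table;
	 * each entry holds a 3-bit RX queue index.
	 */
	return (uint8_t)(reta[rss_hash & 0x7F] & 0x07);
}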
/*
 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
 * Used as the default key.
 */
static uint8_t rss_intel_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
static void
igb_rss_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	uint32_t mrqc;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc &= ~E1000_MRQC_ENABLE_MASK;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
static void
igb_rss_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	uint8_t *hash_key;
	uint32_t rss_key;
	uint32_t mrqc;
	uint32_t shift;
	uint16_t rss_hf;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_hf == 0) /* Disable RSS. */ {
		igb_rss_disable(dev);
		return;
	}
	hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	if (hash_key == NULL)
		hash_key = rss_intel_key; /* Default hash key. */

	/* Fill in RSS hash key. */
	for (i = 0; i < 10; i++) {
		rss_key  = hash_key[(i * 4)];
		rss_key |= hash_key[(i * 4) + 1] << 8;
		rss_key |= hash_key[(i * 4) + 2] << 16;
		rss_key |= hash_key[(i * 4) + 3] << 24;
		E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
	}

	/* Fill in redirection table. */
	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
	for (i = 0; i < 128; i++) {
		union e1000_reta {
			uint32_t dword;
			uint8_t  bytes[4];
		} reta;
		uint8_t q_idx;

		q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
				   i % dev->data->nb_rx_queues : 0);
		reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
		if ((i & 3) == 3)
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
	}

	/* Set configured hashing functions in MRQC register. */
	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
	if (rss_hf & ETH_RSS_IPV4)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
	if (rss_hf & ETH_RSS_IPV4_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hf & ETH_RSS_IPV6)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
	if (rss_hf & ETH_RSS_IPV6_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hf & ETH_RSS_IPV6_TCP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
	if (rss_hf & ETH_RSS_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
	struct igb_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned i;

	/* Initialize software ring entries. */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union e1000_adv_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				"queue_id=%hu\n", rxq->queue_id);
			igb_rx_queue_release(rxq);
			return (-ENOMEM);
		}
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
		rxd = &rxq->rx_ring[i];
		rxd->read.hdr_addr = dma_addr;
		rxd->read.pkt_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Configure support of jumbo frames, if any.
	 */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		rctl |= E1000_RCTL_LPE;

		/*
		 * Set maximum packet length by default, and might be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
					VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
							0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = (struct rte_pktmbuf_pool_private *)
			((char *)rxq->mb_pool + sizeof(struct rte_mempool));
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE) > buf_size){
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/*
	 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
	 * register, since the code above configures the SRRCTL register of
	 * the RX queue in such a case.
	 * All configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	if (rctl_bsize > 0) {
		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
			rctl |= E1000_RCTL_SZ_512;
		else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
	}

	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
	if (dev->data->nb_rx_queues > 1)
		igb_rss_configure(dev);
	else
		igb_rss_disable(dev);

	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	/* Enable both L3/L4 rx checksum offload */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues for Powerville/Springville */
		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(i));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues for Powerville/Springville */
		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(i));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		mbp_priv = (struct rte_pktmbuf_pool_private *)
			((char *)rxq->mb_pool + sizeof(struct rte_mempool));
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				       RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE) > buf_size){
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum:
			 * force set WTHRESH to 1 to avoid Write-Back
			 * not being triggered sometimes.
			 */
			rxq->wthresh = 1;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
		}
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum:
			 * force set WTHRESH to 1 to avoid Write-Back
			 * not being triggered sometimes.
			 */
			txq->wthresh = 1;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
		}
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);