4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
75 /* Bit mask to indicate which bits are required for building the TX context */
76 #define IGB_TX_OFFLOAD_MASK ( \
83 * Structure associated with each descriptor of the RX ring of a RX queue.
86 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
90 * Structure associated with each descriptor of the TX ring of a TX queue.
93 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
94 uint16_t next_id; /**< Index of next descriptor in ring. */
95 uint16_t last_id; /**< Index of last scattered descriptor. */
99 * Structure associated with each RX queue.
101 struct igb_rx_queue {
102 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
103 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
104 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
105 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
106 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
107 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
108 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
109 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
110 uint16_t nb_rx_desc; /**< number of RX descriptors. */
111 uint16_t rx_tail; /**< current value of RDT register. */
112 uint16_t nb_rx_hold; /**< number of held free RX desc. */
113 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
114 uint16_t queue_id; /**< RX queue index. */
115 uint16_t reg_idx; /**< RX queue register index. */
116 uint8_t port_id; /**< Device port identifier. */
117 uint8_t pthresh; /**< Prefetch threshold register. */
118 uint8_t hthresh; /**< Host threshold register. */
119 uint8_t wthresh; /**< Write-back threshold register. */
120 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
121 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
125 * Hardware context number
127 enum igb_advctx_num {
128 IGB_CTX_0 = 0, /**< CTX0 */
129 IGB_CTX_1 = 1, /**< CTX1 */
130 IGB_CTX_NUM = 2, /**< CTX_NUM */
133 /** Offload features */
134 union igb_tx_offload {
137 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
138 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
139 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
140 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
141 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
143 /* uint64_t unused:8; */
148 * Compare mask for igb_tx_offload.data,
149 * should be in sync with igb_tx_offload layout.
151 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
152 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
153 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
154 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
155 /** Mac + IP + TCP + Mss mask. */
156 #define TX_TSO_CMP_MASK \
157 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
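/*
 * Illustrative sketch (not part of the upstream logic): with the bit-field
 * layout of union igb_tx_offload above, the compare masks let a cached
 * context be matched against a packet's offload metadata with a single
 * 64-bit AND and compare instead of testing each field separately. The
 * helper below only demonstrates that idea; it assumes the cached value was
 * stored pre-masked, as done later in igbe_set_xmit_ctx().
 */
static inline int
igb_tx_offload_match_sketch(uint64_t cached_data, uint64_t mask_data,
		union igb_tx_offload pkt_offload)
{
	/* Match when the masked packet offload data equals the cached data. */
	return cached_data == (mask_data & pkt_offload.data);
}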
160 * Structure to check if a new context needs to be built
162 struct igb_advctx_info {
163 uint64_t flags; /**< ol_flags related to context build. */
164 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
165 union igb_tx_offload tx_offload;
166 /** compare mask for tx offload. */
167 union igb_tx_offload tx_offload_mask;
171 * Structure associated with each TX queue.
173 struct igb_tx_queue {
174 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
175 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
176 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
177 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
178 uint32_t txd_type; /**< Device-specific TXD type */
179 uint16_t nb_tx_desc; /**< number of TX descriptors. */
180 uint16_t tx_tail; /**< Current value of TDT register. */
182 /**< Index of first used TX descriptor. */
183 uint16_t queue_id; /**< TX queue index. */
184 uint16_t reg_idx; /**< TX queue register index. */
185 uint8_t port_id; /**< Device port identifier. */
186 uint8_t pthresh; /**< Prefetch threshold register. */
187 uint8_t hthresh; /**< Host threshold register. */
188 uint8_t wthresh; /**< Write-back threshold register. */
190 /**< Current used hardware descriptor. */
192 /**< Start context position for transmit queue. */
193 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
194 /**< Hardware context history.*/
198 #define RTE_PMD_USE_PREFETCH
201 #ifdef RTE_PMD_USE_PREFETCH
202 #define rte_igb_prefetch(p) rte_prefetch0(p)
204 #define rte_igb_prefetch(p) do {} while(0)
207 #ifdef RTE_PMD_PACKET_PREFETCH
208 #define rte_packet_prefetch(p) rte_prefetch1(p)
210 #define rte_packet_prefetch(p) do {} while(0)
214 * Macros for the VMDq feature of 1 GbE NICs.
216 #define E1000_VMOLR_SIZE (8)
217 #define IGB_TSO_MAX_HDRLEN (512)
218 #define IGB_TSO_MAX_MSS (9216)
220 /*********************************************************************
224 **********************************************************************/
227 * There are some hardware limitations for TCP segmentation offload. We
228 * should check whether the parameters are valid.
230 static inline uint64_t
231 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
233 if (!(ol_req & PKT_TX_TCP_SEG))
235 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
236 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
237 ol_req &= ~PKT_TX_TCP_SEG;
238 ol_req |= PKT_TX_TCP_CKSUM;
244 * Advanced context descriptors are almost the same between igb and ixgbe.
245 * This is kept as a separate function; look for optimization opportunities here.
246 * Rework is required to go with the pre-defined values.
250 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
251 volatile struct e1000_adv_tx_context_desc *ctx_txd,
252 uint64_t ol_flags, union igb_tx_offload tx_offload)
254 uint32_t type_tucmd_mlhl;
255 uint32_t mss_l4len_idx;
256 uint32_t ctx_idx, ctx_curr;
257 uint32_t vlan_macip_lens;
258 union igb_tx_offload tx_offload_mask;
260 ctx_curr = txq->ctx_curr;
261 ctx_idx = ctx_curr + txq->ctx_start;
263 tx_offload_mask.data = 0;
266 /* Specify which HW CTX to upload. */
267 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
269 if (ol_flags & PKT_TX_VLAN_PKT)
270 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
272 /* check if TCP segmentation is required for this packet */
273 if (ol_flags & PKT_TX_TCP_SEG) {
274 /* implies IP cksum in IPv4 */
275 if (ol_flags & PKT_TX_IP_CKSUM)
276 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
277 E1000_ADVTXD_TUCMD_L4T_TCP |
278 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
281 E1000_ADVTXD_TUCMD_L4T_TCP |
282 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
284 tx_offload_mask.data |= TX_TSO_CMP_MASK;
285 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
286 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
287 } else { /* no TSO, check if hardware checksum is needed */
288 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
289 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
291 if (ol_flags & PKT_TX_IP_CKSUM)
292 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
294 switch (ol_flags & PKT_TX_L4_MASK) {
295 case PKT_TX_UDP_CKSUM:
296 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
297 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
300 case PKT_TX_TCP_CKSUM:
301 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
302 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
303 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
305 case PKT_TX_SCTP_CKSUM:
306 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
307 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
308 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
311 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
312 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
317 txq->ctx_cache[ctx_curr].flags = ol_flags;
318 txq->ctx_cache[ctx_curr].tx_offload.data =
319 tx_offload_mask.data & tx_offload.data;
320 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
322 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
323 vlan_macip_lens = (uint32_t)tx_offload.data;
324 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
325 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
326 ctx_txd->seqnum_seed = 0;
330 * Check which hardware context can be used. Use the existing match
331 * or create a new context descriptor.
333 static inline uint32_t
334 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
335 union igb_tx_offload tx_offload)
337 /* If it matches the current context */
338 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
339 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
340 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
341 return txq->ctx_curr;
344 /* If it matches the second context */
346 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
347 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
348 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
349 return txq->ctx_curr;
352 /* Mismatch, use the previous context */
356 static inline uint32_t
357 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
359 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
360 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
363 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
364 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
365 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
369 static inline uint32_t
370 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
373 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
374 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
375 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
376 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
381 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
384 struct igb_tx_queue *txq;
385 struct igb_tx_entry *sw_ring;
386 struct igb_tx_entry *txe, *txn;
387 volatile union e1000_adv_tx_desc *txr;
388 volatile union e1000_adv_tx_desc *txd;
389 struct rte_mbuf *tx_pkt;
390 struct rte_mbuf *m_seg;
391 uint64_t buf_dma_addr;
392 uint32_t olinfo_status;
393 uint32_t cmd_type_len;
402 uint32_t new_ctx = 0;
404 union igb_tx_offload tx_offload = {0};
407 sw_ring = txq->sw_ring;
409 tx_id = txq->tx_tail;
410 txe = &sw_ring[tx_id];
412 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
414 pkt_len = tx_pkt->pkt_len;
416 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
419 * The number of descriptors that must be allocated for a
420 * packet is the number of segments of that packet, plus 1
421 * Context Descriptor for the VLAN Tag Identifier, if any.
422 * Determine the last TX descriptor to allocate in the TX ring
423 * for the packet, starting from the current position (tx_id)
426 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
428 ol_flags = tx_pkt->ol_flags;
429 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
431 /* If a Context Descriptor needs to be built. */
433 tx_offload.l2_len = tx_pkt->l2_len;
434 tx_offload.l3_len = tx_pkt->l3_len;
435 tx_offload.l4_len = tx_pkt->l4_len;
436 tx_offload.vlan_tci = tx_pkt->vlan_tci;
437 tx_offload.tso_segsz = tx_pkt->tso_segsz;
438 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
440 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
441 /* Only allocate a context descriptor if required */
442 new_ctx = (ctx == IGB_CTX_NUM);
443 ctx = txq->ctx_curr + txq->ctx_start;
444 tx_last = (uint16_t) (tx_last + new_ctx);
446 if (tx_last >= txq->nb_tx_desc)
447 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
449 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
450 " tx_first=%u tx_last=%u",
451 (unsigned) txq->port_id,
452 (unsigned) txq->queue_id,
458 * Check if there are enough free descriptors in the TX ring
459 * to transmit the next packet.
460 * This operation is based on the two following rules:
462 * 1- Only check that the last needed TX descriptor can be
463 * allocated (by construction, if that descriptor is free,
464 * all intermediate ones are also free).
466 * For this purpose, the index of the last TX descriptor
467 * used for a packet (the "last descriptor" of a packet)
468 * is recorded in the TX entries (the last one included)
469 * that are associated with all TX descriptors allocated
472 * 2- Avoid allocating the last free TX descriptor of the
473 * ring, in order to never set the TDT register with the
474 * same value stored in parallel by the NIC in the TDH
475 * register, which would make the TX engine of the NIC enter
476 * a deadlock situation.
478 * By extension, avoid allocating a free descriptor that
479 * belongs to the last set of free descriptors allocated
480 * to the same packet previously transmitted.
484 * The "last descriptor" of the previously sent packet, if any,
485 * which used the last descriptor to allocate.
487 tx_end = sw_ring[tx_last].last_id;
490 * The next descriptor following that "last descriptor" in the
493 tx_end = sw_ring[tx_end].next_id;
496 * The "last descriptor" associated with that next descriptor.
498 tx_end = sw_ring[tx_end].last_id;
501 * Check that this descriptor is free.
503 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
510 * Set common flags of all TX Data Descriptors.
512 * The following bits must be set in all Data Descriptors:
513 * - E1000_ADVTXD_DTYP_DATA
514 * - E1000_ADVTXD_DCMD_DEXT
516 * The following bits must be set in the first Data Descriptor
517 * and are ignored in the other ones:
518 * - E1000_ADVTXD_DCMD_IFCS
519 * - E1000_ADVTXD_MAC_1588
520 * - E1000_ADVTXD_DCMD_VLE
522 * The following bits must only be set in the last Data Descriptor:
524 * - E1000_TXD_CMD_EOP
526 * The following bits can be set in any Data Descriptor, but
527 * are only set in the last Data Descriptor:
530 cmd_type_len = txq->txd_type |
531 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
532 if (tx_ol_req & PKT_TX_TCP_SEG)
533 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
534 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
535 #if defined(RTE_LIBRTE_IEEE1588)
536 if (ol_flags & PKT_TX_IEEE1588_TMST)
537 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
540 /* Setup TX Advanced context descriptor if required */
542 volatile struct e1000_adv_tx_context_desc *
545 ctx_txd = (volatile struct
546 e1000_adv_tx_context_desc *)
549 txn = &sw_ring[txe->next_id];
550 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
552 if (txe->mbuf != NULL) {
553 rte_pktmbuf_free_seg(txe->mbuf);
557 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
559 txe->last_id = tx_last;
560 tx_id = txe->next_id;
564 /* Setup the TX Advanced Data Descriptor */
565 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
566 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
567 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
572 txn = &sw_ring[txe->next_id];
575 if (txe->mbuf != NULL)
576 rte_pktmbuf_free_seg(txe->mbuf);
580 * Set up transmit descriptor.
582 slen = (uint16_t) m_seg->data_len;
583 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
584 txd->read.buffer_addr =
585 rte_cpu_to_le_64(buf_dma_addr);
586 txd->read.cmd_type_len =
587 rte_cpu_to_le_32(cmd_type_len | slen);
588 txd->read.olinfo_status =
589 rte_cpu_to_le_32(olinfo_status);
590 txe->last_id = tx_last;
591 tx_id = txe->next_id;
594 } while (m_seg != NULL);
597 * The last packet data descriptor needs End Of Packet (EOP)
598 * and Report Status (RS).
600 txd->read.cmd_type_len |=
601 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
607 * Set the Transmit Descriptor Tail (TDT).
609 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
610 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
611 (unsigned) txq->port_id, (unsigned) txq->queue_id,
612 (unsigned) tx_id, (unsigned) nb_tx);
613 txq->tx_tail = tx_id;
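/*
 * Illustrative usage sketch (application side, not part of the PMD): the
 * transmit burst above is normally reached through the generic ethdev API.
 * port_id, queue_id, pkts and nb_pkts below are placeholders chosen for the
 * example.
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *	while (sent < nb_pkts)
 *		rte_pktmbuf_free(pkts[sent++]);	-- drop what was not sent
 */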
618 /*********************************************************************
622 **********************************************************************/
623 #define IGB_PACKET_TYPE_IPV4 0X01
624 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
625 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
626 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
627 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
628 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
629 #define IGB_PACKET_TYPE_IPV6 0X04
630 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
631 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
632 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
633 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
634 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
635 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
636 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
637 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
638 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
639 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
640 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
641 #define IGB_PACKET_TYPE_MAX 0X80
642 #define IGB_PACKET_TYPE_MASK 0X7F
643 #define IGB_PACKET_TYPE_SHIFT 0X04
644 static inline uint32_t
645 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
647 static const uint32_t
648 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
649 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
651 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
652 RTE_PTYPE_L3_IPV4_EXT,
653 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
655 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
656 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
657 RTE_PTYPE_INNER_L3_IPV6,
658 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
659 RTE_PTYPE_L3_IPV6_EXT,
660 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
661 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
662 RTE_PTYPE_INNER_L3_IPV6_EXT,
663 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
664 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
665 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
666 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
667 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
668 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
669 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
670 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
671 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
672 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
673 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
674 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
675 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
676 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
677 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
678 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
679 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
680 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
681 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
682 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
683 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
684 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
685 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
686 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
687 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
688 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
689 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
690 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
692 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
693 return RTE_PTYPE_UNKNOWN;
695 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
697 return ptype_table[pkt_info];
700 static inline uint64_t
701 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
703 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
705 #if defined(RTE_LIBRTE_IEEE1588)
706 static uint32_t ip_pkt_etqf_map[8] = {
707 0, 0, 0, PKT_RX_IEEE1588_PTP,
711 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
712 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
714 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
715 if (hw->mac.type == e1000_i210)
716 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
718 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
726 static inline uint64_t
727 rx_desc_status_to_pkt_flags(uint32_t rx_status)
731 /* Check if VLAN present */
732 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
734 #if defined(RTE_LIBRTE_IEEE1588)
735 if (rx_status & E1000_RXD_STAT_TMST)
736 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
741 static inline uint64_t
742 rx_desc_error_to_pkt_flags(uint32_t rx_status)
745 * Bit 30: IPE, IPv4 checksum error
746 * Bit 29: L4I, L4 integrity error
749 static uint64_t error_to_pkt_flags_map[4] = {
750 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
751 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
753 return error_to_pkt_flags_map[(rx_status >>
754 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
758 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
761 struct igb_rx_queue *rxq;
762 volatile union e1000_adv_rx_desc *rx_ring;
763 volatile union e1000_adv_rx_desc *rxdp;
764 struct igb_rx_entry *sw_ring;
765 struct igb_rx_entry *rxe;
766 struct rte_mbuf *rxm;
767 struct rte_mbuf *nmb;
768 union e1000_adv_rx_desc rxd;
771 uint32_t hlen_type_rss;
781 rx_id = rxq->rx_tail;
782 rx_ring = rxq->rx_ring;
783 sw_ring = rxq->sw_ring;
784 while (nb_rx < nb_pkts) {
786 * The order of operations here is important as the DD status
787 * bit must not be read after any other descriptor fields.
788 * rx_ring and rxdp are pointing to volatile data so the order
789 * of accesses cannot be reordered by the compiler. If they were
790 * not volatile, they could be reordered which could lead to
791 * using invalid descriptor fields when read from rxd.
793 rxdp = &rx_ring[rx_id];
794 staterr = rxdp->wb.upper.status_error;
795 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
802 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
803 * likely to be invalid and to be dropped by the various
804 * validation checks performed by the network stack.
806 * Allocate a new mbuf to replenish the RX ring descriptor.
807 * If the allocation fails:
808 * - arrange for that RX descriptor to be the first one
809 * being parsed the next time the receive function is
810 * invoked [on the same queue].
812 * - Stop parsing the RX ring and return immediately.
814 * This policy does not drop the packet received in the RX
815 * descriptor for which the allocation of a new mbuf failed.
816 * Thus, it allows that packet to be later retrieved if
817 * mbufs have been freed in the meantime.
818 * As a side effect, holding RX descriptors instead of
819 * systematically giving them back to the NIC may lead to
820 * RX ring exhaustion situations.
821 * However, the NIC can gracefully prevent such situations
822 * from happening by sending specific "back-pressure" flow control
823 * frames to its peer(s).
825 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
826 "staterr=0x%x pkt_len=%u",
827 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
828 (unsigned) rx_id, (unsigned) staterr,
829 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
831 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
833 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
834 "queue_id=%u", (unsigned) rxq->port_id,
835 (unsigned) rxq->queue_id);
836 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
841 rxe = &sw_ring[rx_id];
843 if (rx_id == rxq->nb_rx_desc)
846 /* Prefetch next mbuf while processing current one. */
847 rte_igb_prefetch(sw_ring[rx_id].mbuf);
850 * When the next RX descriptor is on a cache-line boundary,
851 * prefetch the next 4 RX descriptors and the next 8 pointers
854 if ((rx_id & 0x3) == 0) {
855 rte_igb_prefetch(&rx_ring[rx_id]);
856 rte_igb_prefetch(&sw_ring[rx_id]);
862 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
863 rxdp->read.hdr_addr = 0;
864 rxdp->read.pkt_addr = dma_addr;
867 * Initialize the returned mbuf.
868 * 1) setup generic mbuf fields:
869 * - number of segments,
872 * - RX port identifier.
873 * 2) integrate hardware offload data, if any:
875 * - IP checksum flag,
876 * - VLAN TCI, if any,
879 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
881 rxm->data_off = RTE_PKTMBUF_HEADROOM;
882 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
885 rxm->pkt_len = pkt_len;
886 rxm->data_len = pkt_len;
887 rxm->port = rxq->port_id;
889 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
890 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
891 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
892 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
894 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
895 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
896 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
897 rxm->ol_flags = pkt_flags;
898 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
899 lo_dword.hs_rss.pkt_info);
902 * Store the mbuf address into the next entry of the array
903 * of returned packets.
905 rx_pkts[nb_rx++] = rxm;
907 rxq->rx_tail = rx_id;
910 * If the number of free RX descriptors is greater than the RX free
911 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
913 * Update the RDT with the value of the last processed RX descriptor
914 * minus 1, to guarantee that the RDT register is never equal to the
915 * RDH register, which creates a "full" ring situation from the
916 * hardware point of view...
918 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
919 if (nb_hold > rxq->rx_free_thresh) {
920 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
921 "nb_hold=%u nb_rx=%u",
922 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
923 (unsigned) rx_id, (unsigned) nb_hold,
925 rx_id = (uint16_t) ((rx_id == 0) ?
926 (rxq->nb_rx_desc - 1) : (rx_id - 1));
927 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
930 rxq->nb_rx_hold = nb_hold;
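/*
 * Illustrative usage sketch (application side, not part of the PMD): the
 * receive burst above is normally invoked through rte_eth_rx_burst().
 * port_id and queue_id are placeholders chosen for the example.
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *	for (uint16_t i = 0; i < nb; i++)
 *		process_packet(bufs[i]);	-- hypothetical application handler
 */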
935 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
938 struct igb_rx_queue *rxq;
939 volatile union e1000_adv_rx_desc *rx_ring;
940 volatile union e1000_adv_rx_desc *rxdp;
941 struct igb_rx_entry *sw_ring;
942 struct igb_rx_entry *rxe;
943 struct rte_mbuf *first_seg;
944 struct rte_mbuf *last_seg;
945 struct rte_mbuf *rxm;
946 struct rte_mbuf *nmb;
947 union e1000_adv_rx_desc rxd;
948 uint64_t dma; /* Physical address of mbuf data buffer */
950 uint32_t hlen_type_rss;
960 rx_id = rxq->rx_tail;
961 rx_ring = rxq->rx_ring;
962 sw_ring = rxq->sw_ring;
965 * Retrieve RX context of current packet, if any.
967 first_seg = rxq->pkt_first_seg;
968 last_seg = rxq->pkt_last_seg;
970 while (nb_rx < nb_pkts) {
973 * The order of operations here is important as the DD status
974 * bit must not be read after any other descriptor fields.
975 * rx_ring and rxdp are pointing to volatile data so the order
976 * of accesses cannot be reordered by the compiler. If they were
977 * not volatile, they could be reordered which could lead to
978 * using invalid descriptor fields when read from rxd.
980 rxdp = &rx_ring[rx_id];
981 staterr = rxdp->wb.upper.status_error;
982 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
989 * Allocate a new mbuf to replenish the RX ring descriptor.
990 * If the allocation fails:
991 * - arrange for that RX descriptor to be the first one
992 * being parsed the next time the receive function is
993 * invoked [on the same queue].
995 * - Stop parsing the RX ring and return immediately.
997 * This policy does not drop the packet received in the RX
998 * descriptor for which the allocation of a new mbuf failed.
999 * Thus, it allows that packet to be later retrieved if
1000 * mbufs have been freed in the meantime.
1001 * As a side effect, holding RX descriptors instead of
1002 * systematically giving them back to the NIC may lead to
1003 * RX ring exhaustion situations.
1004 * However, the NIC can gracefully prevent such situations
1005 * from happening by sending specific "back-pressure" flow control
1006 * frames to its peer(s).
1008 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1009 "staterr=0x%x data_len=%u",
1010 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1011 (unsigned) rx_id, (unsigned) staterr,
1012 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1014 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1016 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1017 "queue_id=%u", (unsigned) rxq->port_id,
1018 (unsigned) rxq->queue_id);
1019 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1024 rxe = &sw_ring[rx_id];
1026 if (rx_id == rxq->nb_rx_desc)
1029 /* Prefetch next mbuf while processing current one. */
1030 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1033 * When the next RX descriptor is on a cache-line boundary,
1034 * prefetch the next 4 RX descriptors and the next 8 pointers
1037 if ((rx_id & 0x3) == 0) {
1038 rte_igb_prefetch(&rx_ring[rx_id]);
1039 rte_igb_prefetch(&sw_ring[rx_id]);
1043 * Update RX descriptor with the physical address of the new
1044 * data buffer of the new allocated mbuf.
1048 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1049 rxdp->read.pkt_addr = dma;
1050 rxdp->read.hdr_addr = 0;
1053 * Set data length & data buffer address of mbuf.
1055 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1056 rxm->data_len = data_len;
1057 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1060 * If this is the first buffer of the received packet,
1061 * set the pointer to the first mbuf of the packet and
1062 * initialize its context.
1063 * Otherwise, update the total length and the number of segments
1064 * of the current scattered packet, and update the pointer to
1065 * the last mbuf of the current packet.
1067 if (first_seg == NULL) {
1069 first_seg->pkt_len = data_len;
1070 first_seg->nb_segs = 1;
1072 first_seg->pkt_len += data_len;
1073 first_seg->nb_segs++;
1074 last_seg->next = rxm;
1078 * If this is not the last buffer of the received packet,
1079 * update the pointer to the last mbuf of the current scattered
1080 * packet and continue to parse the RX ring.
1082 if (! (staterr & E1000_RXD_STAT_EOP)) {
1088 * This is the last buffer of the received packet.
1089 * If the CRC is not stripped by the hardware:
1090 * - Subtract the CRC length from the total packet length.
1091 * - If the last buffer only contains the whole CRC or a part
1092 * of it, free the mbuf associated to the last buffer.
1093 * If part of the CRC is also contained in the previous
1094 * mbuf, subtract the length of that CRC part from the
1095 * data length of the previous mbuf.
1098 if (unlikely(rxq->crc_len > 0)) {
1099 first_seg->pkt_len -= ETHER_CRC_LEN;
1100 if (data_len <= ETHER_CRC_LEN) {
1101 rte_pktmbuf_free_seg(rxm);
1102 first_seg->nb_segs--;
1103 last_seg->data_len = (uint16_t)
1104 (last_seg->data_len -
1105 (ETHER_CRC_LEN - data_len));
1106 last_seg->next = NULL;
1109 (uint16_t) (data_len - ETHER_CRC_LEN);
1113 * Initialize the first mbuf of the returned packet:
1114 * - RX port identifier,
1115 * - hardware offload data, if any:
1116 * - RSS flag & hash,
1117 * - IP checksum flag,
1118 * - VLAN TCI, if any,
1121 first_seg->port = rxq->port_id;
1122 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1125 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1126 * set in the pkt_flags field.
1128 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1129 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1130 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1131 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1132 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1133 first_seg->ol_flags = pkt_flags;
1134 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1135 lower.lo_dword.hs_rss.pkt_info);
1137 /* Prefetch data of first segment, if configured to do so. */
1138 rte_packet_prefetch((char *)first_seg->buf_addr +
1139 first_seg->data_off);
1142 * Store the mbuf address into the next entry of the array
1143 * of returned packets.
1145 rx_pkts[nb_rx++] = first_seg;
1148 * Setup receipt context for a new packet.
1154 * Record index of the next RX descriptor to probe.
1156 rxq->rx_tail = rx_id;
1159 * Save receive context.
1161 rxq->pkt_first_seg = first_seg;
1162 rxq->pkt_last_seg = last_seg;
1165 * If the number of free RX descriptors is greater than the RX free
1166 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1168 * Update the RDT with the value of the last processed RX descriptor
1169 * minus 1, to guarantee that the RDT register is never equal to the
1170 * RDH register, which creates a "full" ring situation from the
1171 * hardware point of view...
1173 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1174 if (nb_hold > rxq->rx_free_thresh) {
1175 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1176 "nb_hold=%u nb_rx=%u",
1177 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1178 (unsigned) rx_id, (unsigned) nb_hold,
1180 rx_id = (uint16_t) ((rx_id == 0) ?
1181 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1182 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1185 rxq->nb_rx_hold = nb_hold;
1190 * Maximum number of Ring Descriptors.
1192 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1193 * descriptors should meet the following condition:
1194 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
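/*
 * Worked example of the constraint above (a sketch; descriptor sizes as
 * assumed by this driver): both advanced RX and TX descriptors are 16 bytes,
 * so 128 / 16 = 8 descriptors fit in one 128-byte chunk and the ring size
 * must be a multiple of 8, which IGB_RXD_ALIGN / IGB_TXD_ALIGN enforce in
 * the queue setup functions below. For instance:
 *
 *	nb_desc = 512;	512 % 8 == 0, and
 *	512 * sizeof(union e1000_adv_rx_desc) == 8192, with 8192 % 128 == 0
 */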
1198 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1202 if (txq->sw_ring != NULL) {
1203 for (i = 0; i < txq->nb_tx_desc; i++) {
1204 if (txq->sw_ring[i].mbuf != NULL) {
1205 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1206 txq->sw_ring[i].mbuf = NULL;
1213 igb_tx_queue_release(struct igb_tx_queue *txq)
1216 igb_tx_queue_release_mbufs(txq);
1217 rte_free(txq->sw_ring);
1223 eth_igb_tx_queue_release(void *txq)
1225 igb_tx_queue_release(txq);
1229 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1234 memset((void*)&txq->ctx_cache, 0,
1235 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1239 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1241 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1242 struct igb_tx_entry *txe = txq->sw_ring;
1244 struct e1000_hw *hw;
1246 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1247 /* Zero out HW ring memory */
1248 for (i = 0; i < txq->nb_tx_desc; i++) {
1249 txq->tx_ring[i] = zeroed_desc;
1252 /* Initialize ring entries */
1253 prev = (uint16_t)(txq->nb_tx_desc - 1);
1254 for (i = 0; i < txq->nb_tx_desc; i++) {
1255 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1257 txd->wb.status = E1000_TXD_STAT_DD;
1260 txe[prev].next_id = i;
1264 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1265 /* 82575 specific, each tx queue will use 2 hw contexts */
1266 if (hw->mac.type == e1000_82575)
1267 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1269 igb_reset_tx_queue_stat(txq);
1273 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1276 unsigned int socket_id,
1277 const struct rte_eth_txconf *tx_conf)
1279 const struct rte_memzone *tz;
1280 struct igb_tx_queue *txq;
1281 struct e1000_hw *hw;
1284 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1287 * Validate the number of transmit descriptors.
1288 * It must not exceed the hardware maximum, and must be a multiple
1291 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1292 (nb_desc > E1000_MAX_RING_DESC) ||
1293 (nb_desc < E1000_MIN_RING_DESC)) {
1298 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1301 if (tx_conf->tx_free_thresh != 0)
1302 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1303 "used for the 1G driver.");
1304 if (tx_conf->tx_rs_thresh != 0)
1305 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1306 "used for the 1G driver.");
1307 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1308 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1309 "consider setting the TX WTHRESH value to 4, 8, "
1312 /* Free memory prior to re-allocation if needed */
1313 if (dev->data->tx_queues[queue_idx] != NULL) {
1314 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1315 dev->data->tx_queues[queue_idx] = NULL;
1318 /* First allocate the tx queue data structure */
1319 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1320 RTE_CACHE_LINE_SIZE);
1325 * Allocate TX ring hardware descriptors. A memzone large enough to
1326 * handle the maximum ring size is allocated in order to allow for
1327 * resizing in later calls to the queue setup function.
1329 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1330 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1331 E1000_ALIGN, socket_id);
1333 igb_tx_queue_release(txq);
1337 txq->nb_tx_desc = nb_desc;
1338 txq->pthresh = tx_conf->tx_thresh.pthresh;
1339 txq->hthresh = tx_conf->tx_thresh.hthresh;
1340 txq->wthresh = tx_conf->tx_thresh.wthresh;
1341 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1343 txq->queue_id = queue_idx;
1344 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1345 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1346 txq->port_id = dev->data->port_id;
1348 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1349 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1351 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1352 /* Allocate software ring */
1353 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1354 sizeof(struct igb_tx_entry) * nb_desc,
1355 RTE_CACHE_LINE_SIZE);
1356 if (txq->sw_ring == NULL) {
1357 igb_tx_queue_release(txq);
1360 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1361 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1363 igb_reset_tx_queue(txq, dev);
1364 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1365 dev->data->tx_queues[queue_idx] = txq;
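/*
 * Illustrative configuration sketch (application side, not part of the PMD):
 * this setup path is reached through rte_eth_tx_queue_setup(). The values
 * below are example choices, not driver requirements; wthresh = 4 follows
 * the hint logged above for non-82576 MACs.
 *
 *	struct rte_eth_txconf txconf = { .tx_thresh = { .wthresh = 4 } };
 *	int rc = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *					&txconf);
 */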
1371 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1375 if (rxq->sw_ring != NULL) {
1376 for (i = 0; i < rxq->nb_rx_desc; i++) {
1377 if (rxq->sw_ring[i].mbuf != NULL) {
1378 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1379 rxq->sw_ring[i].mbuf = NULL;
1386 igb_rx_queue_release(struct igb_rx_queue *rxq)
1389 igb_rx_queue_release_mbufs(rxq);
1390 rte_free(rxq->sw_ring);
1396 eth_igb_rx_queue_release(void *rxq)
1398 igb_rx_queue_release(rxq);
1402 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1404 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1407 /* Zero out HW ring memory */
1408 for (i = 0; i < rxq->nb_rx_desc; i++) {
1409 rxq->rx_ring[i] = zeroed_desc;
1413 rxq->pkt_first_seg = NULL;
1414 rxq->pkt_last_seg = NULL;
1418 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1421 unsigned int socket_id,
1422 const struct rte_eth_rxconf *rx_conf,
1423 struct rte_mempool *mp)
1425 const struct rte_memzone *rz;
1426 struct igb_rx_queue *rxq;
1427 struct e1000_hw *hw;
1430 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1433 * Validate the number of receive descriptors.
1434 * It must not exceed the hardware maximum, and must be a multiple
1437 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1438 (nb_desc > E1000_MAX_RING_DESC) ||
1439 (nb_desc < E1000_MIN_RING_DESC)) {
1443 /* Free memory prior to re-allocation if needed */
1444 if (dev->data->rx_queues[queue_idx] != NULL) {
1445 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1446 dev->data->rx_queues[queue_idx] = NULL;
1449 /* First allocate the RX queue data structure. */
1450 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1451 RTE_CACHE_LINE_SIZE);
1455 rxq->nb_rx_desc = nb_desc;
1456 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1457 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1458 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1459 if (rxq->wthresh > 0 &&
1460 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1462 rxq->drop_en = rx_conf->rx_drop_en;
1463 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1464 rxq->queue_id = queue_idx;
1465 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1466 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1467 rxq->port_id = dev->data->port_id;
1468 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1472 * Allocate RX ring hardware descriptors. A memzone large enough to
1473 * handle the maximum ring size is allocated in order to allow for
1474 * resizing in later calls to the queue setup function.
1476 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1477 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1478 E1000_ALIGN, socket_id);
1480 igb_rx_queue_release(rxq);
1483 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1484 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1485 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1486 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1488 /* Allocate software ring. */
1489 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1490 sizeof(struct igb_rx_entry) * nb_desc,
1491 RTE_CACHE_LINE_SIZE);
1492 if (rxq->sw_ring == NULL) {
1493 igb_rx_queue_release(rxq);
1496 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1497 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1499 dev->data->rx_queues[queue_idx] = rxq;
1500 igb_reset_rx_queue(rxq);
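/*
 * Illustrative configuration sketch (application side, not part of the PMD):
 * an RX queue needs a mempool to populate its software ring. Example values
 * only; passing NULL for the rte_eth_rxconf pointer selects driver defaults.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	int rc = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *					NULL, mp);
 */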
1506 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1508 #define IGB_RXQ_SCAN_INTERVAL 4
1509 volatile union e1000_adv_rx_desc *rxdp;
1510 struct igb_rx_queue *rxq;
1513 if (rx_queue_id >= dev->data->nb_rx_queues) {
1514 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1518 rxq = dev->data->rx_queues[rx_queue_id];
1519 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1521 while ((desc < rxq->nb_rx_desc) &&
1522 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1523 desc += IGB_RXQ_SCAN_INTERVAL;
1524 rxdp += IGB_RXQ_SCAN_INTERVAL;
1525 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1526 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1527 desc - rxq->nb_rx_desc]);
1534 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1536 volatile union e1000_adv_rx_desc *rxdp;
1537 struct igb_rx_queue *rxq = rx_queue;
1540 if (unlikely(offset >= rxq->nb_rx_desc))
1542 desc = rxq->rx_tail + offset;
1543 if (desc >= rxq->nb_rx_desc)
1544 desc -= rxq->nb_rx_desc;
1546 rxdp = &rxq->rx_ring[desc];
1547 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1551 igb_dev_clear_queues(struct rte_eth_dev *dev)
1554 struct igb_tx_queue *txq;
1555 struct igb_rx_queue *rxq;
1557 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1558 txq = dev->data->tx_queues[i];
1560 igb_tx_queue_release_mbufs(txq);
1561 igb_reset_tx_queue(txq, dev);
1565 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1566 rxq = dev->data->rx_queues[i];
1568 igb_rx_queue_release_mbufs(rxq);
1569 igb_reset_rx_queue(rxq);
1575 igb_dev_free_queues(struct rte_eth_dev *dev)
1579 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1580 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1581 dev->data->rx_queues[i] = NULL;
1583 dev->data->nb_rx_queues = 0;
1585 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1586 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1587 dev->data->tx_queues[i] = NULL;
1589 dev->data->nb_tx_queues = 0;
1593 * Receive Side Scaling (RSS).
1594 * See section 7.1.1.7 in the following document:
1595 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1598 * The source and destination IP addresses of the IP header and the source and
1599 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1600 * against a configurable random key to compute a 32-bit RSS hash result.
1601 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1602 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1603 * RSS output index which is used as the RX queue index where to store the
1605 * The following output is supplied in the RX write-back descriptor:
1606 * - 32-bit result of the Microsoft RSS hash function,
1607 * - 4-bit RSS type field.
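/*
 * Worked example of the lookup described above (a sketch, not driver code):
 * for the 32-bit hash value computed by the NIC,
 *
 *	reta_idx = hash & 0x7F;		7 LSBs select one of 128 RETA entries
 *	rx_queue = RETA[reta_idx];	3-bit RSS output index
 *
 * so, per the 3-bit output index described above, at most 8 RX queues can be
 * addressed through the RETA on these devices.
 */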
1611 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1612 * Used as the default key.
1614 static uint8_t rss_intel_key[40] = {
1615 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1616 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1617 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1618 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1619 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1623 igb_rss_disable(struct rte_eth_dev *dev)
1625 struct e1000_hw *hw;
1628 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1630 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1631 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1635 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1643 hash_key = rss_conf->rss_key;
1644 if (hash_key != NULL) {
1645 /* Fill in RSS hash key */
1646 for (i = 0; i < 10; i++) {
1647 rss_key = hash_key[(i * 4)];
1648 rss_key |= hash_key[(i * 4) + 1] << 8;
1649 rss_key |= hash_key[(i * 4) + 2] << 16;
1650 rss_key |= hash_key[(i * 4) + 3] << 24;
1651 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1655 /* Set configured hashing protocols in MRQC register */
1656 rss_hf = rss_conf->rss_hf;
1657 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1658 if (rss_hf & ETH_RSS_IPV4)
1659 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1660 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1661 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1662 if (rss_hf & ETH_RSS_IPV6)
1663 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1664 if (rss_hf & ETH_RSS_IPV6_EX)
1665 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1666 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1667 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1668 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1669 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1670 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1671 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1672 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1673 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1674 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1675 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1676 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1680 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1681 struct rte_eth_rss_conf *rss_conf)
1683 struct e1000_hw *hw;
1687 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1690 * Before changing anything, first check that the update RSS operation
1691 * does not attempt to disable RSS, if RSS was enabled at
1692 * initialization time, or does not attempt to enable RSS, if RSS was
1693 * disabled at initialization time.
1695 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1696 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1697 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1698 if (rss_hf != 0) /* Enable RSS */
1700 return 0; /* Nothing to do */
1703 if (rss_hf == 0) /* Disable RSS */
1705 igb_hw_rss_hash_set(hw, rss_conf);
1709 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1710 struct rte_eth_rss_conf *rss_conf)
1712 struct e1000_hw *hw;
1719 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1720 hash_key = rss_conf->rss_key;
1721 if (hash_key != NULL) {
1722 /* Return RSS hash key */
1723 for (i = 0; i < 10; i++) {
1724 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1725 hash_key[(i * 4)] = rss_key & 0x000000FF;
1726 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1727 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1728 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1732 /* Get RSS functions configured in MRQC register */
1733 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1734 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1735 rss_conf->rss_hf = 0;
1739 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1740 rss_hf |= ETH_RSS_IPV4;
1741 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1742 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1743 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1744 rss_hf |= ETH_RSS_IPV6;
1745 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1746 rss_hf |= ETH_RSS_IPV6_EX;
1747 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1748 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1749 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1750 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1751 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1752 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1753 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1754 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1755 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1756 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1757 rss_conf->rss_hf = rss_hf;
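/*
 * Illustrative usage sketch (application side, not part of the PMD): the two
 * functions above are reached through the generic RSS hash API, e.g.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	keep the current/default key
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	int rc = rte_eth_dev_rss_hash_update(port_id, &conf);
 *	rc = rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 */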
1762 igb_rss_configure(struct rte_eth_dev *dev)
1764 struct rte_eth_rss_conf rss_conf;
1765 struct e1000_hw *hw;
1769 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1771 /* Fill in redirection table. */
1772 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1773 for (i = 0; i < 128; i++) {
1780 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1781 i % dev->data->nb_rx_queues : 0);
1782 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1784 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1788 * Configure the RSS key and the RSS protocols used to compute
1789 * the RSS hash of input packets.
1791 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1792 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1793 igb_rss_disable(dev);
1796 if (rss_conf.rss_key == NULL)
1797 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1798 igb_hw_rss_hash_set(hw, &rss_conf);
1802 * Check whether the MAC type supports VMDq.
1803 * Return 1 if it does, otherwise return 0.
1806 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1808 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1810 switch (hw->mac.type) {
1831 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1837 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1839 struct rte_eth_vmdq_rx_conf *cfg;
1840 struct e1000_hw *hw;
1841 uint32_t mrqc, vt_ctl, vmolr, rctl;
1844 PMD_INIT_FUNC_TRACE();
1846 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1849 /* Check if the MAC type can support VMDq; a return value of 0 means no support */
1850 if (igb_is_vmdq_supported(dev) == 0)
1853 igb_rss_disable(dev);
1855 /* RCTL: enable VLAN filter */
1856 rctl = E1000_READ_REG(hw, E1000_RCTL);
1857 rctl |= E1000_RCTL_VFE;
1858 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1860 /* MRQC: enable vmdq */
1861 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1862 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1863 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1865 /* VTCTL: pool selection according to VLAN tag */
1866 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1867 if (cfg->enable_default_pool)
1868 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1869 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1870 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1872 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1873 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1874 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1875 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1878 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1879 vmolr |= E1000_VMOLR_AUPE;
1880 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1881 vmolr |= E1000_VMOLR_ROMPE;
1882 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1883 vmolr |= E1000_VMOLR_ROPE;
1884 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1885 vmolr |= E1000_VMOLR_BAM;
1886 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1887 vmolr |= E1000_VMOLR_MPME;
1889 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1893 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1894 * Both 82576 and 82580 support it.
1896 if (hw->mac.type != e1000_i350) {
1897 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1898 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1899 vmolr |= E1000_VMOLR_STRVLAN;
1900 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1904 /* VFTA - enable all vlan filters */
1905 for (i = 0; i < IGB_VFTA_SIZE; i++)
1906 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1908 /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1909 if (hw->mac.type != e1000_82580)
1910 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1913 * RAH/RAL - allow pools to read specific MAC addresses.
1914 * In this case, all pools should be able to read from MAC address 0.
1916 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1917 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1919 /* VLVF: set up filters for vlan tags as configured */
1920 for (i = 0; i < cfg->nb_pool_maps; i++) {
1921 /* set vlan id in VF register and set the valid bit */
1922 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1923 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1924 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1925 E1000_VLVF_POOLSEL_MASK)));
1928 E1000_WRITE_FLUSH(hw);
1934 /*********************************************************************
1936 * Enable receive unit.
1938 **********************************************************************/
1941 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1943 struct igb_rx_entry *rxe = rxq->sw_ring;
1947 /* Initialize software ring entries. */
1948 for (i = 0; i < rxq->nb_rx_desc; i++) {
1949 volatile union e1000_adv_rx_desc *rxd;
1950 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1953 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1954 "queue_id=%hu", rxq->queue_id);
1958 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1959 rxd = &rxq->rx_ring[i];
1960 rxd->read.hdr_addr = 0;
1961 rxd->read.pkt_addr = dma_addr;
1968 #define E1000_MRQC_DEF_Q_SHIFT (3)
1970 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1972 struct e1000_hw *hw =
1973 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1976 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1978 * SRIOV active scheme
1979 * FIXME if support RSS together with VMDq & SRIOV
1981 mrqc = E1000_MRQC_ENABLE_VMDQ;
1982 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1983 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1984 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1985 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1987 * SRIOV inactive scheme
1989 switch (dev->data->dev_conf.rxmode.mq_mode) {
1991 igb_rss_configure(dev);
1993 case ETH_MQ_RX_VMDQ_ONLY:
1994 /* Configure general VMDq-only RX parameters */
1995 igb_vmdq_rx_hw_configure(dev);
1997 case ETH_MQ_RX_NONE:
1998 /* if mq_mode is none, disable RSS mode. */
2000 igb_rss_disable(dev);
2009 eth_igb_rx_init(struct rte_eth_dev *dev)
2011 struct e1000_hw *hw;
2012 struct igb_rx_queue *rxq;
2017 uint16_t rctl_bsize;
2021 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025 * Make sure receives are disabled while setting
2026 * up the descriptor ring.
2028 rctl = E1000_READ_REG(hw, E1000_RCTL);
2029 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2032 * Configure support of jumbo frames, if any.
2034 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2035 rctl |= E1000_RCTL_LPE;
2038 * Set the maximum packet length by default; it might be updated
2039 * later together with enabling/disabling dual VLAN.
2041 E1000_WRITE_REG(hw, E1000_RLPML,
2042 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2045 rctl &= ~E1000_RCTL_LPE;
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/* Reset crc_len in case it was changed after queue setup by a
		 * call to configure
		 */
		rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
				0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t)((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
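		/*
		 * Worked example (assumes the common RTE_PKTMBUF_HEADROOM of
		 * 128 bytes, a mempool with a 2176-byte data room, and the
		 * usual 1 KB BSIZEPKT shift of 10): buf_size = 2176 - 128 =
		 * 2048, SRRCTL.BSIZEPACKET = 2048 >> 10 = 2 (2 KB), and
		 * buf_size rounds back to 2 << 10 = 2048. Any configured
		 * max_rx_pkt_len such that max_rx_pkt_len + 2 * VLAN_TAG_SIZE
		 * exceeds 2048 then forces the scattered receive path.
		 */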
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
	}
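	/*
	 * For example (hypothetical thresholds) in the RXDCTL programming
	 * above: pthresh = 8, hthresh = 8 and wthresh = 4 leave the upper
	 * register bits untouched (the 0xFFF00000 mask preserves them,
	 * including QUEUE_ENABLE) and set the low bits to
	 * 8 | (8 << 8) | (4 << 16) = 0x00040808.
	 */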
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}
	/*
	 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
	 * register, since the code above configures the SRRCTL register of
	 * the RX queue in such a case.
	 * All configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	if (rctl_bsize > 0) {
		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
			rctl |= E1000_RCTL_SZ_512;
		else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
	}
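	/*
	 * For example (hypothetical mempool): if the mbuf data room minus
	 * headroom were only 700 bytes, the per-queue loop above would leave
	 * rctl_bsize = 700 and force scattered RX, and this block would
	 * program the 512-byte RCTL buffer size as the largest size that
	 * does not exceed it.
	 */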
	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
	igb_dev_mq_rx_configure(dev);

	/* Update the rctl since igb_dev_mq_rx_configure may change its value */
	rctl |= E1000_READ_REG(hw, E1000_RCTL);
	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	/* Enable both L3/L4 rx checksum offload */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
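	/*
	 * Background note: RXCSUM.PCSD (packet checksum disable) turns off the
	 * full-packet checksum that would otherwise be reported in the RX
	 * descriptor; that descriptor field is reused for the RSS hash, which
	 * is why the two features are mutually exclusive. IPOFL and TUOFL
	 * enable IPv4 and TCP/UDP checksum validation respectively.
	 */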
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
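	/*
	 * Note: the tail pointers below are written only after the receive
	 * unit is enabled; with RDH = 0 and RDT = nb_rx_desc - 1, every
	 * descriptor initialized in igb_alloc_rx_queue_mbufs() is handed to
	 * the hardware once the ring is fully set up.
	 */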
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl, txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}
	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
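	/*
	 * Background note (field meanings per the e1000 base driver headers):
	 * CT is the collision threshold field, PSP pads short packets to the
	 * minimum frame size, RTLC retransmits on late collision, and EN
	 * enables the transmitter.
	 */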
	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t srrctl;
	uint16_t buf_size, rctl_bsize, i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE));
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t)((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
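/*
 * Note: unlike eth_igb_rx_init(), the VF path indexes the queue registers by
 * the local queue number (RDLEN(i), RDBAL(i), ...) rather than by
 * rxq->reg_idx, since the registers mapped through the VF BAR already
 * correspond to the VF's own queue set.
 */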
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}
void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct igb_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
}

void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct igb_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}
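/*
 * Illustrative usage (application side, not part of this driver): the two
 * callbacks above back rte_eth_rx_queue_info_get() and
 * rte_eth_tx_queue_info_get(), e.g.
 *
 *	struct rte_eth_rxq_info rx_qinfo;
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
 *		printf("rxq0: %u descriptors\n", rx_qinfo.nb_desc);
 */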