4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
/* Bit mask indicating which offload flags require building a TX context descriptor. */
#define IGB_TX_OFFLOAD_MASK (			 \
		PKT_TX_VLAN_PKT |		 \
		PKT_TX_IP_CKSUM |		 \
		PKT_TX_L4_MASK |		 \
		PKT_TX_TCP_SEG)
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}
 * Structure associated with each descriptor of the RX ring of an RX queue.
96 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
100 * Structure associated with each descriptor of the TX ring of a TX queue.
102 struct igb_tx_entry {
103 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
104 uint16_t next_id; /**< Index of next descriptor in ring. */
105 uint16_t last_id; /**< Index of last scattered descriptor. */
109 * Structure associated with each RX queue.
111 struct igb_rx_queue {
112 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
113 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
114 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
115 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
116 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
117 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
118 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
119 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
120 uint16_t nb_rx_desc; /**< number of RX descriptors. */
121 uint16_t rx_tail; /**< current value of RDT register. */
122 uint16_t nb_rx_hold; /**< number of held free RX desc. */
123 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
124 uint16_t queue_id; /**< RX queue index. */
125 uint16_t reg_idx; /**< RX queue register index. */
126 uint8_t port_id; /**< Device port identifier. */
127 uint8_t pthresh; /**< Prefetch threshold register. */
128 uint8_t hthresh; /**< Host threshold register. */
129 uint8_t wthresh; /**< Write-back threshold register. */
130 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
131 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
135 * Hardware context number
137 enum igb_advctx_num {
138 IGB_CTX_0 = 0, /**< CTX0 */
139 IGB_CTX_1 = 1, /**< CTX1 */
140 IGB_CTX_NUM = 2, /**< CTX_NUM */
143 /** Offload features */
union igb_tx_offload {
	uint64_t data;
	struct {
147 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
148 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
		uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
150 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
151 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
153 /* uint64_t unused:8; */
158 * Compare mask for igb_tx_offload.data,
159 * should be in sync with igb_tx_offload layout.
161 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
162 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
163 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
164 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
165 /** Mac + IP + TCP + Mss mask. */
166 #define TX_TSO_CMP_MASK \
167 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
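/*
 * Illustrative sketch (not part of the driver): how the compare masks
 * above are meant to be used.  Two offload words match for a given
 * context when they agree on every field selected by cmp_mask; fields
 * outside the mask are "don't care".  The helper name is hypothetical.
 */
static inline int
igb_tx_offload_match_example(union igb_tx_offload cached,
		union igb_tx_offload request, uint64_t cmp_mask)
{
	/* Compare only the fields selected by the mask. */
	return (cached.data & cmp_mask) == (request.data & cmp_mask);
}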
 * Structure used to check whether a new context descriptor needs to be built.
172 struct igb_advctx_info {
173 uint64_t flags; /**< ol_flags related to context build. */
174 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
175 union igb_tx_offload tx_offload;
176 /** compare mask for tx offload. */
177 union igb_tx_offload tx_offload_mask;
181 * Structure associated with each TX queue.
183 struct igb_tx_queue {
184 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
185 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
186 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
187 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
188 uint32_t txd_type; /**< Device-specific TXD type */
189 uint16_t nb_tx_desc; /**< number of TX descriptors. */
190 uint16_t tx_tail; /**< Current value of TDT register. */
	uint16_t tx_head;
	/**< Index of first used TX descriptor. */
193 uint16_t queue_id; /**< TX queue index. */
194 uint16_t reg_idx; /**< TX queue register index. */
195 uint8_t port_id; /**< Device port identifier. */
196 uint8_t pthresh; /**< Prefetch threshold register. */
197 uint8_t hthresh; /**< Host threshold register. */
198 uint8_t wthresh; /**< Write-back threshold register. */
	uint32_t ctx_curr;
	/**< Current used hardware descriptor. */
	uint32_t ctx_start;
	/**< Start context position for transmit queue. */
203 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
204 /**< Hardware context history.*/
208 #define RTE_PMD_USE_PREFETCH
211 #ifdef RTE_PMD_USE_PREFETCH
212 #define rte_igb_prefetch(p) rte_prefetch0(p)
214 #define rte_igb_prefetch(p) do {} while(0)
217 #ifdef RTE_PMD_PACKET_PREFETCH
218 #define rte_packet_prefetch(p) rte_prefetch1(p)
220 #define rte_packet_prefetch(p) do {} while(0)
 * Macros for the VMDq feature on 1 GbE NICs.
226 #define E1000_VMOLR_SIZE (8)
227 #define IGB_TSO_MAX_HDRLEN (512)
228 #define IGB_TSO_MAX_MSS (9216)
/*********************************************************************
 *
 *  TX function
 *
 **********************************************************************/

/*
 * There are some hardware limitations for TCP segmentation offload,
 * so check whether the parameters are valid.
 */
240 static inline uint64_t
241 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
	if (!(ol_req & PKT_TX_TCP_SEG))
		return ol_req;
245 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
246 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
		ol_req &= ~PKT_TX_TCP_SEG;
		ol_req |= PKT_TX_TCP_CKSUM;
	}

	return ol_req;
}
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is kept as a separate function to leave room for optimization;
 * rework is required to go with pre-defined values.
260 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
261 volatile struct e1000_adv_tx_context_desc *ctx_txd,
262 uint64_t ol_flags, union igb_tx_offload tx_offload)
264 uint32_t type_tucmd_mlhl;
265 uint32_t mss_l4len_idx;
266 uint32_t ctx_idx, ctx_curr;
267 uint32_t vlan_macip_lens;
268 union igb_tx_offload tx_offload_mask;
270 ctx_curr = txq->ctx_curr;
271 ctx_idx = ctx_curr + txq->ctx_start;
273 tx_offload_mask.data = 0;
276 /* Specify which HW CTX to upload. */
277 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
279 if (ol_flags & PKT_TX_VLAN_PKT)
280 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
282 /* check if TCP segmentation required for this packet */
283 if (ol_flags & PKT_TX_TCP_SEG) {
284 /* implies IP cksum in IPv4 */
285 if (ol_flags & PKT_TX_IP_CKSUM)
286 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
287 E1000_ADVTXD_TUCMD_L4T_TCP |
288 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
290 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
291 E1000_ADVTXD_TUCMD_L4T_TCP |
292 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
294 tx_offload_mask.data |= TX_TSO_CMP_MASK;
295 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
296 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
297 } else { /* no TSO, check if hardware checksum is needed */
298 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
299 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
301 if (ol_flags & PKT_TX_IP_CKSUM)
302 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
304 switch (ol_flags & PKT_TX_L4_MASK) {
305 case PKT_TX_UDP_CKSUM:
306 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
307 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
308 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
310 case PKT_TX_TCP_CKSUM:
311 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
312 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
313 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
315 case PKT_TX_SCTP_CKSUM:
316 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
317 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
318 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
321 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
322 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
327 txq->ctx_cache[ctx_curr].flags = ol_flags;
328 txq->ctx_cache[ctx_idx].tx_offload.data =
329 tx_offload_mask.data & tx_offload.data;
330 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
332 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
333 vlan_macip_lens = (uint32_t)tx_offload.data;
334 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
335 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
336 ctx_txd->seqnum_seed = 0;
340 * Check which hardware context can be used. Use the existing match
341 * or create a new context descriptor.
343 static inline uint32_t
344 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
345 union igb_tx_offload tx_offload)
347 /* If match with the current context */
348 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
349 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
350 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
351 return txq->ctx_curr;
	/* Check the second context: toggle to the other cache slot. */
	txq->ctx_curr ^= 1;
356 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
357 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
358 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
359 return txq->ctx_curr;
	/* Mismatch, use the previous context */
	return IGB_CTX_NUM;
}
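/*
 * Illustrative sketch of the caller contract (hypothetical helper):
 * when what_advctx_update() returns IGB_CTX_NUM, neither cached slot
 * matched and the caller must write a fresh context descriptor ahead
 * of the data descriptors, as eth_igb_xmit_pkts() does below.
 */
static inline uint32_t
igb_need_new_ctx_example(struct igb_tx_queue *txq, uint64_t flags,
		union igb_tx_offload tx_offload)
{
	return what_advctx_update(txq, flags, tx_offload) == IGB_CTX_NUM;
}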
366 static inline uint32_t
367 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
369 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
370 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
373 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
374 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
375 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
379 static inline uint32_t
380 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
383 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
384 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
385 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
386 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
391 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
394 struct igb_tx_queue *txq;
395 struct igb_tx_entry *sw_ring;
396 struct igb_tx_entry *txe, *txn;
397 volatile union e1000_adv_tx_desc *txr;
398 volatile union e1000_adv_tx_desc *txd;
399 struct rte_mbuf *tx_pkt;
400 struct rte_mbuf *m_seg;
401 uint64_t buf_dma_addr;
402 uint32_t olinfo_status;
403 uint32_t cmd_type_len;
412 uint32_t new_ctx = 0;
414 union igb_tx_offload tx_offload = {0};
417 sw_ring = txq->sw_ring;
419 tx_id = txq->tx_tail;
420 txe = &sw_ring[tx_id];
422 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
424 pkt_len = tx_pkt->pkt_len;
426 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
429 * The number of descriptors that must be allocated for a
430 * packet is the number of segments of that packet, plus 1
431 * Context Descriptor for the VLAN Tag Identifier, if any.
432 * Determine the last TX descriptor to allocate in the TX ring
433 * for the packet, starting from the current position (tx_id)
436 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
438 ol_flags = tx_pkt->ol_flags;
439 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
		/* If a context descriptor needs to be built. */
443 tx_offload.l2_len = tx_pkt->l2_len;
444 tx_offload.l3_len = tx_pkt->l3_len;
445 tx_offload.l4_len = tx_pkt->l4_len;
446 tx_offload.vlan_tci = tx_pkt->vlan_tci;
447 tx_offload.tso_segsz = tx_pkt->tso_segsz;
448 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
450 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
			/* Only allocate a context descriptor if required. */
452 new_ctx = (ctx == IGB_CTX_NUM);
454 tx_last = (uint16_t) (tx_last + new_ctx);
456 if (tx_last >= txq->nb_tx_desc)
457 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
459 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
460 " tx_first=%u tx_last=%u",
461 (unsigned) txq->port_id,
462 (unsigned) txq->queue_id,
468 * Check if there are enough free descriptors in the TX ring
469 * to transmit the next packet.
470 * This operation is based on the two following rules:
472 * 1- Only check that the last needed TX descriptor can be
473 * allocated (by construction, if that descriptor is free,
474 * all intermediate ones are also free).
476 * For this purpose, the index of the last TX descriptor
477 * used for a packet (the "last descriptor" of a packet)
478 * is recorded in the TX entries (the last one included)
479 * that are associated with all TX descriptors allocated
 * 2- Avoid allocating the last free TX descriptor of the
483 * ring, in order to never set the TDT register with the
484 * same value stored in parallel by the NIC in the TDH
485 * register, which makes the TX engine of the NIC enter
486 * in a deadlock situation.
 * By extension, avoid allocating a free descriptor that
489 * belongs to the last set of free descriptors allocated
490 * to the same packet previously transmitted.
494 * The "last descriptor" of the previously sent packet, if any,
495 * which used the last descriptor to allocate.
497 tx_end = sw_ring[tx_last].last_id;
500 * The next descriptor following that "last descriptor" in the
503 tx_end = sw_ring[tx_end].next_id;
506 * The "last descriptor" associated with that next descriptor.
508 tx_end = sw_ring[tx_end].last_id;
511 * Check that this descriptor is free.
513 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
520 * Set common flags of all TX Data Descriptors.
522 * The following bits must be set in all Data Descriptors:
523 * - E1000_ADVTXD_DTYP_DATA
524 * - E1000_ADVTXD_DCMD_DEXT
526 * The following bits must be set in the first Data Descriptor
527 * and are ignored in the other ones:
528 * - E1000_ADVTXD_DCMD_IFCS
529 * - E1000_ADVTXD_MAC_1588
530 * - E1000_ADVTXD_DCMD_VLE
532 * The following bits must only be set in the last Data
534 * - E1000_TXD_CMD_EOP
536 * The following bits can be set in any Data Descriptor, but
537 * are only set in the last Data Descriptor:
540 cmd_type_len = txq->txd_type |
541 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
542 if (tx_ol_req & PKT_TX_TCP_SEG)
543 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
544 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
545 #if defined(RTE_LIBRTE_IEEE1588)
546 if (ol_flags & PKT_TX_IEEE1588_TMST)
547 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
550 /* Setup TX Advanced context descriptor if required */
552 volatile struct e1000_adv_tx_context_desc *
555 ctx_txd = (volatile struct
556 e1000_adv_tx_context_desc *)
559 txn = &sw_ring[txe->next_id];
560 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
562 if (txe->mbuf != NULL) {
563 rte_pktmbuf_free_seg(txe->mbuf);
567 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
569 txe->last_id = tx_last;
570 tx_id = txe->next_id;
574 /* Setup the TX Advanced Data Descriptor */
575 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
576 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
577 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
582 txn = &sw_ring[txe->next_id];
585 if (txe->mbuf != NULL)
586 rte_pktmbuf_free_seg(txe->mbuf);
590 * Set up transmit descriptor.
592 slen = (uint16_t) m_seg->data_len;
593 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
594 txd->read.buffer_addr =
595 rte_cpu_to_le_64(buf_dma_addr);
596 txd->read.cmd_type_len =
597 rte_cpu_to_le_32(cmd_type_len | slen);
598 txd->read.olinfo_status =
599 rte_cpu_to_le_32(olinfo_status);
600 txe->last_id = tx_last;
601 tx_id = txe->next_id;
604 } while (m_seg != NULL);
607 * The last packet data descriptor needs End Of Packet (EOP)
608 * and Report Status (RS).
610 txd->read.cmd_type_len |=
611 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
617 * Set the Transmit Descriptor Tail (TDT).
619 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
620 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
621 (unsigned) txq->port_id, (unsigned) txq->queue_id,
622 (unsigned) tx_id, (unsigned) nb_tx);
	txq->tx_tail = tx_id;

	return nb_tx;
}
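/*
 * Usage sketch (application side, not part of the driver): a minimal
 * transmit loop built on rte_eth_tx_burst(), which dispatches to
 * eth_igb_xmit_pkts() for igb ports.  The port and packet array are
 * assumed to be initialized elsewhere.
 */
static void
igb_tx_burst_example(uint8_t port, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	/* Retry until the TX ring has accepted the whole burst. */
	while (sent < n)
		sent += rte_eth_tx_burst(port, 0, pkts + sent,
				(uint16_t)(n - sent));
}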
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
633 #define IGB_PACKET_TYPE_IPV4 0X01
634 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
635 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
636 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
637 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
638 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
639 #define IGB_PACKET_TYPE_IPV6 0X04
640 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
641 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
642 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
643 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
644 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
645 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
646 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
647 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
648 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
649 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
650 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
651 #define IGB_PACKET_TYPE_MAX 0X80
652 #define IGB_PACKET_TYPE_MASK 0X7F
653 #define IGB_PACKET_TYPE_SHIFT 0X04
654 static inline uint32_t
655 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
657 static const uint32_t
658 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
659 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
661 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
662 RTE_PTYPE_L3_IPV4_EXT,
663 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
665 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
666 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
667 RTE_PTYPE_INNER_L3_IPV6,
668 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
669 RTE_PTYPE_L3_IPV6_EXT,
670 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
671 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
672 RTE_PTYPE_INNER_L3_IPV6_EXT,
673 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
674 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
675 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
676 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
677 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
678 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
679 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
680 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
681 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
682 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
683 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
684 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
685 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
686 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
687 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
688 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
689 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
690 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
691 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
692 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
693 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
694 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
695 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
696 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
697 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
698 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
699 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
700 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
702 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
703 return RTE_PTYPE_UNKNOWN;
705 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
707 return ptype_table[pkt_info];
710 static inline uint64_t
711 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
713 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
715 #if defined(RTE_LIBRTE_IEEE1588)
716 static uint32_t ip_pkt_etqf_map[8] = {
717 0, 0, 0, PKT_RX_IEEE1588_PTP,
721 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
722 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
724 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
725 if (hw->mac.type == e1000_i210)
726 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
728 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
736 static inline uint64_t
737 rx_desc_status_to_pkt_flags(uint32_t rx_status)
741 /* Check if VLAN present */
742 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
744 #if defined(RTE_LIBRTE_IEEE1588)
745 if (rx_status & E1000_RXD_STAT_TMST)
746 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
751 static inline uint64_t
752 rx_desc_error_to_pkt_flags(uint32_t rx_status)
755 * Bit 30: IPE, IPv4 checksum error
756 * Bit 29: L4I, L4I integrity error
759 static uint64_t error_to_pkt_flags_map[4] = {
760 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
761 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
763 return error_to_pkt_flags_map[(rx_status >>
764 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
768 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
771 struct igb_rx_queue *rxq;
772 volatile union e1000_adv_rx_desc *rx_ring;
773 volatile union e1000_adv_rx_desc *rxdp;
774 struct igb_rx_entry *sw_ring;
775 struct igb_rx_entry *rxe;
776 struct rte_mbuf *rxm;
777 struct rte_mbuf *nmb;
778 union e1000_adv_rx_desc rxd;
781 uint32_t hlen_type_rss;
791 rx_id = rxq->rx_tail;
792 rx_ring = rxq->rx_ring;
793 sw_ring = rxq->sw_ring;
794 while (nb_rx < nb_pkts) {
796 * The order of operations here is important as the DD status
797 * bit must not be read after any other descriptor fields.
798 * rx_ring and rxdp are pointing to volatile data so the order
799 * of accesses cannot be reordered by the compiler. If they were
800 * not volatile, they could be reordered which could lead to
801 * using invalid descriptor fields when read from rxd.
803 rxdp = &rx_ring[rx_id];
804 staterr = rxdp->wb.upper.status_error;
805 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
812 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
813 * likely to be invalid and to be dropped by the various
814 * validation checks performed by the network stack.
816 * Allocate a new mbuf to replenish the RX ring descriptor.
817 * If the allocation fails:
818 * - arrange for that RX descriptor to be the first one
819 * being parsed the next time the receive function is
820 * invoked [on the same queue].
822 * - Stop parsing the RX ring and return immediately.
 * This policy does not drop the packet received in the RX
 * descriptor for which the allocation of a new mbuf failed.
 * Thus, it allows that packet to be retrieved later if
 * mbufs have been freed in the meantime.
828 * As a side effect, holding RX descriptors instead of
829 * systematically giving them back to the NIC may lead to
830 * RX ring exhaustion situations.
831 * However, the NIC can gracefully prevent such situations
832 * to happen by sending specific "back-pressure" flow control
833 * frames to its peer(s).
835 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
836 "staterr=0x%x pkt_len=%u",
837 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
838 (unsigned) rx_id, (unsigned) staterr,
839 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
841 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
843 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
844 "queue_id=%u", (unsigned) rxq->port_id,
845 (unsigned) rxq->queue_id);
846 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
851 rxe = &sw_ring[rx_id];
853 if (rx_id == rxq->nb_rx_desc)
856 /* Prefetch next mbuf while processing current one. */
857 rte_igb_prefetch(sw_ring[rx_id].mbuf);
860 * When next RX descriptor is on a cache-line boundary,
 * prefetch the next 4 RX descriptors and the next 8 pointers
 * to mbufs.
864 if ((rx_id & 0x3) == 0) {
865 rte_igb_prefetch(&rx_ring[rx_id]);
866 rte_igb_prefetch(&sw_ring[rx_id]);
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
873 rxdp->read.hdr_addr = 0;
874 rxdp->read.pkt_addr = dma_addr;
877 * Initialize the returned mbuf.
878 * 1) setup generic mbuf fields:
879 * - number of segments,
882 * - RX port identifier.
883 * 2) integrate hardware offload data, if any:
885 * - IP checksum flag,
886 * - VLAN TCI, if any,
889 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
891 rxm->data_off = RTE_PKTMBUF_HEADROOM;
892 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
895 rxm->pkt_len = pkt_len;
896 rxm->data_len = pkt_len;
897 rxm->port = rxq->port_id;
899 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
900 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
901 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
902 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
904 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
905 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
906 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
907 rxm->ol_flags = pkt_flags;
908 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
909 lo_dword.hs_rss.pkt_info);
912 * Store the mbuf address into the next entry of the array
913 * of returned packets.
915 rx_pkts[nb_rx++] = rxm;
917 rxq->rx_tail = rx_id;
920 * If the number of free RX descriptors is greater than the RX free
921 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
923 * Update the RDT with the value of the last processed RX descriptor
924 * minus 1, to guarantee that the RDT register is never equal to the
 * RDH register, which creates a "full" ring situation from the
926 * hardware point of view...
928 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
929 if (nb_hold > rxq->rx_free_thresh) {
930 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
931 "nb_hold=%u nb_rx=%u",
932 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
933 (unsigned) rx_id, (unsigned) nb_hold,
935 rx_id = (uint16_t) ((rx_id == 0) ?
936 (rxq->nb_rx_desc - 1) : (rx_id - 1));
937 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
	rxq->nb_rx_hold = nb_hold;
	return nb_rx;
}
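/*
 * Usage sketch (application side): polling one queue with
 * rte_eth_rx_burst(), which dispatches to eth_igb_recv_pkts() for igb
 * ports.  Here each mbuf is simply freed; a real application would
 * process the packet instead.
 */
static void
igb_rx_poll_example(uint8_t port)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port, 0, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);
}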
945 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
948 struct igb_rx_queue *rxq;
949 volatile union e1000_adv_rx_desc *rx_ring;
950 volatile union e1000_adv_rx_desc *rxdp;
951 struct igb_rx_entry *sw_ring;
952 struct igb_rx_entry *rxe;
953 struct rte_mbuf *first_seg;
954 struct rte_mbuf *last_seg;
955 struct rte_mbuf *rxm;
956 struct rte_mbuf *nmb;
957 union e1000_adv_rx_desc rxd;
958 uint64_t dma; /* Physical address of mbuf data buffer */
960 uint32_t hlen_type_rss;
970 rx_id = rxq->rx_tail;
971 rx_ring = rxq->rx_ring;
972 sw_ring = rxq->sw_ring;
975 * Retrieve RX context of current packet, if any.
977 first_seg = rxq->pkt_first_seg;
978 last_seg = rxq->pkt_last_seg;
980 while (nb_rx < nb_pkts) {
983 * The order of operations here is important as the DD status
984 * bit must not be read after any other descriptor fields.
985 * rx_ring and rxdp are pointing to volatile data so the order
986 * of accesses cannot be reordered by the compiler. If they were
987 * not volatile, they could be reordered which could lead to
988 * using invalid descriptor fields when read from rxd.
990 rxdp = &rx_ring[rx_id];
991 staterr = rxdp->wb.upper.status_error;
992 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
999 * Allocate a new mbuf to replenish the RX ring descriptor.
1000 * If the allocation fails:
1001 * - arrange for that RX descriptor to be the first one
1002 * being parsed the next time the receive function is
1003 * invoked [on the same queue].
1005 * - Stop parsing the RX ring and return immediately.
1007 * This policy does not drop the packet received in the RX
1008 * descriptor for which the allocation of a new mbuf failed.
 * Thus, it allows that packet to be retrieved later if
 * mbufs have been freed in the meantime.
1011 * As a side effect, holding RX descriptors instead of
1012 * systematically giving them back to the NIC may lead to
1013 * RX ring exhaustion situations.
1014 * However, the NIC can gracefully prevent such situations
1015 * to happen by sending specific "back-pressure" flow control
1016 * frames to its peer(s).
1018 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1019 "staterr=0x%x data_len=%u",
1020 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1021 (unsigned) rx_id, (unsigned) staterr,
1022 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1024 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1026 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1027 "queue_id=%u", (unsigned) rxq->port_id,
1028 (unsigned) rxq->queue_id);
1029 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1034 rxe = &sw_ring[rx_id];
1036 if (rx_id == rxq->nb_rx_desc)
1039 /* Prefetch next mbuf while processing current one. */
1040 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1043 * When next RX descriptor is on a cache-line boundary,
 * prefetch the next 4 RX descriptors and the next 8 pointers
 * to mbufs.
1047 if ((rx_id & 0x3) == 0) {
1048 rte_igb_prefetch(&rx_ring[rx_id]);
1049 rte_igb_prefetch(&sw_ring[rx_id]);
1053 * Update RX descriptor with the physical address of the new
1054 * data buffer of the new allocated mbuf.
1058 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1059 rxdp->read.pkt_addr = dma;
1060 rxdp->read.hdr_addr = 0;
1063 * Set data length & data buffer address of mbuf.
1065 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1066 rxm->data_len = data_len;
1067 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1070 * If this is the first buffer of the received packet,
1071 * set the pointer to the first mbuf of the packet and
1072 * initialize its context.
1073 * Otherwise, update the total length and the number of segments
1074 * of the current scattered packet, and update the pointer to
1075 * the last mbuf of the current packet.
1077 if (first_seg == NULL) {
1079 first_seg->pkt_len = data_len;
1080 first_seg->nb_segs = 1;
1082 first_seg->pkt_len += data_len;
1083 first_seg->nb_segs++;
1084 last_seg->next = rxm;
1088 * If this is not the last buffer of the received packet,
1089 * update the pointer to the last mbuf of the current scattered
1090 * packet and continue to parse the RX ring.
1092 if (! (staterr & E1000_RXD_STAT_EOP)) {
1098 * This is the last buffer of the received packet.
1099 * If the CRC is not stripped by the hardware:
1100 * - Subtract the CRC length from the total packet length.
1101 * - If the last buffer only contains the whole CRC or a part
1102 * of it, free the mbuf associated to the last buffer.
1103 * If part of the CRC is also contained in the previous
1104 * mbuf, subtract the length of that CRC part from the
1105 * data length of the previous mbuf.
1108 if (unlikely(rxq->crc_len > 0)) {
1109 first_seg->pkt_len -= ETHER_CRC_LEN;
1110 if (data_len <= ETHER_CRC_LEN) {
1111 rte_pktmbuf_free_seg(rxm);
1112 first_seg->nb_segs--;
1113 last_seg->data_len = (uint16_t)
1114 (last_seg->data_len -
1115 (ETHER_CRC_LEN - data_len));
1116 last_seg->next = NULL;
1119 (uint16_t) (data_len - ETHER_CRC_LEN);
1123 * Initialize the first mbuf of the returned packet:
1124 * - RX port identifier,
1125 * - hardware offload data, if any:
1126 * - RSS flag & hash,
1127 * - IP checksum flag,
1128 * - VLAN TCI, if any,
1131 first_seg->port = rxq->port_id;
1132 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1135 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1136 * set in the pkt_flags field.
1138 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1139 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1140 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1141 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1142 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1143 first_seg->ol_flags = pkt_flags;
1144 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1145 lower.lo_dword.hs_rss.pkt_info);
1147 /* Prefetch data of first segment, if configured to do so. */
1148 rte_packet_prefetch((char *)first_seg->buf_addr +
1149 first_seg->data_off);
1152 * Store the mbuf address into the next entry of the array
1153 * of returned packets.
1155 rx_pkts[nb_rx++] = first_seg;
 * Set up the receive context for a new packet.
1164 * Record index of the next RX descriptor to probe.
1166 rxq->rx_tail = rx_id;
1169 * Save receive context.
1171 rxq->pkt_first_seg = first_seg;
1172 rxq->pkt_last_seg = last_seg;
1175 * If the number of free RX descriptors is greater than the RX free
1176 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1178 * Update the RDT with the value of the last processed RX descriptor
1179 * minus 1, to guarantee that the RDT register is never equal to the
 * RDH register, which creates a "full" ring situation from the
1181 * hardware point of view...
1183 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1184 if (nb_hold > rxq->rx_free_thresh) {
1185 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1186 "nb_hold=%u nb_rx=%u",
1187 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1188 (unsigned) rx_id, (unsigned) nb_hold,
1190 rx_id = (uint16_t) ((rx_id == 0) ?
1191 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1192 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1195 rxq->nb_rx_hold = nb_hold;
1200 * Maximum number of Ring Descriptors.
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
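/*
 * Worked example of the constraint above (sketch only): an advanced
 * descriptor is 16 bytes, so a ring length that is a multiple of
 * 128 bytes means the descriptor count must be a multiple of
 * 128 / 16 = 8, which is what IGB_RXD_ALIGN/IGB_TXD_ALIGN encode.
 */
static inline int
igb_ring_len_valid_example(uint16_t nb_desc)
{
	return (nb_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0;
}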
1208 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1212 if (txq->sw_ring != NULL) {
1213 for (i = 0; i < txq->nb_tx_desc; i++) {
1214 if (txq->sw_ring[i].mbuf != NULL) {
1215 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1216 txq->sw_ring[i].mbuf = NULL;
1223 igb_tx_queue_release(struct igb_tx_queue *txq)
1226 igb_tx_queue_release_mbufs(txq);
1227 rte_free(txq->sw_ring);
1233 eth_igb_tx_queue_release(void *txq)
1235 igb_tx_queue_release(txq);
1239 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1244 memset((void*)&txq->ctx_cache, 0,
1245 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1249 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1251 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1252 struct igb_tx_entry *txe = txq->sw_ring;
1254 struct e1000_hw *hw;
1256 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1257 /* Zero out HW ring memory */
1258 for (i = 0; i < txq->nb_tx_desc; i++) {
1259 txq->tx_ring[i] = zeroed_desc;
1262 /* Initialize ring entries */
1263 prev = (uint16_t)(txq->nb_tx_desc - 1);
1264 for (i = 0; i < txq->nb_tx_desc; i++) {
1265 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1267 txd->wb.status = E1000_TXD_STAT_DD;
1270 txe[prev].next_id = i;
1274 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1275 /* 82575 specific, each tx queue will use 2 hw contexts */
1276 if (hw->mac.type == e1000_82575)
1277 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1279 igb_reset_tx_queue_stat(txq);
1283 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1286 unsigned int socket_id,
1287 const struct rte_eth_txconf *tx_conf)
1289 const struct rte_memzone *tz;
1290 struct igb_tx_queue *txq;
1291 struct e1000_hw *hw;
1294 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1297 * Validate number of transmit descriptors.
 * It must not exceed the hardware maximum, and must be a multiple
 * of IGB_TXD_ALIGN.
1301 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1302 (nb_desc > E1000_MAX_RING_DESC) ||
1303 (nb_desc < E1000_MIN_RING_DESC)) {
 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
 * driver.
1311 if (tx_conf->tx_free_thresh != 0)
1312 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1313 "used for the 1G driver.");
1314 if (tx_conf->tx_rs_thresh != 0)
1315 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1316 "used for the 1G driver.");
1317 if (tx_conf->tx_thresh.wthresh == 0)
1318 PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
1319 "consider setting the TX WTHRESH value to 4, 8, "
1322 /* Free memory prior to re-allocation if needed */
1323 if (dev->data->tx_queues[queue_idx] != NULL) {
1324 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1325 dev->data->tx_queues[queue_idx] = NULL;
1328 /* First allocate the tx queue data structure */
1329 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1330 RTE_CACHE_LINE_SIZE);
1335 * Allocate TX ring hardware descriptors. A memzone large enough to
1336 * handle the maximum ring size is allocated in order to allow for
1337 * resizing in later calls to the queue setup function.
1339 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1340 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1341 E1000_ALIGN, socket_id);
1343 igb_tx_queue_release(txq);
1347 txq->nb_tx_desc = nb_desc;
1348 txq->pthresh = tx_conf->tx_thresh.pthresh;
1349 txq->hthresh = tx_conf->tx_thresh.hthresh;
1350 txq->wthresh = tx_conf->tx_thresh.wthresh;
1351 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1353 txq->queue_id = queue_idx;
1354 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1355 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1356 txq->port_id = dev->data->port_id;
1358 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1359 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1361 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1362 /* Allocate software ring */
1363 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1364 sizeof(struct igb_tx_entry) * nb_desc,
1365 RTE_CACHE_LINE_SIZE);
1366 if (txq->sw_ring == NULL) {
1367 igb_tx_queue_release(txq);
1370 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1371 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1373 igb_reset_tx_queue(txq, dev);
1374 dev->tx_pkt_burst = eth_igb_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}
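/*
 * Usage sketch (application side): configuring a TX queue through the
 * generic ethdev API, which lands in eth_igb_tx_queue_setup() above.
 * The WTHRESH value of 4 follows the advice logged above, and 512
 * descriptors satisfy the IGB_TXD_ALIGN constraint.  All values are
 * illustrative, not recommendations.
 */
static int
igb_tx_setup_example(uint8_t port, unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 4 },
	};

	return rte_eth_tx_queue_setup(port, 0, 512, socket_id, &txconf);
}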
1381 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1385 if (rxq->sw_ring != NULL) {
1386 for (i = 0; i < rxq->nb_rx_desc; i++) {
1387 if (rxq->sw_ring[i].mbuf != NULL) {
1388 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1389 rxq->sw_ring[i].mbuf = NULL;
1396 igb_rx_queue_release(struct igb_rx_queue *rxq)
1399 igb_rx_queue_release_mbufs(rxq);
1400 rte_free(rxq->sw_ring);
1406 eth_igb_rx_queue_release(void *rxq)
1408 igb_rx_queue_release(rxq);
1412 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1414 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1417 /* Zero out HW ring memory */
1418 for (i = 0; i < rxq->nb_rx_desc; i++) {
1419 rxq->rx_ring[i] = zeroed_desc;
1423 rxq->pkt_first_seg = NULL;
1424 rxq->pkt_last_seg = NULL;
1428 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1431 unsigned int socket_id,
1432 const struct rte_eth_rxconf *rx_conf,
1433 struct rte_mempool *mp)
1435 const struct rte_memzone *rz;
1436 struct igb_rx_queue *rxq;
1437 struct e1000_hw *hw;
1440 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1443 * Validate number of receive descriptors.
 * It must not exceed the hardware maximum, and must be a multiple
 * of IGB_RXD_ALIGN.
1447 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1448 (nb_desc > E1000_MAX_RING_DESC) ||
1449 (nb_desc < E1000_MIN_RING_DESC)) {
1453 /* Free memory prior to re-allocation if needed */
1454 if (dev->data->rx_queues[queue_idx] != NULL) {
1455 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1456 dev->data->rx_queues[queue_idx] = NULL;
1459 /* First allocate the RX queue data structure. */
1460 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1461 RTE_CACHE_LINE_SIZE);
1465 rxq->nb_rx_desc = nb_desc;
1466 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1467 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1468 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1469 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1471 rxq->drop_en = rx_conf->rx_drop_en;
1472 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1473 rxq->queue_id = queue_idx;
1474 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1475 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1476 rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ?
			0 : ETHER_CRC_LEN);
1481 * Allocate RX ring hardware descriptors. A memzone large enough to
1482 * handle the maximum ring size is allocated in order to allow for
1483 * resizing in later calls to the queue setup function.
1485 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1486 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1487 E1000_ALIGN, socket_id);
1489 igb_rx_queue_release(rxq);
1492 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1493 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1494 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1495 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1497 /* Allocate software ring. */
1498 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1499 sizeof(struct igb_rx_entry) * nb_desc,
1500 RTE_CACHE_LINE_SIZE);
1501 if (rxq->sw_ring == NULL) {
1502 igb_rx_queue_release(rxq);
1505 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1506 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1508 dev->data->rx_queues[queue_idx] = rxq;
	igb_reset_rx_queue(rxq);

	return 0;
}
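/*
 * Usage sketch (application side): configuring an RX queue through the
 * generic ethdev API, which lands in eth_igb_rx_queue_setup() above.
 * Passing NULL for rx_conf selects the driver defaults; mb_pool must
 * provide buffers large enough for the expected frame size.
 */
static int
igb_rx_setup_example(uint8_t port, unsigned int socket_id,
		struct rte_mempool *mb_pool)
{
	return rte_eth_rx_queue_setup(port, 0, 512, socket_id,
			NULL, mb_pool);
}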
1515 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1517 #define IGB_RXQ_SCAN_INTERVAL 4
1518 volatile union e1000_adv_rx_desc *rxdp;
1519 struct igb_rx_queue *rxq;
1522 if (rx_queue_id >= dev->data->nb_rx_queues) {
1523 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1527 rxq = dev->data->rx_queues[rx_queue_id];
1528 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1530 while ((desc < rxq->nb_rx_desc) &&
1531 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1532 desc += IGB_RXQ_SCAN_INTERVAL;
1533 rxdp += IGB_RXQ_SCAN_INTERVAL;
1534 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1535 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1536 desc - rxq->nb_rx_desc]);
1543 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1545 volatile union e1000_adv_rx_desc *rxdp;
1546 struct igb_rx_queue *rxq = rx_queue;
1549 if (unlikely(offset >= rxq->nb_rx_desc))
1551 desc = rxq->rx_tail + offset;
1552 if (desc >= rxq->nb_rx_desc)
1553 desc -= rxq->nb_rx_desc;
1555 rxdp = &rxq->rx_ring[desc];
	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
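/*
 * Usage sketch: rte_eth_rx_descriptor_done(), which dispatches to
 * eth_igb_rx_descriptor_done() above, lets an application test whether
 * the descriptor that lies "offset" entries past the current tail has
 * been written back, without actually receiving it.
 */
static int
igb_desc_done_example(uint8_t port, uint16_t queue)
{
	/* 1 if the descriptor 7 positions ahead carries a packet. */
	return rte_eth_rx_descriptor_done(port, queue, 7);
}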
1560 igb_dev_clear_queues(struct rte_eth_dev *dev)
1563 struct igb_tx_queue *txq;
1564 struct igb_rx_queue *rxq;
1566 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1567 txq = dev->data->tx_queues[i];
1569 igb_tx_queue_release_mbufs(txq);
1570 igb_reset_tx_queue(txq, dev);
1574 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1575 rxq = dev->data->rx_queues[i];
1577 igb_rx_queue_release_mbufs(rxq);
1578 igb_reset_rx_queue(rxq);
1584 igb_dev_free_queues(struct rte_eth_dev *dev)
1588 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1589 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1590 dev->data->rx_queues[i] = NULL;
1592 dev->data->nb_rx_queues = 0;
1594 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1595 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1596 dev->data->tx_queues[i] = NULL;
1598 dev->data->nb_tx_queues = 0;
1602 * Receive Side Scaling (RSS).
1603 * See section 7.1.1.7 in the following document:
1604 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1607 * The source and destination IP addresses of the IP header and the source and
1608 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1609 * against a configurable random key to compute a 32-bit RSS hash result.
1610 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1611 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
 * RSS output index, which is used as the index of the RX queue where
 * the received packets are stored.
1614 * The following output is supplied in the RX write-back descriptor:
1615 * - 32-bit result of the Microsoft RSS hash function,
 * - 4-bit RSS type field.
 */
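/*
 * Worked example of the RETA lookup described above (sketch only):
 * the 7 LSBs of the 32-bit RSS hash select one of the 128
 * redirection-table entries, whose 3-bit value is the RX queue index.
 * "reta" stands in for a software copy of the hardware table.
 */
static inline uint8_t
igb_rss_queue_example(const uint8_t reta[128], uint32_t rss_hash)
{
	return reta[rss_hash & 0x7F] & 0x07;
}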
1620 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1621 * Used as the default key.
1623 static uint8_t rss_intel_key[40] = {
1624 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1625 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1626 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1627 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1628 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1632 igb_rss_disable(struct rte_eth_dev *dev)
1634 struct e1000_hw *hw;
1637 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1638 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1639 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1640 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1644 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1652 hash_key = rss_conf->rss_key;
1653 if (hash_key != NULL) {
1654 /* Fill in RSS hash key */
1655 for (i = 0; i < 10; i++) {
1656 rss_key = hash_key[(i * 4)];
1657 rss_key |= hash_key[(i * 4) + 1] << 8;
1658 rss_key |= hash_key[(i * 4) + 2] << 16;
1659 rss_key |= hash_key[(i * 4) + 3] << 24;
1660 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1664 /* Set configured hashing protocols in MRQC register */
1665 rss_hf = rss_conf->rss_hf;
1666 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1667 if (rss_hf & ETH_RSS_IPV4)
1668 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1669 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1670 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1671 if (rss_hf & ETH_RSS_IPV6)
1672 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1673 if (rss_hf & ETH_RSS_IPV6_EX)
1674 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1675 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1676 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1677 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1678 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1679 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1680 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1681 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1682 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1683 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1684 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1685 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1689 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1690 struct rte_eth_rss_conf *rss_conf)
1692 struct e1000_hw *hw;
1696 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1699 * Before changing anything, first check that the update RSS operation
1700 * does not attempt to disable RSS, if RSS was enabled at
1701 * initialization time, or does not attempt to enable RSS, if RSS was
1702 * disabled at initialization time.
1704 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1705 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1706 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -(EINVAL);
		return 0; /* Nothing to do */
	}
	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -(EINVAL);
	igb_hw_rss_hash_set(hw, rss_conf);
	return 0;
}
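/*
 * Usage sketch (application side): updating the RSS configuration at
 * runtime through rte_eth_dev_rss_hash_update(), which dispatches to
 * eth_igb_rss_hash_update() above.  The default Intel key defined
 * earlier in this file is reused; the hash-function set shown here is
 * illustrative.
 */
static int
igb_rss_update_example(uint8_t port)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = rss_intel_key,
		.rss_key_len = 40,
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port, &conf);
}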
1718 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1719 struct rte_eth_rss_conf *rss_conf)
1721 struct e1000_hw *hw;
1728 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1729 hash_key = rss_conf->rss_key;
1730 if (hash_key != NULL) {
1731 /* Return RSS hash key */
1732 for (i = 0; i < 10; i++) {
1733 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1734 hash_key[(i * 4)] = rss_key & 0x000000FF;
1735 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1736 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1737 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1741 /* Get RSS functions configured in MRQC register */
1742 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1743 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1744 rss_conf->rss_hf = 0;
1748 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1749 rss_hf |= ETH_RSS_IPV4;
1750 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1751 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1752 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1753 rss_hf |= ETH_RSS_IPV6;
1754 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1755 rss_hf |= ETH_RSS_IPV6_EX;
1756 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1757 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1758 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1759 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1760 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1761 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1762 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1763 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1764 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1765 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1766 rss_conf->rss_hf = rss_hf;
1771 igb_rss_configure(struct rte_eth_dev *dev)
1773 struct rte_eth_rss_conf rss_conf;
1774 struct e1000_hw *hw;
1778 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1780 /* Fill in redirection table. */
1781 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1782 for (i = 0; i < 128; i++) {
1789 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1790 i % dev->data->nb_rx_queues : 0);
1791 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1793 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1797 * Configure the RSS key and the RSS protocols used to compute
1798 * the RSS hash of input packets.
1800 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1801 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1802 igb_rss_disable(dev);
1805 if (rss_conf.rss_key == NULL)
1806 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1807 igb_hw_rss_hash_set(hw, &rss_conf);
 * Check whether the MAC type supports VMDq.
 * Return 1 if it does; otherwise return 0.
1815 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1817 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1819 switch (hw->mac.type) {
1840 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1846 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1848 struct rte_eth_vmdq_rx_conf *cfg;
1849 struct e1000_hw *hw;
1850 uint32_t mrqc, vt_ctl, vmolr, rctl;
1853 PMD_INIT_FUNC_TRACE();
1855 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1856 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
	/* Check if the MAC type supports VMDq; a return value of 0 means no support */
1859 if (igb_is_vmdq_supported(dev) == 0)
1862 igb_rss_disable(dev);
	/* RCTL: enable VLAN filter */
1865 rctl = E1000_READ_REG(hw, E1000_RCTL);
1866 rctl |= E1000_RCTL_VFE;
1867 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1869 /* MRQC: enable vmdq */
1870 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1871 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1872 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1874 /* VTCTL: pool selection according to VLAN tag */
1875 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1876 if (cfg->enable_default_pool)
1877 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1878 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1879 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1881 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1882 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1883 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1884 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1887 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1888 vmolr |= E1000_VMOLR_AUPE;
1889 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1890 vmolr |= E1000_VMOLR_ROMPE;
1891 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1892 vmolr |= E1000_VMOLR_ROPE;
1893 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1894 vmolr |= E1000_VMOLR_BAM;
1895 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1896 vmolr |= E1000_VMOLR_MPME;
1898 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
 * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
 * Both 82576 and 82580 support it.
1905 if (hw->mac.type != e1000_i350) {
1906 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1907 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1908 vmolr |= E1000_VMOLR_STRVLAN;
1909 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1913 /* VFTA - enable all vlan filters */
1914 for (i = 0; i < IGB_VFTA_SIZE; i++)
1915 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
	/* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1918 if (hw->mac.type != e1000_82580)
1919 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
 * RAH/RAL: allow pools to read specific MAC addresses.
 * In this case, all pools should be able to read from MAC address 0.
1925 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1926 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1928 /* VLVF: set up filters for vlan tags as configured */
1929 for (i = 0; i < cfg->nb_pool_maps; i++) {
1930 /* set vlan id in VF register and set the valid bit */
1931 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1932 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1933 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1934 E1000_VLVF_POOLSEL_MASK)));
	E1000_WRITE_FLUSH(hw);

	return 0;
}
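/*
 * Configuration sketch (application side): the rte_eth_vmdq_rx_conf
 * consumed by igb_vmdq_rx_hw_configure() above.  Values are
 * illustrative: VLAN 100 is steered to pool 0 and VLAN 101 to pool 1.
 */
static void
igb_vmdq_conf_example(struct rte_eth_conf *port_conf)
{
	struct rte_eth_vmdq_rx_conf *cfg =
		&port_conf->rx_adv_conf.vmdq_rx_conf;

	cfg->nb_queue_pools = ETH_8_POOLS;
	cfg->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
	cfg->nb_pool_maps = 2;
	cfg->pool_map[0].vlan_id = 100;
	cfg->pool_map[0].pools = 1ULL << 0;
	cfg->pool_map[1].vlan_id = 101;
	cfg->pool_map[1].pools = 1ULL << 1;
}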
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
1950 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1952 struct igb_rx_entry *rxe = rxq->sw_ring;
1956 /* Initialize software ring entries. */
1957 for (i = 0; i < rxq->nb_rx_desc; i++) {
1958 volatile union e1000_adv_rx_desc *rxd;
1959 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1962 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1963 "queue_id=%hu", rxq->queue_id);
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1968 rxd = &rxq->rx_ring[i];
1969 rxd->read.hdr_addr = 0;
1970 rxd->read.pkt_addr = dma_addr;
1977 #define E1000_MRQC_DEF_Q_SHIFT (3)
1979 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1981 struct e1000_hw *hw =
1982 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1985 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
 * SRIOV active scheme.
 * FIXME: add support for RSS together with VMDq & SRIOV.
1990 mrqc = E1000_MRQC_ENABLE_VMDQ;
1991 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1992 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1993 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1994 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1996 * SRIOV inactive scheme
1998 switch (dev->data->dev_conf.rxmode.mq_mode) {
2000 igb_rss_configure(dev);
2002 case ETH_MQ_RX_VMDQ_ONLY:
2003 /*Configure general VMDQ only RX parameters*/
2004 igb_vmdq_rx_hw_configure(dev);
2006 case ETH_MQ_RX_NONE:
		/* If mq_mode is none, disable RSS. */
2009 igb_rss_disable(dev);
2018 eth_igb_rx_init(struct rte_eth_dev *dev)
2020 struct e1000_hw *hw;
2021 struct igb_rx_queue *rxq;
2026 uint16_t rctl_bsize;
2030 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2034 * Make sure receives are disabled while setting
2035 * up the descriptor ring.
2037 rctl = E1000_READ_REG(hw, E1000_RCTL);
2038 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2041 * Configure support of jumbo frames, if any.
2043 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2044 rctl |= E1000_RCTL_LPE;
 * Set the maximum packet length by default; it may be updated
 * together with enabling/disabling dual VLAN.
2050 E1000_WRITE_REG(hw, E1000_RLPML,
2051 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2054 rctl &= ~E1000_RCTL_LPE;
2056 /* Configure and enable each RX queue. */
2058 dev->rx_pkt_burst = eth_igb_recv_pkts;
2059 for (i = 0; i < dev->data->nb_rx_queues; i++) {

                rxq = dev->data->rx_queues[i];

                /* Allocate buffers for descriptor rings and set up queue. */
                ret = igb_alloc_rx_queue_mbufs(rxq);
                if (ret)
                        return ret;

                /*
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
                 */
                rxq->crc_len =
                        (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
                                        0 : ETHER_CRC_LEN);

                bus_addr = rxq->rx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
                                rxq->nb_rx_desc *
                                sizeof(union e1000_adv_rx_desc));
                E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
                                (uint32_t)(bus_addr >> 32));
                E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx),
                                (uint32_t)bus_addr);

                srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /* Configure RX buffer size. */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                        RTE_PKTMBUF_HEADROOM);
                if (buf_size >= 1024) {
                        /*
                         * Configure the BSIZEPACKET field of the SRRCTL
                         * register of the queue.
                         * Value is in 1 KB resolution, from 1 KB to 127 KB.
                         * If this field is equal to 0b, then RCTL.BSIZE
                         * determines the RX packet buffer size.
                         */
                        srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                                   E1000_SRRCTL_BSIZEPKT_MASK);
                        buf_size = (uint16_t) ((srrctl &
                                                E1000_SRRCTL_BSIZEPKT_MASK) <<
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);
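
                        /*
                         * Worked example (illustrative): with the 1 KB
                         * granularity described above (a BSIZEPKT shift of
                         * 10), a mempool data room of 2048 +
                         * RTE_PKTMBUF_HEADROOM gives buf_size 2048 ->
                         * BSIZEPACKET field 2, and the recomputed buf_size
                         * stays 2048; a 2500-byte room would round down to an
                         * effective 2048.
                         */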

                        /* Add dual VLAN length when checking whether the
                         * frame fits in one buffer. */
                        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                        2 * VLAN_TAG_SIZE) > buf_size) {
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                                dev->data->scattered_rx = 1;
                        }
                } else {
                        /*
                         * Use BSIZE field of the device RCTL register.
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                        dev->data->scattered_rx = 1;
                }

                /* Set if packets are dropped when no descriptors available */
                if (rxq->drop_en)
                        srrctl |= E1000_SRRCTL_DROP_EN;

                E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

                /* Enable this RX queue. */
                rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
                rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
                rxdctl &= 0xFFF00000;
                rxdctl |= (rxq->pthresh & 0x1F);
                rxdctl |= ((rxq->hthresh & 0x1F) << 8);
                rxdctl |= ((rxq->wthresh & 0x1F) << 16);
                E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
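
                /*
                 * Illustrative note: RXDCTL packs the prefetch (pthresh),
                 * host (hthresh) and write-back (wthresh) thresholds into one
                 * register, e.g. pthresh = 8, hthresh = 8, wthresh = 4 yields
                 *     rxdctl = E1000_RXDCTL_QUEUE_ENABLE | 8 | (8 << 8) |
                 *              (4 << 16);
                 * each field being masked to its 5-bit width (0x1F).
                 */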
        }

        if (dev->data->dev_conf.rxmode.enable_scatter) {
                if (!dev->data->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
        }

        /*
         * Setup BSIZE field of RCTL register, if needed.
         * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
         * register, since the code above configures the SRRCTL register of
         * the RX queue in such a case.
         * All configurable sizes are:
         * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
         *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
         *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
         *  2048: rctl |= E1000_RCTL_SZ_2048;
         *  1024: rctl |= E1000_RCTL_SZ_1024;
         *   512: rctl |= E1000_RCTL_SZ_512;
         *   256: rctl |= E1000_RCTL_SZ_256;
         */
        if (rctl_bsize > 0) {
                if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
                        rctl |= E1000_RCTL_SZ_512;
                else /* 256 <= buf_size < 512 - use 256 */
                        rctl |= E1000_RCTL_SZ_256;
        }

        /*
         * Configure RSS if the device is configured with multiple RX queues.
         */
        igb_dev_mq_rx_configure(dev);

        /* Update rctl, since igb_dev_mq_rx_configure may change its value. */
        rctl |= E1000_READ_REG(hw, E1000_RCTL);

        /*
         * Setup the Checksum Register.
         * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
         */
        rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
        rxcsum |= E1000_RXCSUM_PCSD;

        /* Enable both L3/L4 rx checksum offload */
        if (dev->data->dev_conf.rxmode.hw_ip_checksum)
                rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
        else
                rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
        E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
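
        /*
         * Why PCSD is always set above (informal note): the full-packet
         * checksum and the RSS hash are reported through the same descriptor
         * field, so setting the Packet Checksum Disable (PCSD) bit frees that
         * field for the RSS hash value; per-packet L3/L4 checksum validation
         * remains available via the IPOFL/TUOFL error bits.
         */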

        /* Setup the Receive Control Register. */
        if (dev->data->dev_conf.rxmode.hw_strip_crc) {
                rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

                /* set STRCRC bit in all queues */
                if (hw->mac.type == e1000_i350 ||
                    hw->mac.type == e1000_i210 ||
                    hw->mac.type == e1000_i211 ||
                    hw->mac.type == e1000_i354) {
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                uint32_t dvmolr;

                                rxq = dev->data->rx_queues[i];
                                dvmolr = E1000_READ_REG(hw,
                                        E1000_DVMOLR(rxq->reg_idx));
                                dvmolr |= E1000_DVMOLR_STRCRC;
                                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
                                        dvmolr);
                        }
                }
        } else {
                rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

                /* clear STRCRC bit in all queues */
                if (hw->mac.type == e1000_i350 ||
                    hw->mac.type == e1000_i210 ||
                    hw->mac.type == e1000_i211 ||
                    hw->mac.type == e1000_i354) {
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                uint32_t dvmolr;

                                rxq = dev->data->rx_queues[i];
                                dvmolr = E1000_READ_REG(hw,
                                        E1000_DVMOLR(rxq->reg_idx));
                                dvmolr &= ~E1000_DVMOLR_STRCRC;
                                E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
                                        dvmolr);
                        }
                }
        }

        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
                E1000_RCTL_RDMTS_HALF |
                (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

        /* Make sure VLAN Filters are off. */
        if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
                rctl &= ~E1000_RCTL_VFE;
        /* Don't store bad packets. */
        rctl &= ~E1000_RCTL_SBP;

        /* Enable Receives. */
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
        /*
         * Setup the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
         */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
                E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx),
                        rxq->nb_rx_desc - 1);
        }

        return 0;
}
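
/*
 * Call-flow note (a sketch of the expected usage, not enforced here):
 * eth_igb_rx_init() is intended to run from the device start path, after
 * rte_eth_dev_configure() and per-queue rte_eth_rx_queue_setup() have
 * created the rings, roughly:
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     rte_eth_rx_queue_setup(port_id, q, nb_desc, socket_id, NULL, mb_pool);
 *     rte_eth_dev_start(port_id);  // PMD start op ends up in rx/tx init
 */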

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw;
        struct igb_tx_queue *txq;
        uint32_t tctl, txdctl;
        uint16_t i;

        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings. */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                uint64_t bus_addr;

                txq = dev->data->tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;

                E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
                                txq->nb_tx_desc *
                                sizeof(union e1000_adv_tx_desc));
                E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
                                (uint32_t)(bus_addr >> 32));
                E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx),
                                (uint32_t)bus_addr);

                /* Setup the HW Tx Head and Tail descriptor pointers. */
                E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
                E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

                /* Setup Transmit threshold registers. */
                txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
                txdctl |= txq->pthresh & 0x1F;
                txdctl |= ((txq->hthresh & 0x1F) << 8);
                txdctl |= ((txq->wthresh & 0x1F) << 16);
                txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
                E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
        }

        /* Program the Transmit Control Register. */
        tctl = E1000_READ_REG(hw, E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

        e1000_config_collision_dist(hw);

        /* This write will effectively turn on the transmit unit. */
        E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw;
        struct igb_rx_queue *rxq;
        uint32_t srrctl;
        uint16_t buf_size, rctl_bsize, i;
        int ret;

        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* setup MTU */
        e1000_rlpml_set_vf(hw,
                (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
                VLAN_TAG_SIZE));
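
        /*
         * Informal note (an assumption about the base-driver helper): a VF
         * cannot program the RLPML register directly, so e1000_rlpml_set_vf()
         * is expected to request the new maximum frame size from the PF over
         * the mailbox.
         */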

        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
        dev->rx_pkt_burst = eth_igb_recv_pkts;
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                uint64_t bus_addr;
                uint32_t rxdctl;

                rxq = dev->data->rx_queues[i];

                /* Allocate buffers for descriptor rings and set up queue. */
                ret = igb_alloc_rx_queue_mbufs(rxq);
                if (ret)
                        return ret;

                bus_addr = rxq->rx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_RDLEN(i),
                                rxq->nb_rx_desc *
                                sizeof(union e1000_adv_rx_desc));
                E1000_WRITE_REG(hw, E1000_RDBAH(i),
                                (uint32_t)(bus_addr >> 32));
                E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

                srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /* Configure RX buffer size. */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                        RTE_PKTMBUF_HEADROOM);
                if (buf_size >= 1024) {
                        /*
                         * Configure the BSIZEPACKET field of the SRRCTL
                         * register of the queue.
                         * Value is in 1 KB resolution, from 1 KB to 127 KB.
                         * If this field is equal to 0b, then RCTL.BSIZE
                         * determines the RX packet buffer size.
                         */
                        srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                                   E1000_SRRCTL_BSIZEPKT_MASK);
                        buf_size = (uint16_t) ((srrctl &
                                                E1000_SRRCTL_BSIZEPKT_MASK) <<
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);

                        /* Add dual VLAN length when checking whether the
                         * frame fits in one buffer. */
                        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                        2 * VLAN_TAG_SIZE) > buf_size) {
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                                dev->data->scattered_rx = 1;
                        }
                } else {
                        /*
                         * Use BSIZE field of the device RCTL register.
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                        dev->data->scattered_rx = 1;
                }

                /* Set if packets are dropped when no descriptors available */
                if (rxq->drop_en)
                        srrctl |= E1000_SRRCTL_DROP_EN;

                E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

                /* Enable this RX queue. */
                rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
                rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
                rxdctl &= 0xFFF00000;
                rxdctl |= (rxq->pthresh & 0x1F);
                rxdctl |= ((rxq->hthresh & 0x1F) << 8);
                if (hw->mac.type == e1000_vfadapt) {
                        /*
                         * Workaround for 82576 VF erratum: force WTHRESH to 1
                         * to avoid descriptor write-back sometimes not being
                         * triggered.
                         */
                        rxdctl |= 0x10000;
                        PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
                } else
                        rxdctl |= ((rxq->wthresh & 0x1F) << 16);
                E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
        }

        if (dev->data->dev_conf.rxmode.enable_scatter) {
                if (!dev->data->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
        }

        /*
         * Setup the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
         */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                E1000_WRITE_REG(hw, E1000_RDH(i), 0);
                E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
        }

        return 0;
}

/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw;
        struct igb_tx_queue *txq;
        uint32_t txdctl;
        uint16_t i;

        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings. */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                uint64_t bus_addr;

                txq = dev->data->tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_TDLEN(i),
                                txq->nb_tx_desc *
                                sizeof(union e1000_adv_tx_desc));
                E1000_WRITE_REG(hw, E1000_TDBAH(i),
                                (uint32_t)(bus_addr >> 32));
                E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

                /* Setup the HW Tx Head and Tail descriptor pointers. */
                E1000_WRITE_REG(hw, E1000_TDT(i), 0);
                E1000_WRITE_REG(hw, E1000_TDH(i), 0);

                /* Setup Transmit threshold registers. */
                txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
                txdctl |= txq->pthresh & 0x1F;
                txdctl |= ((txq->hthresh & 0x1F) << 8);
                if (hw->mac.type == e1000_82576) {
                        /*
                         * Workaround for 82576 VF erratum: force WTHRESH to 1
                         * to avoid descriptor write-back sometimes not being
                         * triggered.
                         */
                        txdctl |= 0x10000;
                        PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
                } else
                        txdctl |= ((txq->wthresh & 0x1F) << 16);
                txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
                E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
        }
}

void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo)
{
        struct igb_rx_queue *rxq;

        rxq = dev->data->rx_queues[queue_id];

        qinfo->mp = rxq->mb_pool;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = rxq->nb_rx_desc;

        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
        qinfo->conf.rx_drop_en = rxq->drop_en;
}

void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
{
        struct igb_tx_queue *txq;

        txq = dev->data->tx_queues[queue_id];

        qinfo->nb_desc = txq->nb_tx_desc;

        qinfo->conf.tx_thresh.pthresh = txq->pthresh;
        qinfo->conf.tx_thresh.hthresh = txq->hthresh;
        qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}
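
/*
 * Illustrative usage (application side, assuming the standard ethdev
 * introspection API): the two callbacks above back
 * rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get(), e.g.
 *
 *     struct rte_eth_rxq_info rx_info;
 *
 *     if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
 *             printf("RX ring has %u descriptors\n", rx_info.nb_desc);
 */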