/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/* Bit mask of the offload flags that require building a TX context descriptor. */
#define IGB_TX_OFFLOAD_MASK (			 \
		PKT_TX_VLAN_PKT |		 \
		PKT_TX_IP_CKSUM |		 \
		PKT_TX_L4_MASK |		 \
		PKT_TX_TCP_SEG)

static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}

#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
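/*
 * Note: RTE_MBUF_DATA_DMA_ADDR() points at the mbuf's current payload offset
 * (used when posting TX buffers), while the _DEFAULT variant points just past
 * the fixed headroom (used when replenishing RX descriptors with fresh,
 * empty mbufs).
 */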
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct igb_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct igb_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each RX queue.
 */
struct igb_rx_queue {
	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
	volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
	struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
	uint16_t            rx_tail;    /**< current value of RDT register. */
	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t            queue_id;   /**< RX queue index. */
	uint16_t            reg_idx;    /**< RX queue register index. */
	uint8_t             port_id;    /**< Device port identifier. */
	uint8_t             pthresh;    /**< Prefetch threshold register. */
	uint8_t             hthresh;    /**< Host threshold register. */
	uint8_t             wthresh;    /**< Write-back threshold register. */
	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
};
/**
 * Hardware context number.
 */
enum igb_advctx_num {
	IGB_CTX_0   = 0, /**< CTX0    */
	IGB_CTX_1   = 1, /**< CTX1    */
	IGB_CTX_NUM = 2, /**< CTX_NUM */
};

/** Offload features */
union igb_tx_offload {
	uint64_t data;
	struct {
		uint64_t l3_len:9;     /**< L3 (IP) Header Length. */
		uint64_t l2_len:7;     /**< L2 (MAC) Header Length. */
		uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
		uint64_t l4_len:8;     /**< L4 (TCP/UDP) Header Length. */
		uint64_t tso_segsz:16; /**< TCP TSO segment size. */

		/* uint64_t unused:8; */
	};
};
/*
 * Compare mask for igb_tx_offload.data,
 * should be in sync with the igb_tx_offload layout.
 */
#define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2/L3 header mask. */
#define TX_VLAN_CMP_MASK	0x00000000FFFF0000ULL /**< VLAN mask. */
#define TX_TCP_LEN_CMP_MASK	0x000000FF00000000ULL /**< TCP header mask. */
#define TX_TSO_MSS_CMP_MASK	0x00FFFF0000000000ULL /**< TSO segsz mask. */
/** MAC + IP + TCP + MSS mask. */
#define TX_TSO_CMP_MASK	\
	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
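/*
 * The masks above line up with the bit-field layout of union igb_tx_offload:
 * bits 0-15 cover l3_len + l2_len, bits 16-31 cover vlan_tci, bits 32-39 cover
 * l4_len, and bits 40-55 cover tso_segsz.
 */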
/**
 * Structure used to check whether a new context descriptor needs to be built.
 */
struct igb_advctx_info {
	uint64_t flags; /**< ol_flags related to context build. */
	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
	union igb_tx_offload tx_offload;
	/** compare mask for tx offload. */
	union igb_tx_offload tx_offload_mask;
};
/**
 * Structure associated with each TX queue.
 */
struct igb_tx_queue {
	volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
	struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
	uint32_t               txd_type;      /**< Device-specific TXD type */
	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
	uint16_t               tx_tail; /**< Current value of TDT register. */
	uint16_t               tx_head;
	/**< Index of first used TX descriptor. */
	uint16_t               queue_id; /**< TX queue index. */
	uint16_t               reg_idx;  /**< TX queue register index. */
	uint8_t                port_id;  /**< Device port identifier. */
	uint8_t                pthresh;  /**< Prefetch threshold register. */
	uint8_t                hthresh;  /**< Host threshold register. */
	uint8_t                wthresh;  /**< Write-back threshold register. */
	uint32_t               ctx_curr;
	/**< Current used hardware descriptor. */
	uint32_t               ctx_start;
	/**< Start context position for transmit queue. */
	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
	/**< Hardware context history.*/
};
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_igb_prefetch(p)	rte_prefetch0(p)
#else
#define rte_igb_prefetch(p)	do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif

/*
 * Macros for the VMDq feature on 1 GbE NICs.
 */
#define E1000_VMOLR_SIZE	(8)
#define IGB_TSO_MAX_HDRLEN	(512)
#define IGB_TSO_MAX_MSS		(9216)
/*********************************************************************
 *
 *  TX function
 *
 **********************************************************************/

/*
 * There are some limitations in hardware for TCP segmentation offload. We
 * should check whether the parameters are valid.
 */
static inline uint64_t
check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
{
	if (!(ol_req & PKT_TX_TCP_SEG))
		return ol_req;
	if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
			ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
		ol_req &= ~PKT_TX_TCP_SEG;
		ol_req |= PKT_TX_TCP_CKSUM;
	}
	return ol_req;
}
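/*
 * In other words, a request that exceeds the TSO limits above is silently
 * downgraded from segmentation offload to a plain TCP checksum offload, so the
 * packet is still sent (unsegmented) rather than dropped.
 */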
/*
 * Advanced context descriptors are almost the same between igb and ixgbe.
 * This is a separate function, looking for optimization opportunity here.
 * Rework required to go with the pre-defined values.
 */

static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
		volatile struct e1000_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, union igb_tx_offload tx_offload)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	uint32_t ctx_idx, ctx_curr;
	uint32_t vlan_macip_lens;
	union igb_tx_offload tx_offload_mask;

	ctx_curr = txq->ctx_curr;
	ctx_idx = ctx_curr + txq->ctx_start;

	tx_offload_mask.data = 0;
	type_tucmd_mlhl = 0;

	/* Specify which HW CTX to upload. */
	mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);

	if (ol_flags & PKT_TX_VLAN_PKT)
		tx_offload_mask.data |= TX_VLAN_CMP_MASK;

	/* check if TCP segmentation required for this packet */
	if (ol_flags & PKT_TX_TCP_SEG) {
		/* implies IP cksum in IPv4 */
		if (ol_flags & PKT_TX_IP_CKSUM)
			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
				E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
		else
			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
				E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;

		tx_offload_mask.data |= TX_TSO_CMP_MASK;
		mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
		mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
	} else { /* no TSO, check if hardware checksum is needed */
		if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
			tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;

		if (ol_flags & PKT_TX_IP_CKSUM)
			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;

		switch (ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case PKT_TX_TCP_CKSUM:
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case PKT_TX_SCTP_CKSUM:
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
			break;
		}
	}

	txq->ctx_cache[ctx_curr].flags = ol_flags;
	txq->ctx_cache[ctx_curr].tx_offload.data =
		tx_offload_mask.data & tx_offload.data;
	txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	vlan_macip_lens = (uint32_t)tx_offload.data;
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
	union igb_tx_offload tx_offload)
{
	/* If match with the current context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
			return txq->ctx_curr;
	}

	/* If match with the second context, flip to it */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
			return txq->ctx_curr;
	}

	/* Mismatch: a new context descriptor must be built */
	return IGB_CTX_NUM;
}
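/*
 * A return value of IGB_CTX_NUM tells the caller that neither cached context
 * matched; eth_igb_xmit_pkts() then reserves one extra descriptor and emits a
 * fresh context descriptor via igbe_set_xmit_ctx().
 */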
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
	static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
	uint32_t tmp;

	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
	return tmp;
}

static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
{
	uint32_t cmdtype;
	static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
	static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};

	cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
	cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
	return cmdtype;
}
397 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
400 struct igb_tx_queue *txq;
401 struct igb_tx_entry *sw_ring;
402 struct igb_tx_entry *txe, *txn;
403 volatile union e1000_adv_tx_desc *txr;
404 volatile union e1000_adv_tx_desc *txd;
405 struct rte_mbuf *tx_pkt;
406 struct rte_mbuf *m_seg;
407 uint64_t buf_dma_addr;
408 uint32_t olinfo_status;
409 uint32_t cmd_type_len;
418 uint32_t new_ctx = 0;
420 union igb_tx_offload tx_offload = {0};
423 sw_ring = txq->sw_ring;
425 tx_id = txq->tx_tail;
426 txe = &sw_ring[tx_id];
428 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
430 pkt_len = tx_pkt->pkt_len;
432 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
435 * The number of descriptors that must be allocated for a
436 * packet is the number of segments of that packet, plus 1
437 * Context Descriptor for the VLAN Tag Identifier, if any.
438 * Determine the last TX descriptor to allocate in the TX ring
439 * for the packet, starting from the current position (tx_id)
442 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
444 ol_flags = tx_pkt->ol_flags;
445 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
		/* Check whether a context descriptor needs to be built. */
449 tx_offload.l2_len = tx_pkt->l2_len;
450 tx_offload.l3_len = tx_pkt->l3_len;
451 tx_offload.l4_len = tx_pkt->l4_len;
452 tx_offload.vlan_tci = tx_pkt->vlan_tci;
453 tx_offload.tso_segsz = tx_pkt->tso_segsz;
454 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
456 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
			/* Only allocate a context descriptor if required */
458 new_ctx = (ctx == IGB_CTX_NUM);
460 tx_last = (uint16_t) (tx_last + new_ctx);
462 if (tx_last >= txq->nb_tx_desc)
463 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
465 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
466 " tx_first=%u tx_last=%u",
467 (unsigned) txq->port_id,
468 (unsigned) txq->queue_id,
474 * Check if there are enough free descriptors in the TX ring
475 * to transmit the next packet.
476 * This operation is based on the two following rules:
478 * 1- Only check that the last needed TX descriptor can be
479 * allocated (by construction, if that descriptor is free,
480 * all intermediate ones are also free).
482 * For this purpose, the index of the last TX descriptor
483 * used for a packet (the "last descriptor" of a packet)
484 * is recorded in the TX entries (the last one included)
485 * that are associated with all TX descriptors allocated
488 * 2- Avoid to allocate the last free TX descriptor of the
489 * ring, in order to never set the TDT register with the
490 * same value stored in parallel by the NIC in the TDH
491 * register, which makes the TX engine of the NIC enter
492 * in a deadlock situation.
494 * By extension, avoid to allocate a free descriptor that
495 * belongs to the last set of free descriptors allocated
496 * to the same packet previously transmitted.
500 * The "last descriptor" of the previously sent packet, if any,
501 * which used the last descriptor to allocate.
503 tx_end = sw_ring[tx_last].last_id;
506 * The next descriptor following that "last descriptor" in the
509 tx_end = sw_ring[tx_end].next_id;
512 * The "last descriptor" associated with that next descriptor.
514 tx_end = sw_ring[tx_end].last_id;
517 * Check that this descriptor is free.
519 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
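			/*
			 * If DD is not yet set on the would-be last descriptor,
			 * the ring does not currently have enough free
			 * descriptors for this packet (plus its optional
			 * context descriptor), so transmission stops here.
			 */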
526 * Set common flags of all TX Data Descriptors.
528 * The following bits must be set in all Data Descriptors:
529 * - E1000_ADVTXD_DTYP_DATA
530 * - E1000_ADVTXD_DCMD_DEXT
532 * The following bits must be set in the first Data Descriptor
533 * and are ignored in the other ones:
534 * - E1000_ADVTXD_DCMD_IFCS
535 * - E1000_ADVTXD_MAC_1588
536 * - E1000_ADVTXD_DCMD_VLE
538 * The following bits must only be set in the last Data
540 * - E1000_TXD_CMD_EOP
542 * The following bits can be set in any Data Descriptor, but
543 * are only set in the last Data Descriptor:
546 cmd_type_len = txq->txd_type |
547 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
548 if (tx_ol_req & PKT_TX_TCP_SEG)
549 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
550 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
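		/*
		 * For TSO packets, the PAYLEN field programmed above carries
		 * only the payload length: the L2/L3/L4 header lengths were
		 * subtracted from pkt_len just before.
		 */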
551 #if defined(RTE_LIBRTE_IEEE1588)
552 if (ol_flags & PKT_TX_IEEE1588_TMST)
553 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
556 /* Setup TX Advanced context descriptor if required */
558 volatile struct e1000_adv_tx_context_desc *
561 ctx_txd = (volatile struct
562 e1000_adv_tx_context_desc *)
565 txn = &sw_ring[txe->next_id];
566 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
568 if (txe->mbuf != NULL) {
569 rte_pktmbuf_free_seg(txe->mbuf);
573 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
575 txe->last_id = tx_last;
576 tx_id = txe->next_id;
580 /* Setup the TX Advanced Data Descriptor */
581 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
582 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
583 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
588 txn = &sw_ring[txe->next_id];
591 if (txe->mbuf != NULL)
592 rte_pktmbuf_free_seg(txe->mbuf);
596 * Set up transmit descriptor.
598 slen = (uint16_t) m_seg->data_len;
599 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
600 txd->read.buffer_addr =
601 rte_cpu_to_le_64(buf_dma_addr);
602 txd->read.cmd_type_len =
603 rte_cpu_to_le_32(cmd_type_len | slen);
604 txd->read.olinfo_status =
605 rte_cpu_to_le_32(olinfo_status);
606 txe->last_id = tx_last;
607 tx_id = txe->next_id;
610 } while (m_seg != NULL);
613 * The last packet data descriptor needs End Of Packet (EOP)
614 * and Report Status (RS).
616 txd->read.cmd_type_len |=
617 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
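		/*
		 * RS asks the hardware to report status (set the DD bit) for
		 * this descriptor once it has been transmitted; that DD
		 * write-back is what the free-descriptor check at the top of
		 * the loop relies on.
		 */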
623 * Set the Transmit Descriptor Tail (TDT).
625 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
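	/*
	 * Writing TDT is the doorbell: ownership of every descriptor up to,
	 * but not including, tx_id is handed to the NIC in one register write.
	 */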
626 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
627 (unsigned) txq->port_id, (unsigned) txq->queue_id,
628 (unsigned) tx_id, (unsigned) nb_tx);
629 txq->tx_tail = tx_id;
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
639 #define IGB_PACKET_TYPE_IPV4 0X01
640 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
641 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
642 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
643 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
644 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
645 #define IGB_PACKET_TYPE_IPV6 0X04
646 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
647 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
648 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
649 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
650 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
651 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
652 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
653 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
654 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
655 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
656 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
657 #define IGB_PACKET_TYPE_MAX 0X80
658 #define IGB_PACKET_TYPE_MASK 0X7F
659 #define IGB_PACKET_TYPE_SHIFT 0X04
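/*
 * The RX descriptor's pkt_info field encodes the packet type;
 * igb_rxd_pkt_info_to_pkt_type() shifts it right by IGB_PACKET_TYPE_SHIFT,
 * keeps the low 7 bits and uses the result as an index into the table below.
 */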
660 static inline uint32_t
661 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
663 static const uint32_t
664 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
665 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
667 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
668 RTE_PTYPE_L3_IPV4_EXT,
669 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
671 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
672 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673 RTE_PTYPE_INNER_L3_IPV6,
674 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
675 RTE_PTYPE_L3_IPV6_EXT,
676 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
677 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
678 RTE_PTYPE_INNER_L3_IPV6_EXT,
679 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
680 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
681 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
682 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
683 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
684 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
686 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
687 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
688 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
689 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
690 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
691 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
692 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
693 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
694 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
695 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
696 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
698 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
699 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
700 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
701 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
703 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
704 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
705 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
706 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
708 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
709 return RTE_PTYPE_UNKNOWN;
711 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
713 return ptype_table[pkt_info];
716 static inline uint64_t
717 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
719 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
721 #if defined(RTE_LIBRTE_IEEE1588)
722 static uint32_t ip_pkt_etqf_map[8] = {
723 0, 0, 0, PKT_RX_IEEE1588_PTP,
727 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
728 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
730 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
731 if (hw->mac.type == e1000_i210)
732 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
734 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
742 static inline uint64_t
743 rx_desc_status_to_pkt_flags(uint32_t rx_status)
747 /* Check if VLAN present */
748 pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
750 #if defined(RTE_LIBRTE_IEEE1588)
751 if (rx_status & E1000_RXD_STAT_TMST)
752 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
757 static inline uint64_t
758 rx_desc_error_to_pkt_flags(uint32_t rx_status)
761 * Bit 30: IPE, IPv4 checksum error
762 * Bit 29: L4I, L4I integrity error
765 static uint64_t error_to_pkt_flags_map[4] = {
766 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
767 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
769 return error_to_pkt_flags_map[(rx_status >>
770 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
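/*
 * The two checksum error bits (IPE and L4 integrity) are extracted together as
 * a 2-bit index, so each combination maps directly to the matching
 * PKT_RX_*_CKSUM_BAD flags in the table above.
 */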
774 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
777 struct igb_rx_queue *rxq;
778 volatile union e1000_adv_rx_desc *rx_ring;
779 volatile union e1000_adv_rx_desc *rxdp;
780 struct igb_rx_entry *sw_ring;
781 struct igb_rx_entry *rxe;
782 struct rte_mbuf *rxm;
783 struct rte_mbuf *nmb;
784 union e1000_adv_rx_desc rxd;
787 uint32_t hlen_type_rss;
797 rx_id = rxq->rx_tail;
798 rx_ring = rxq->rx_ring;
799 sw_ring = rxq->sw_ring;
800 while (nb_rx < nb_pkts) {
802 * The order of operations here is important as the DD status
803 * bit must not be read after any other descriptor fields.
804 * rx_ring and rxdp are pointing to volatile data so the order
805 * of accesses cannot be reordered by the compiler. If they were
806 * not volatile, they could be reordered which could lead to
807 * using invalid descriptor fields when read from rxd.
809 rxdp = &rx_ring[rx_id];
810 staterr = rxdp->wb.upper.status_error;
811 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
818 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
819 * likely to be invalid and to be dropped by the various
820 * validation checks performed by the network stack.
822 * Allocate a new mbuf to replenish the RX ring descriptor.
823 * If the allocation fails:
824 * - arrange for that RX descriptor to be the first one
825 * being parsed the next time the receive function is
826 * invoked [on the same queue].
828 * - Stop parsing the RX ring and return immediately.
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
834 * As a side effect, holding RX descriptors instead of
835 * systematically giving them back to the NIC may lead to
836 * RX ring exhaustion situations.
837 * However, the NIC can gracefully prevent such situations
838 * to happen by sending specific "back-pressure" flow control
839 * frames to its peer(s).
841 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
842 "staterr=0x%x pkt_len=%u",
843 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
844 (unsigned) rx_id, (unsigned) staterr,
845 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
847 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
849 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
850 "queue_id=%u", (unsigned) rxq->port_id,
851 (unsigned) rxq->queue_id);
852 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
857 rxe = &sw_ring[rx_id];
859 if (rx_id == rxq->nb_rx_desc)
862 /* Prefetch next mbuf while processing current one. */
863 rte_igb_prefetch(sw_ring[rx_id].mbuf);
866 * When next RX descriptor is on a cache-line boundary,
867 * prefetch the next 4 RX descriptors and the next 8 pointers
870 if ((rx_id & 0x3) == 0) {
871 rte_igb_prefetch(&rx_ring[rx_id]);
872 rte_igb_prefetch(&sw_ring[rx_id]);
878 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
879 rxdp->read.hdr_addr = 0;
880 rxdp->read.pkt_addr = dma_addr;
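		/*
		 * The descriptor just processed is immediately refilled with
		 * the freshly allocated mbuf; it is handed back to the
		 * hardware later, when the RDT register is advanced.
		 */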
883 * Initialize the returned mbuf.
884 * 1) setup generic mbuf fields:
885 * - number of segments,
888 * - RX port identifier.
889 * 2) integrate hardware offload data, if any:
891 * - IP checksum flag,
892 * - VLAN TCI, if any,
895 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
897 rxm->data_off = RTE_PKTMBUF_HEADROOM;
898 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
901 rxm->pkt_len = pkt_len;
902 rxm->data_len = pkt_len;
903 rxm->port = rxq->port_id;
905 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
906 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
907 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
908 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
910 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
911 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
912 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
913 rxm->ol_flags = pkt_flags;
914 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
915 lo_dword.hs_rss.pkt_info);
918 * Store the mbuf address into the next entry of the array
919 * of returned packets.
921 rx_pkts[nb_rx++] = rxm;
923 rxq->rx_tail = rx_id;
926 * If the number of free RX descriptors is greater than the RX free
927 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
929 * Update the RDT with the value of the last processed RX descriptor
930 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
932 * hardware point of view...
934 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
935 if (nb_hold > rxq->rx_free_thresh) {
936 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
937 "nb_hold=%u nb_rx=%u",
938 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
939 (unsigned) rx_id, (unsigned) nb_hold,
941 rx_id = (uint16_t) ((rx_id == 0) ?
942 (rxq->nb_rx_desc - 1) : (rx_id - 1));
943 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
946 rxq->nb_rx_hold = nb_hold;
951 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
954 struct igb_rx_queue *rxq;
955 volatile union e1000_adv_rx_desc *rx_ring;
956 volatile union e1000_adv_rx_desc *rxdp;
957 struct igb_rx_entry *sw_ring;
958 struct igb_rx_entry *rxe;
959 struct rte_mbuf *first_seg;
960 struct rte_mbuf *last_seg;
961 struct rte_mbuf *rxm;
962 struct rte_mbuf *nmb;
963 union e1000_adv_rx_desc rxd;
964 uint64_t dma; /* Physical address of mbuf data buffer */
966 uint32_t hlen_type_rss;
976 rx_id = rxq->rx_tail;
977 rx_ring = rxq->rx_ring;
978 sw_ring = rxq->sw_ring;
981 * Retrieve RX context of current packet, if any.
983 first_seg = rxq->pkt_first_seg;
984 last_seg = rxq->pkt_last_seg;
986 while (nb_rx < nb_pkts) {
989 * The order of operations here is important as the DD status
990 * bit must not be read after any other descriptor fields.
991 * rx_ring and rxdp are pointing to volatile data so the order
992 * of accesses cannot be reordered by the compiler. If they were
993 * not volatile, they could be reordered which could lead to
994 * using invalid descriptor fields when read from rxd.
996 rxdp = &rx_ring[rx_id];
997 staterr = rxdp->wb.upper.status_error;
998 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1005 * Allocate a new mbuf to replenish the RX ring descriptor.
1006 * If the allocation fails:
1007 * - arrange for that RX descriptor to be the first one
1008 * being parsed the next time the receive function is
1009 * invoked [on the same queue].
1011 * - Stop parsing the RX ring and return immediately.
1013 * This policy does not drop the packet received in the RX
1014 * descriptor for which the allocation of a new mbuf failed.
1015 * Thus, it allows that packet to be later retrieved if
1016 * mbuf have been freed in the mean time.
1017 * As a side effect, holding RX descriptors instead of
1018 * systematically giving them back to the NIC may lead to
1019 * RX ring exhaustion situations.
1020 * However, the NIC can gracefully prevent such situations
1021 * to happen by sending specific "back-pressure" flow control
1022 * frames to its peer(s).
1024 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1025 "staterr=0x%x data_len=%u",
1026 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1027 (unsigned) rx_id, (unsigned) staterr,
1028 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1030 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1032 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1033 "queue_id=%u", (unsigned) rxq->port_id,
1034 (unsigned) rxq->queue_id);
1035 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1040 rxe = &sw_ring[rx_id];
1042 if (rx_id == rxq->nb_rx_desc)
1045 /* Prefetch next mbuf while processing current one. */
1046 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1049 * When next RX descriptor is on a cache-line boundary,
1050 * prefetch the next 4 RX descriptors and the next 8 pointers
1053 if ((rx_id & 0x3) == 0) {
1054 rte_igb_prefetch(&rx_ring[rx_id]);
1055 rte_igb_prefetch(&sw_ring[rx_id]);
1059 * Update RX descriptor with the physical address of the new
1060 * data buffer of the new allocated mbuf.
1064 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1065 rxdp->read.pkt_addr = dma;
1066 rxdp->read.hdr_addr = 0;
1069 * Set data length & data buffer address of mbuf.
1071 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1072 rxm->data_len = data_len;
1073 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1076 * If this is the first buffer of the received packet,
1077 * set the pointer to the first mbuf of the packet and
1078 * initialize its context.
1079 * Otherwise, update the total length and the number of segments
1080 * of the current scattered packet, and update the pointer to
1081 * the last mbuf of the current packet.
1083 if (first_seg == NULL) {
1085 first_seg->pkt_len = data_len;
1086 first_seg->nb_segs = 1;
1088 first_seg->pkt_len += data_len;
1089 first_seg->nb_segs++;
1090 last_seg->next = rxm;
1094 * If this is not the last buffer of the received packet,
1095 * update the pointer to the last mbuf of the current scattered
1096 * packet and continue to parse the RX ring.
1098 if (! (staterr & E1000_RXD_STAT_EOP)) {
1104 * This is the last buffer of the received packet.
1105 * If the CRC is not stripped by the hardware:
1106 * - Subtract the CRC length from the total packet length.
1107 * - If the last buffer only contains the whole CRC or a part
1108 * of it, free the mbuf associated to the last buffer.
1109 * If part of the CRC is also contained in the previous
1110 * mbuf, subtract the length of that CRC part from the
1111 * data length of the previous mbuf.
1114 if (unlikely(rxq->crc_len > 0)) {
1115 first_seg->pkt_len -= ETHER_CRC_LEN;
1116 if (data_len <= ETHER_CRC_LEN) {
1117 rte_pktmbuf_free_seg(rxm);
1118 first_seg->nb_segs--;
1119 last_seg->data_len = (uint16_t)
1120 (last_seg->data_len -
1121 (ETHER_CRC_LEN - data_len));
1122 last_seg->next = NULL;
1125 (uint16_t) (data_len - ETHER_CRC_LEN);
1129 * Initialize the first mbuf of the returned packet:
1130 * - RX port identifier,
1131 * - hardware offload data, if any:
1132 * - RSS flag & hash,
1133 * - IP checksum flag,
1134 * - VLAN TCI, if any,
1137 first_seg->port = rxq->port_id;
1138 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1141 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1142 * set in the pkt_flags field.
1144 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1145 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1146 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1147 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1148 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1149 first_seg->ol_flags = pkt_flags;
1150 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1151 lower.lo_dword.hs_rss.pkt_info);
1153 /* Prefetch data of first segment, if configured to do so. */
1154 rte_packet_prefetch((char *)first_seg->buf_addr +
1155 first_seg->data_off);
1158 * Store the mbuf address into the next entry of the array
1159 * of returned packets.
1161 rx_pkts[nb_rx++] = first_seg;
1164 * Setup receipt context for a new packet.
1170 * Record index of the next RX descriptor to probe.
1172 rxq->rx_tail = rx_id;
1175 * Save receive context.
1177 rxq->pkt_first_seg = first_seg;
1178 rxq->pkt_last_seg = last_seg;
1181 * If the number of free RX descriptors is greater than the RX free
1182 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1184 * Update the RDT with the value of the last processed RX descriptor
1185 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
1187 * hardware point of view...
1189 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1190 if (nb_hold > rxq->rx_free_thresh) {
1191 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1192 "nb_hold=%u nb_rx=%u",
1193 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1194 (unsigned) rx_id, (unsigned) nb_hold,
1196 rx_id = (uint16_t) ((rx_id == 0) ?
1197 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1198 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1201 rxq->nb_rx_hold = nb_hold;
/**
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
1212 static const struct rte_memzone *
1213 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1214 uint16_t queue_id, uint32_t ring_size, int socket_id)
1216 char z_name[RTE_MEMZONE_NAMESIZE];
1217 const struct rte_memzone *mz;
1219 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1220 dev->driver->pci_drv.name, ring_name,
1221 dev->data->port_id, queue_id);
1222 mz = rte_memzone_lookup(z_name);
1226 #ifdef RTE_LIBRTE_XEN_DOM0
1227 return rte_memzone_reserve_bounded(z_name, ring_size,
1228 socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
1230 return rte_memzone_reserve_aligned(z_name, ring_size,
1231 socket_id, 0, E1000_ALIGN);
1236 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1240 if (txq->sw_ring != NULL) {
1241 for (i = 0; i < txq->nb_tx_desc; i++) {
1242 if (txq->sw_ring[i].mbuf != NULL) {
1243 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1244 txq->sw_ring[i].mbuf = NULL;
1251 igb_tx_queue_release(struct igb_tx_queue *txq)
1254 igb_tx_queue_release_mbufs(txq);
1255 rte_free(txq->sw_ring);
1261 eth_igb_tx_queue_release(void *txq)
1263 igb_tx_queue_release(txq);
1267 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1272 memset((void*)&txq->ctx_cache, 0,
1273 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1277 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1279 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1280 struct igb_tx_entry *txe = txq->sw_ring;
1282 struct e1000_hw *hw;
1284 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1285 /* Zero out HW ring memory */
1286 for (i = 0; i < txq->nb_tx_desc; i++) {
1287 txq->tx_ring[i] = zeroed_desc;
1290 /* Initialize ring entries */
1291 prev = (uint16_t)(txq->nb_tx_desc - 1);
1292 for (i = 0; i < txq->nb_tx_desc; i++) {
1293 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1295 txd->wb.status = E1000_TXD_STAT_DD;
1298 txe[prev].next_id = i;
1302 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1303 /* 82575 specific, each tx queue will use 2 hw contexts */
1304 if (hw->mac.type == e1000_82575)
1305 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
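	/*
	 * On 82575 the hardware context index space is shared between queues,
	 * so each TX queue gets its own pair of contexts starting at
	 * queue_id * IGB_CTX_NUM; on other MAC types ctx_start stays 0 and
	 * every queue simply uses contexts 0 and 1.
	 */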
1307 igb_reset_tx_queue_stat(txq);
1311 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1314 unsigned int socket_id,
1315 const struct rte_eth_txconf *tx_conf)
1317 const struct rte_memzone *tz;
1318 struct igb_tx_queue *txq;
1319 struct e1000_hw *hw;
1322 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_TXD_ALIGN.
	 */
1329 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1330 (nb_desc > E1000_MAX_RING_DESC) ||
1331 (nb_desc < E1000_MIN_RING_DESC)) {
	/*
	 * The tx_free_thresh and tx_rs_thresh values are not used in the
	 * 1 GbE driver.
	 */
1339 if (tx_conf->tx_free_thresh != 0)
1340 PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
1341 "used for the 1G driver.");
1342 if (tx_conf->tx_rs_thresh != 0)
1343 PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
1344 "used for the 1G driver.");
1345 if (tx_conf->tx_thresh.wthresh == 0)
		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
			     "consider setting the TX WTHRESH value to 4, 8, "
			     "or 16.");
1350 /* Free memory prior to re-allocation if needed */
1351 if (dev->data->tx_queues[queue_idx] != NULL) {
1352 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1353 dev->data->tx_queues[queue_idx] = NULL;
1356 /* First allocate the tx queue data structure */
1357 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1358 RTE_CACHE_LINE_SIZE);
1363 * Allocate TX ring hardware descriptors. A memzone large enough to
1364 * handle the maximum ring size is allocated in order to allow for
1365 * resizing in later calls to the queue setup function.
1367 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1368 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
1371 igb_tx_queue_release(txq);
1375 txq->nb_tx_desc = nb_desc;
1376 txq->pthresh = tx_conf->tx_thresh.pthresh;
1377 txq->hthresh = tx_conf->tx_thresh.hthresh;
1378 txq->wthresh = tx_conf->tx_thresh.wthresh;
1379 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1381 txq->queue_id = queue_idx;
1382 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1383 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1384 txq->port_id = dev->data->port_id;
1386 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1387 #ifndef RTE_LIBRTE_XEN_DOM0
1388 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1390 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1392 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1393 /* Allocate software ring */
1394 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1395 sizeof(struct igb_tx_entry) * nb_desc,
1396 RTE_CACHE_LINE_SIZE);
1397 if (txq->sw_ring == NULL) {
1398 igb_tx_queue_release(txq);
1401 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1402 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1404 igb_reset_tx_queue(txq, dev);
1405 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1406 dev->data->tx_queues[queue_idx] = txq;
1412 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1416 if (rxq->sw_ring != NULL) {
1417 for (i = 0; i < rxq->nb_rx_desc; i++) {
1418 if (rxq->sw_ring[i].mbuf != NULL) {
1419 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1420 rxq->sw_ring[i].mbuf = NULL;
1427 igb_rx_queue_release(struct igb_rx_queue *rxq)
1430 igb_rx_queue_release_mbufs(rxq);
1431 rte_free(rxq->sw_ring);
1437 eth_igb_rx_queue_release(void *rxq)
1439 igb_rx_queue_release(rxq);
1443 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1445 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1448 /* Zero out HW ring memory */
1449 for (i = 0; i < rxq->nb_rx_desc; i++) {
1450 rxq->rx_ring[i] = zeroed_desc;
1454 rxq->pkt_first_seg = NULL;
1455 rxq->pkt_last_seg = NULL;
1459 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1462 unsigned int socket_id,
1463 const struct rte_eth_rxconf *rx_conf,
1464 struct rte_mempool *mp)
1466 const struct rte_memzone *rz;
1467 struct igb_rx_queue *rxq;
1468 struct e1000_hw *hw;
1471 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IGB_RXD_ALIGN.
	 */
1478 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1479 (nb_desc > E1000_MAX_RING_DESC) ||
1480 (nb_desc < E1000_MIN_RING_DESC)) {
1484 /* Free memory prior to re-allocation if needed */
1485 if (dev->data->rx_queues[queue_idx] != NULL) {
1486 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1487 dev->data->rx_queues[queue_idx] = NULL;
1490 /* First allocate the RX queue data structure. */
1491 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1492 RTE_CACHE_LINE_SIZE);
1496 rxq->nb_rx_desc = nb_desc;
1497 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1498 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1499 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1500 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1502 rxq->drop_en = rx_conf->rx_drop_en;
1503 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1504 rxq->queue_id = queue_idx;
1505 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1506 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1507 rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ?
				0 : ETHER_CRC_LEN);
1512 * Allocate RX ring hardware descriptors. A memzone large enough to
1513 * handle the maximum ring size is allocated in order to allow for
1514 * resizing in later calls to the queue setup function.
1516 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1517 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
1519 igb_rx_queue_release(rxq);
1522 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1523 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1524 #ifndef RTE_LIBRTE_XEN_DOM0
1525 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1527 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1529 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1531 /* Allocate software ring. */
1532 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1533 sizeof(struct igb_rx_entry) * nb_desc,
1534 RTE_CACHE_LINE_SIZE);
1535 if (rxq->sw_ring == NULL) {
1536 igb_rx_queue_release(rxq);
1539 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1540 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1542 dev->data->rx_queues[queue_idx] = rxq;
1543 igb_reset_rx_queue(rxq);
1549 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1551 #define IGB_RXQ_SCAN_INTERVAL 4
1552 volatile union e1000_adv_rx_desc *rxdp;
1553 struct igb_rx_queue *rxq;
1556 if (rx_queue_id >= dev->data->nb_rx_queues) {
1557 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1561 rxq = dev->data->rx_queues[rx_queue_id];
1562 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1564 while ((desc < rxq->nb_rx_desc) &&
1565 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1566 desc += IGB_RXQ_SCAN_INTERVAL;
1567 rxdp += IGB_RXQ_SCAN_INTERVAL;
1568 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1569 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1570 desc - rxq->nb_rx_desc]);
1577 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1579 volatile union e1000_adv_rx_desc *rxdp;
1580 struct igb_rx_queue *rxq = rx_queue;
1583 if (unlikely(offset >= rxq->nb_rx_desc))
1585 desc = rxq->rx_tail + offset;
1586 if (desc >= rxq->nb_rx_desc)
1587 desc -= rxq->nb_rx_desc;
1589 rxdp = &rxq->rx_ring[desc];
1590 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1594 igb_dev_clear_queues(struct rte_eth_dev *dev)
1597 struct igb_tx_queue *txq;
1598 struct igb_rx_queue *rxq;
1600 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1601 txq = dev->data->tx_queues[i];
1603 igb_tx_queue_release_mbufs(txq);
1604 igb_reset_tx_queue(txq, dev);
1608 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1609 rxq = dev->data->rx_queues[i];
1611 igb_rx_queue_release_mbufs(rxq);
1612 igb_reset_rx_queue(rxq);
1618 igb_dev_free_queues(struct rte_eth_dev *dev)
1622 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1623 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1624 dev->data->rx_queues[i] = NULL;
1626 dev->data->nb_rx_queues = 0;
1628 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1629 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1630 dev->data->tx_queues[i] = NULL;
1632 dev->data->nb_tx_queues = 0;
1636 * Receive Side Scaling (RSS).
1637 * See section 7.1.1.7 in the following document:
1638 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1641 * The source and destination IP addresses of the IP header and the source and
1642 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1643 * against a configurable random key to compute a 32-bit RSS hash result.
1644 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1645 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1646 * RSS output index which is used as the RX queue index where to store the
1648 * The following output is supplied in the RX write-back descriptor:
1649 * - 32-bit result of the Microsoft RSS hash function,
1650 * - 4-bit RSS type field.
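 *
 * For example, with nb_rx_queues = 4, the receive queue chosen by the hardware
 * for a packet is simply RETA[rss_hash & 0x7F]; igb_rss_configure() below fills
 * the redirection table round-robin (i % nb_rx_queues) across the enabled queues.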
1654 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1655 * Used as the default key.
1657 static uint8_t rss_intel_key[40] = {
1658 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1659 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1660 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1661 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1662 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1666 igb_rss_disable(struct rte_eth_dev *dev)
1668 struct e1000_hw *hw;
1671 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1672 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1673 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1674 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1678 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1686 hash_key = rss_conf->rss_key;
1687 if (hash_key != NULL) {
1688 /* Fill in RSS hash key */
1689 for (i = 0; i < 10; i++) {
1690 rss_key = hash_key[(i * 4)];
1691 rss_key |= hash_key[(i * 4) + 1] << 8;
1692 rss_key |= hash_key[(i * 4) + 2] << 16;
1693 rss_key |= hash_key[(i * 4) + 3] << 24;
1694 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1698 /* Set configured hashing protocols in MRQC register */
1699 rss_hf = rss_conf->rss_hf;
1700 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1701 if (rss_hf & ETH_RSS_IPV4)
1702 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1703 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1704 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1705 if (rss_hf & ETH_RSS_IPV6)
1706 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1707 if (rss_hf & ETH_RSS_IPV6_EX)
1708 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1709 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1710 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1711 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1712 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1713 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1714 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1715 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1716 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1717 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1718 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1719 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1723 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1724 struct rte_eth_rss_conf *rss_conf)
1726 struct e1000_hw *hw;
1730 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1733 * Before changing anything, first check that the update RSS operation
1734 * does not attempt to disable RSS, if RSS was enabled at
1735 * initialization time, or does not attempt to enable RSS, if RSS was
1736 * disabled at initialization time.
1738 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1739 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1740 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1741 if (rss_hf != 0) /* Enable RSS */
1743 return 0; /* Nothing to do */
1746 if (rss_hf == 0) /* Disable RSS */
1748 igb_hw_rss_hash_set(hw, rss_conf);
1752 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1753 struct rte_eth_rss_conf *rss_conf)
1755 struct e1000_hw *hw;
1762 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1763 hash_key = rss_conf->rss_key;
1764 if (hash_key != NULL) {
1765 /* Return RSS hash key */
1766 for (i = 0; i < 10; i++) {
1767 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1768 hash_key[(i * 4)] = rss_key & 0x000000FF;
1769 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1770 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1771 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1775 /* Get RSS functions configured in MRQC register */
1776 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1777 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1778 rss_conf->rss_hf = 0;
1782 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1783 rss_hf |= ETH_RSS_IPV4;
1784 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1785 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1786 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1787 rss_hf |= ETH_RSS_IPV6;
1788 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1789 rss_hf |= ETH_RSS_IPV6_EX;
1790 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1791 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1792 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1793 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1794 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1795 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1796 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1797 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1798 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1799 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1800 rss_conf->rss_hf = rss_hf;
1805 igb_rss_configure(struct rte_eth_dev *dev)
1807 struct rte_eth_rss_conf rss_conf;
1808 struct e1000_hw *hw;
1812 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1814 /* Fill in redirection table. */
1815 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1816 for (i = 0; i < 128; i++) {
1823 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1824 i % dev->data->nb_rx_queues : 0);
1825 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1827 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1831 * Configure the RSS key and the RSS protocols used to compute
1832 * the RSS hash of input packets.
1834 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1835 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1836 igb_rss_disable(dev);
1839 if (rss_conf.rss_key == NULL)
1840 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1841 igb_hw_rss_hash_set(hw, &rss_conf);
/*
 * Check whether the MAC type supports VMDq or not.
 * Returns 1 if it does, otherwise returns 0.
 */
1849 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1851 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1853 switch (hw->mac.type) {
1874 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1880 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1882 struct rte_eth_vmdq_rx_conf *cfg;
1883 struct e1000_hw *hw;
1884 uint32_t mrqc, vt_ctl, vmolr, rctl;
1887 PMD_INIT_FUNC_TRACE();
1889 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1890 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
	/* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1893 if (igb_is_vmdq_supported(dev) == 0)
1896 igb_rss_disable(dev);
	/* RCTL: enable VLAN filtering */
1899 rctl = E1000_READ_REG(hw, E1000_RCTL);
1900 rctl |= E1000_RCTL_VFE;
1901 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1903 /* MRQC: enable vmdq */
1904 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1905 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1906 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1908 /* VTCTL: pool selection according to VLAN tag */
1909 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1910 if (cfg->enable_default_pool)
1911 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1912 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1913 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1915 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1916 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1917 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1918 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1921 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1922 vmolr |= E1000_VMOLR_AUPE;
1923 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1924 vmolr |= E1000_VMOLR_ROMPE;
1925 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1926 vmolr |= E1000_VMOLR_ROPE;
1927 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1928 vmolr |= E1000_VMOLR_BAM;
1929 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1930 vmolr |= E1000_VMOLR_MPME;
1932 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
	 * VMOLR: set STRVLAN to 1 when IGMAC in VT_CTL is set to 1.
	 * Both 82576 and 82580 support it.
1939 if (hw->mac.type != e1000_i350) {
1940 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1941 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1942 vmolr |= E1000_VMOLR_STRVLAN;
1943 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1947 /* VFTA - enable all vlan filters */
1948 for (i = 0; i < IGB_VFTA_SIZE; i++)
1949 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
	/* VFRE: enable all 8 pools for RX; both 82576 and i350 support it */
1952 if (hw->mac.type != e1000_82580)
1953 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1956 * RAH/RAL - allow pools to read specific mac addresses
1957 * In this case, all pools should be able to read from mac addr 0
1959 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1960 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1962 /* VLVF: set up filters for vlan tags as configured */
1963 for (i = 0; i < cfg->nb_pool_maps; i++) {
1964 /* set vlan id in VF register and set the valid bit */
1965 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1966 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1967 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1968 E1000_VLVF_POOLSEL_MASK)));
1971 E1000_WRITE_FLUSH(hw);
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
1984 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1986 struct igb_rx_entry *rxe = rxq->sw_ring;
1990 /* Initialize software ring entries. */
1991 for (i = 0; i < rxq->nb_rx_desc; i++) {
1992 volatile union e1000_adv_rx_desc *rxd;
1993 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1996 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1997 "queue_id=%hu", rxq->queue_id);
2001 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
2002 rxd = &rxq->rx_ring[i];
2003 rxd->read.hdr_addr = 0;
2004 rxd->read.pkt_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

#define E1000_MRQC_DEF_Q_SHIFT               (3)

static int
igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mrqc;

	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
		/*
		 * SRIOV active scheme
		 * FIXME if support RSS together with VMDq & SRIOV
		 */
		mrqc = E1000_MRQC_ENABLE_VMDQ;
		/* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
		mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
	} else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * SRIOV inactive scheme
		 */
		switch (dev->data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			igb_rss_configure(dev);
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
			/* Configure general VMDQ only RX parameters */
			igb_vmdq_rx_hw_configure(dev);
			break;
		case ETH_MQ_RX_NONE:
			/* if mq_mode is none, disable rss mode. */
		default:
			igb_rss_disable(dev);
			break;
		}
	}

	return 0;
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* Configure support of jumbo frames, if any. */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		rctl |= E1000_RCTL_LPE;
		/*
		 * Set maximum packet length by default, and might be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
				  0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Configure RX buffer size. */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);
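			/*
			 * For example, a 2048-byte mbuf data room programs
			 * BSIZEPACKET = 2, and buf_size is rounded back down
			 * to the 2048 bytes the hardware will actually use.
			 */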
			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
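		/*
		 * PTHRESH, HTHRESH and WTHRESH occupy bits 4:0, 12:8 and
		 * 20:16 of RXDCTL respectively, hence the 0x1F masks and
		 * shifts above.
		 */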
		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
	}
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
	 * register, since the code above configures the SRRCTL register of
	 * the RX queue in such a case.
	 * All configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	if (rctl_bsize > 0) {
		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
			rctl |= E1000_RCTL_SZ_512;
		else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
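		/*
		 * rctl_bsize is only non-zero when some queue had an mbuf
		 * data room smaller than 1 KB; those queues fall back to the
		 * global RCTL buffer size and scatter RX was already forced
		 * for them above.
		 */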
	}

	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
	igb_dev_mq_rx_configure(dev);

	/* Update the rctl since igb_dev_mq_rx_configure may change its value */
	rctl |= E1000_READ_REG(hw, E1000_RCTL);

	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	/* Enable both L3/L4 rx checksum offload */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
						dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
						dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
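	/*
	 * RCTL.MO selects which bits of the destination address index the
	 * multicast table array; hw->mac.mc_filter_type holds the offset
	 * chosen when the MAC was initialized.
	 */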
	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);
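	/*
	 * The collision threshold (CT) lives in bits 11:4 of TCTL; the
	 * collision distance (COLD) field is programmed separately by
	 * e1000_config_collision_dist().
	 */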
	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));
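	/*
	 * The VF has no direct access to RLPML: e1000_rlpml_set_vf() asks
	 * the PF to apply the new maximum packet length through the VF
	 * mailbox.
	 */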
	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Configure RX buffer size. */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* It adds dual VLAN length for supporting dual VLAN */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxq->wthresh = 1;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		}
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txq->wthresh = 1;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		}
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}
void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct igb_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
}
void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct igb_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;