4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
75 /* Bit mask to indicate which bits are required for building the TX context */
76 #define IGB_TX_OFFLOAD_MASK ( \
82 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
83 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
86 * Structure associated with each descriptor of the RX ring of a RX queue.
89 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
93 * Structure associated with each descriptor of the TX ring of a TX queue.
96 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
97 uint16_t next_id; /**< Index of next descriptor in ring. */
98 uint16_t last_id; /**< Index of last scattered descriptor. */
102 * Structure associated with each RX queue.
104 struct igb_rx_queue {
105 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
106 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
107 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
108 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
109 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
110 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
111 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
112 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
113 uint16_t nb_rx_desc; /**< number of RX descriptors. */
114 uint16_t rx_tail; /**< current value of RDT register. */
115 uint16_t nb_rx_hold; /**< number of held free RX desc. */
116 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
117 uint16_t queue_id; /**< RX queue index. */
118 uint16_t reg_idx; /**< RX queue register index. */
119 uint8_t port_id; /**< Device port identifier. */
120 uint8_t pthresh; /**< Prefetch threshold register. */
121 uint8_t hthresh; /**< Host threshold register. */
122 uint8_t wthresh; /**< Write-back threshold register. */
123 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
124 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
128 * Hardware context number
130 enum igb_advctx_num {
131 IGB_CTX_0 = 0, /**< CTX0 */
132 IGB_CTX_1 = 1, /**< CTX1 */
133 IGB_CTX_NUM = 2, /**< CTX_NUM */
136 /** Offload features */
137 union igb_tx_offload {
140 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
141 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
142 uint64_t vlan_tci:16; /**< VLAN Tag Control Information (CPU order). */
143 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
144 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
146 /* uint64_t unused:8; */
151 * Compare mask for igb_tx_offload.data,
152 * should be in sync with igb_tx_offload layout.
154 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
155 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
156 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
157 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
158 /** MAC + IP + TCP + MSS mask. */
159 #define TX_TSO_CMP_MASK \
160 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
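/*
 * Illustration only (assuming the usual little-endian bit-field layout on
 * supported targets): the compare masks above line up with the
 * igb_tx_offload bit-fields as follows:
 *   l3_len(9) + l2_len(7) -> bits  0..15 -> TX_MACIP_LEN_CMP_MASK
 *   vlan_tci(16)          -> bits 16..31 -> TX_VLAN_CMP_MASK
 *   l4_len(8)             -> bits 32..39 -> TX_TCP_LEN_CMP_MASK
 *   tso_segsz(16)         -> bits 40..55 -> TX_TSO_MSS_CMP_MASK
 * The remaining 8 bits (56..63) are unused.
 */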
163 * Structure to check whether a new context needs to be built
165 struct igb_advctx_info {
166 uint64_t flags; /**< ol_flags related to context build. */
167 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
168 union igb_tx_offload tx_offload;
169 /** compare mask for tx offload. */
170 union igb_tx_offload tx_offload_mask;
174 * Structure associated with each TX queue.
176 struct igb_tx_queue {
177 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
178 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
179 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
180 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
181 uint32_t txd_type; /**< Device-specific TXD type */
182 uint16_t nb_tx_desc; /**< number of TX descriptors. */
183 uint16_t tx_tail; /**< Current value of TDT register. */
185 /**< Index of first used TX descriptor. */
186 uint16_t queue_id; /**< TX queue index. */
187 uint16_t reg_idx; /**< TX queue register index. */
188 uint8_t port_id; /**< Device port identifier. */
189 uint8_t pthresh; /**< Prefetch threshold register. */
190 uint8_t hthresh; /**< Host threshold register. */
191 uint8_t wthresh; /**< Write-back threshold register. */
193 /**< Current used hardware descriptor. */
195 /**< Start context position for transmit queue. */
196 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
197 /**< Hardware context history.*/
201 #define RTE_PMD_USE_PREFETCH
204 #ifdef RTE_PMD_USE_PREFETCH
205 #define rte_igb_prefetch(p) rte_prefetch0(p)
207 #define rte_igb_prefetch(p) do {} while(0)
210 #ifdef RTE_PMD_PACKET_PREFETCH
211 #define rte_packet_prefetch(p) rte_prefetch1(p)
213 #define rte_packet_prefetch(p) do {} while(0)
217 * Macro for VMDq feature for 1 GbE NIC.
219 #define E1000_VMOLR_SIZE (8)
220 #define IGB_TSO_MAX_HDRLEN (512)
221 #define IGB_TSO_MAX_MSS (9216)
223 /*********************************************************************
227 **********************************************************************/
230 * There are some hardware limitations for TCP segmentation offload. We
231 * should check whether the parameters are valid.
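 * For example (illustration only): a packet requesting PKT_TX_TCP_SEG with
 * tso_segsz larger than IGB_TSO_MAX_MSS, or with more than
 * IGB_TSO_MAX_HDRLEN bytes of L2+L3+L4 headers, has PKT_TX_TCP_SEG cleared
 * and PKT_TX_TCP_CKSUM requested instead, so it is sent unsegmented but
 * still gets L4 checksum offload.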
233 static inline uint64_t
234 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
236 if (!(ol_req & PKT_TX_TCP_SEG))
238 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
239 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
240 ol_req &= ~PKT_TX_TCP_SEG;
241 ol_req |= PKT_TX_TCP_CKSUM;
247 * Advanced context descriptors are almost the same between igb and ixgbe.
248 * This is kept as a separate function; look for optimization opportunities here.
249 * Rework is required to go with the pre-defined values.
253 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
254 volatile struct e1000_adv_tx_context_desc *ctx_txd,
255 uint64_t ol_flags, union igb_tx_offload tx_offload)
257 uint32_t type_tucmd_mlhl;
258 uint32_t mss_l4len_idx;
259 uint32_t ctx_idx, ctx_curr;
260 uint32_t vlan_macip_lens;
261 union igb_tx_offload tx_offload_mask;
263 ctx_curr = txq->ctx_curr;
264 ctx_idx = ctx_curr + txq->ctx_start;
266 tx_offload_mask.data = 0;
269 /* Specify which HW CTX to upload. */
270 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
272 if (ol_flags & PKT_TX_VLAN_PKT)
273 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
275 /* check if TCP segmentation is required for this packet */
276 if (ol_flags & PKT_TX_TCP_SEG) {
277 /* implies IP cksum in IPv4 */
278 if (ol_flags & PKT_TX_IP_CKSUM)
279 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
280 E1000_ADVTXD_TUCMD_L4T_TCP |
281 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
283 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
284 E1000_ADVTXD_TUCMD_L4T_TCP |
285 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
287 tx_offload_mask.data |= TX_TSO_CMP_MASK;
288 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
289 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
290 } else { /* no TSO, check if hardware checksum is needed */
291 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
292 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
294 if (ol_flags & PKT_TX_IP_CKSUM)
295 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
297 switch (ol_flags & PKT_TX_L4_MASK) {
298 case PKT_TX_UDP_CKSUM:
299 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
300 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
301 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
303 case PKT_TX_TCP_CKSUM:
304 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
305 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
306 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
308 case PKT_TX_SCTP_CKSUM:
309 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
310 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
311 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
314 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
315 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
320 txq->ctx_cache[ctx_curr].flags = ol_flags;
321 txq->ctx_cache[ctx_curr].tx_offload.data =
322 tx_offload_mask.data & tx_offload.data;
323 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
325 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
326 vlan_macip_lens = (uint32_t)tx_offload.data;
327 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
328 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
329 ctx_txd->seqnum_seed = 0;
333 * Check which hardware context can be used. Use the existing match
334 * or create a new context descriptor.
336 static inline uint32_t
337 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
338 union igb_tx_offload tx_offload)
340 /* If match with the current context */
341 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
342 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
343 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
344 return txq->ctx_curr;
347 /* If match with the second context */
349 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
350 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
351 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
352 return txq->ctx_curr;
355 /* Mismatch, use the previous context */
359 static inline uint32_t
360 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
362 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
363 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
366 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
367 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
368 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
372 static inline uint32_t
373 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
376 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
377 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
378 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
379 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
384 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
387 struct igb_tx_queue *txq;
388 struct igb_tx_entry *sw_ring;
389 struct igb_tx_entry *txe, *txn;
390 volatile union e1000_adv_tx_desc *txr;
391 volatile union e1000_adv_tx_desc *txd;
392 struct rte_mbuf *tx_pkt;
393 struct rte_mbuf *m_seg;
394 uint64_t buf_dma_addr;
395 uint32_t olinfo_status;
396 uint32_t cmd_type_len;
405 uint32_t new_ctx = 0;
407 union igb_tx_offload tx_offload = {0};
410 sw_ring = txq->sw_ring;
412 tx_id = txq->tx_tail;
413 txe = &sw_ring[tx_id];
415 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
417 pkt_len = tx_pkt->pkt_len;
419 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
422 * The number of descriptors that must be allocated for a
423 * packet is the number of segments of that packet, plus 1
424 * Context Descriptor for the VLAN Tag Identifier, if any.
425 * Determine the last TX descriptor to allocate in the TX ring
426 * for the packet, starting from the current position (tx_id)
429 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
431 ol_flags = tx_pkt->ol_flags;
432 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
434 /* If a Context Descriptor needs to be built. */
436 tx_offload.l2_len = tx_pkt->l2_len;
437 tx_offload.l3_len = tx_pkt->l3_len;
438 tx_offload.l4_len = tx_pkt->l4_len;
439 tx_offload.vlan_tci = tx_pkt->vlan_tci;
440 tx_offload.tso_segsz = tx_pkt->tso_segsz;
441 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
443 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
444 /* Only allocate a context descriptor if required */
445 new_ctx = (ctx == IGB_CTX_NUM);
446 ctx = txq->ctx_curr + txq->ctx_start;
447 tx_last = (uint16_t) (tx_last + new_ctx);
449 if (tx_last >= txq->nb_tx_desc)
450 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
452 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
453 " tx_first=%u tx_last=%u",
454 (unsigned) txq->port_id,
455 (unsigned) txq->queue_id,
461 * Check if there are enough free descriptors in the TX ring
462 * to transmit the next packet.
463 * This operation is based on the two following rules:
465 * 1- Only check that the last needed TX descriptor can be
466 * allocated (by construction, if that descriptor is free,
467 * all intermediate ones are also free).
469 * For this purpose, the index of the last TX descriptor
470 * used for a packet (the "last descriptor" of a packet)
471 * is recorded in the TX entries (the last one included)
472 * that are associated with all TX descriptors allocated
475 * 2- Avoid allocating the last free TX descriptor of the
476 * ring, in order to never set the TDT register with the
477 * same value stored in parallel by the NIC in the TDH
478 * register, which would make the TX engine of the NIC enter
479 * a deadlock situation.
481 * By extension, avoid allocating a free descriptor that
482 * belongs to the last set of free descriptors allocated
483 * to the same packet previously transmitted.
487 * The "last descriptor" of the previously sent packet, if any,
488 * which used the last descriptor to allocate.
490 tx_end = sw_ring[tx_last].last_id;
493 * The next descriptor following that "last descriptor" in the
496 tx_end = sw_ring[tx_end].next_id;
499 * The "last descriptor" associated with that next descriptor.
501 tx_end = sw_ring[tx_end].last_id;
504 * Check that this descriptor is free.
506 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
513 * Set common flags of all TX Data Descriptors.
515 * The following bits must be set in all Data Descriptors:
516 * - E1000_ADVTXD_DTYP_DATA
517 * - E1000_ADVTXD_DCMD_DEXT
519 * The following bits must be set in the first Data Descriptor
520 * and are ignored in the other ones:
521 * - E1000_ADVTXD_DCMD_IFCS
522 * - E1000_ADVTXD_MAC_1588
523 * - E1000_ADVTXD_DCMD_VLE
525 * The following bits must only be set in the last Data
527 * - E1000_TXD_CMD_EOP
529 * The following bits can be set in any Data Descriptor, but
530 * are only set in the last Data Descriptor:
533 cmd_type_len = txq->txd_type |
534 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
535 if (tx_ol_req & PKT_TX_TCP_SEG)
536 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
537 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
538 #if defined(RTE_LIBRTE_IEEE1588)
539 if (ol_flags & PKT_TX_IEEE1588_TMST)
540 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
543 /* Setup TX Advanced context descriptor if required */
545 volatile struct e1000_adv_tx_context_desc *
548 ctx_txd = (volatile struct
549 e1000_adv_tx_context_desc *)
552 txn = &sw_ring[txe->next_id];
553 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
555 if (txe->mbuf != NULL) {
556 rte_pktmbuf_free_seg(txe->mbuf);
560 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
562 txe->last_id = tx_last;
563 tx_id = txe->next_id;
567 /* Setup the TX Advanced Data Descriptor */
568 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
569 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
570 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
575 txn = &sw_ring[txe->next_id];
578 if (txe->mbuf != NULL)
579 rte_pktmbuf_free_seg(txe->mbuf);
583 * Set up transmit descriptor.
585 slen = (uint16_t) m_seg->data_len;
586 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
587 txd->read.buffer_addr =
588 rte_cpu_to_le_64(buf_dma_addr);
589 txd->read.cmd_type_len =
590 rte_cpu_to_le_32(cmd_type_len | slen);
591 txd->read.olinfo_status =
592 rte_cpu_to_le_32(olinfo_status);
593 txe->last_id = tx_last;
594 tx_id = txe->next_id;
597 } while (m_seg != NULL);
600 * The last packet data descriptor needs End Of Packet (EOP)
601 * and Report Status (RS).
603 txd->read.cmd_type_len |=
604 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
610 * Set the Transmit Descriptor Tail (TDT).
612 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
613 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
614 (unsigned) txq->port_id, (unsigned) txq->queue_id,
615 (unsigned) tx_id, (unsigned) nb_tx);
616 txq->tx_tail = tx_id;
621 /*********************************************************************
625 **********************************************************************/
627 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
633 for (i = 0; i < nb_pkts; i++) {
636 /* Check some limitations for TSO in hardware */
637 if (m->ol_flags & PKT_TX_TCP_SEG)
638 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
639 (m->l2_len + m->l3_len + m->l4_len >
640 IGB_TSO_MAX_HDRLEN)) {
645 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
646 rte_errno = ENOTSUP;
650 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
651 ret = rte_validate_tx_offload(m);
657 ret = rte_net_intel_cksum_prepare(m);
667 /*********************************************************************
671 **********************************************************************/
672 #define IGB_PACKET_TYPE_IPV4 0X01
673 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
674 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
675 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
676 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
677 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
678 #define IGB_PACKET_TYPE_IPV6 0X04
679 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
680 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
681 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
682 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
683 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
684 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
685 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
686 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
687 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
688 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
689 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
690 #define IGB_PACKET_TYPE_MAX 0X80
691 #define IGB_PACKET_TYPE_MASK 0X7F
692 #define IGB_PACKET_TYPE_SHIFT 0X04
693 static inline uint32_t
694 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
696 static const uint32_t
697 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
698 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
700 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
701 RTE_PTYPE_L3_IPV4_EXT,
702 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
704 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
705 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
706 RTE_PTYPE_INNER_L3_IPV6,
707 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
708 RTE_PTYPE_L3_IPV6_EXT,
709 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
710 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
711 RTE_PTYPE_INNER_L3_IPV6_EXT,
712 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
713 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
714 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
715 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
716 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
717 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
718 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
719 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
720 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
721 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
722 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
723 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
724 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
725 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
726 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
727 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
728 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
729 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
730 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
731 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
732 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
733 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
734 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
735 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
736 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
737 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
738 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
739 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
741 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
742 return RTE_PTYPE_UNKNOWN;
744 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
746 return ptype_table[pkt_info];
749 static inline uint64_t
750 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
752 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
754 #if defined(RTE_LIBRTE_IEEE1588)
755 static uint32_t ip_pkt_etqf_map[8] = {
756 0, 0, 0, PKT_RX_IEEE1588_PTP,
760 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
761 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
763 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
764 if (hw->mac.type == e1000_i210)
765 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
767 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
775 static inline uint64_t
776 rx_desc_status_to_pkt_flags(uint32_t rx_status)
780 /* Check if VLAN present */
781 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
782 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
784 #if defined(RTE_LIBRTE_IEEE1588)
785 if (rx_status & E1000_RXD_STAT_TMST)
786 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
791 static inline uint64_t
792 rx_desc_error_to_pkt_flags(uint32_t rx_status)
795 * Bit 30: IPE, IPv4 checksum error
796 * Bit 29: L4I, L4 integrity error
799 static uint64_t error_to_pkt_flags_map[4] = {
800 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
801 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
802 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
803 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
805 return error_to_pkt_flags_map[(rx_status >>
806 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
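/*
 * Illustration only: the two error bits form a 2-bit index into the map
 * above. A status word with only the L4 error bit (bit 29) set yields
 * index 1, i.e. PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD; with only IPE
 * (bit 30) set, index 2, i.e. PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
 */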
810 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
813 struct igb_rx_queue *rxq;
814 volatile union e1000_adv_rx_desc *rx_ring;
815 volatile union e1000_adv_rx_desc *rxdp;
816 struct igb_rx_entry *sw_ring;
817 struct igb_rx_entry *rxe;
818 struct rte_mbuf *rxm;
819 struct rte_mbuf *nmb;
820 union e1000_adv_rx_desc rxd;
823 uint32_t hlen_type_rss;
833 rx_id = rxq->rx_tail;
834 rx_ring = rxq->rx_ring;
835 sw_ring = rxq->sw_ring;
836 while (nb_rx < nb_pkts) {
838 * The order of operations here is important as the DD status
839 * bit must not be read after any other descriptor fields.
840 * rx_ring and rxdp are pointing to volatile data so the order
841 * of accesses cannot be reordered by the compiler. If they were
842 * not volatile, they could be reordered which could lead to
843 * using invalid descriptor fields when read from rxd.
845 rxdp = &rx_ring[rx_id];
846 staterr = rxdp->wb.upper.status_error;
847 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
854 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
855 * likely to be invalid and to be dropped by the various
856 * validation checks performed by the network stack.
858 * Allocate a new mbuf to replenish the RX ring descriptor.
859 * If the allocation fails:
860 * - arrange for that RX descriptor to be the first one
861 * being parsed the next time the receive function is
862 * invoked [on the same queue].
864 * - Stop parsing the RX ring and return immediately.
866 * This policy does not drop the packet received in the RX
867 * descriptor for which the allocation of a new mbuf failed.
868 * Thus, it allows that packet to be later retrieved if
869 * mbufs have been freed in the meantime.
870 * As a side effect, holding RX descriptors instead of
871 * systematically giving them back to the NIC may lead to
872 * RX ring exhaustion situations.
873 * However, the NIC can gracefully prevent such situations
874 * from happening by sending specific "back-pressure" flow control
875 * frames to its peer(s).
877 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
878 "staterr=0x%x pkt_len=%u",
879 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
880 (unsigned) rx_id, (unsigned) staterr,
881 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
883 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
885 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
886 "queue_id=%u", (unsigned) rxq->port_id,
887 (unsigned) rxq->queue_id);
888 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
893 rxe = &sw_ring[rx_id];
895 if (rx_id == rxq->nb_rx_desc)
898 /* Prefetch next mbuf while processing current one. */
899 rte_igb_prefetch(sw_ring[rx_id].mbuf);
902 * When the next RX descriptor is on a cache-line boundary,
903 * prefetch the next 4 RX descriptors and the next 8 pointers
906 if ((rx_id & 0x3) == 0) {
907 rte_igb_prefetch(&rx_ring[rx_id]);
908 rte_igb_prefetch(&sw_ring[rx_id]);
914 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
915 rxdp->read.hdr_addr = 0;
916 rxdp->read.pkt_addr = dma_addr;
919 * Initialize the returned mbuf.
920 * 1) setup generic mbuf fields:
921 * - number of segments,
924 * - RX port identifier.
925 * 2) integrate hardware offload data, if any:
927 * - IP checksum flag,
928 * - VLAN TCI, if any,
931 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
933 rxm->data_off = RTE_PKTMBUF_HEADROOM;
934 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
937 rxm->pkt_len = pkt_len;
938 rxm->data_len = pkt_len;
939 rxm->port = rxq->port_id;
941 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
942 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
943 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
944 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
946 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
947 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
948 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
949 rxm->ol_flags = pkt_flags;
950 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
951 lo_dword.hs_rss.pkt_info);
954 * Store the mbuf address into the next entry of the array
955 * of returned packets.
957 rx_pkts[nb_rx++] = rxm;
959 rxq->rx_tail = rx_id;
962 * If the number of free RX descriptors is greater than the RX free
963 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
965 * Update the RDT with the value of the last processed RX descriptor
966 * minus 1, to guarantee that the RDT register is never equal to the
967 * RDH register, which creates a "full" ring situation from the
968 * hardware point of view...
970 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
971 if (nb_hold > rxq->rx_free_thresh) {
972 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
973 "nb_hold=%u nb_rx=%u",
974 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
975 (unsigned) rx_id, (unsigned) nb_hold,
977 rx_id = (uint16_t) ((rx_id == 0) ?
978 (rxq->nb_rx_desc - 1) : (rx_id - 1));
979 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
982 rxq->nb_rx_hold = nb_hold;
987 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
990 struct igb_rx_queue *rxq;
991 volatile union e1000_adv_rx_desc *rx_ring;
992 volatile union e1000_adv_rx_desc *rxdp;
993 struct igb_rx_entry *sw_ring;
994 struct igb_rx_entry *rxe;
995 struct rte_mbuf *first_seg;
996 struct rte_mbuf *last_seg;
997 struct rte_mbuf *rxm;
998 struct rte_mbuf *nmb;
999 union e1000_adv_rx_desc rxd;
1000 uint64_t dma; /* Physical address of mbuf data buffer */
1002 uint32_t hlen_type_rss;
1012 rx_id = rxq->rx_tail;
1013 rx_ring = rxq->rx_ring;
1014 sw_ring = rxq->sw_ring;
1017 * Retrieve RX context of current packet, if any.
1019 first_seg = rxq->pkt_first_seg;
1020 last_seg = rxq->pkt_last_seg;
1022 while (nb_rx < nb_pkts) {
1025 * The order of operations here is important as the DD status
1026 * bit must not be read after any other descriptor fields.
1027 * rx_ring and rxdp are pointing to volatile data so the order
1028 * of accesses cannot be reordered by the compiler. If they were
1029 * not volatile, they could be reordered which could lead to
1030 * using invalid descriptor fields when read from rxd.
1032 rxdp = &rx_ring[rx_id];
1033 staterr = rxdp->wb.upper.status_error;
1034 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1041 * Allocate a new mbuf to replenish the RX ring descriptor.
1042 * If the allocation fails:
1043 * - arrange for that RX descriptor to be the first one
1044 * being parsed the next time the receive function is
1045 * invoked [on the same queue].
1047 * - Stop parsing the RX ring and return immediately.
1049 * This policy does not drop the packet received in the RX
1050 * descriptor for which the allocation of a new mbuf failed.
1051 * Thus, it allows that packet to be later retrieved if
1052 * mbufs have been freed in the meantime.
1053 * As a side effect, holding RX descriptors instead of
1054 * systematically giving them back to the NIC may lead to
1055 * RX ring exhaustion situations.
1056 * However, the NIC can gracefully prevent such situations
1057 * from happening by sending specific "back-pressure" flow control
1058 * frames to its peer(s).
1060 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1061 "staterr=0x%x data_len=%u",
1062 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1063 (unsigned) rx_id, (unsigned) staterr,
1064 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1066 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1068 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1069 "queue_id=%u", (unsigned) rxq->port_id,
1070 (unsigned) rxq->queue_id);
1071 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1076 rxe = &sw_ring[rx_id];
1078 if (rx_id == rxq->nb_rx_desc)
1081 /* Prefetch next mbuf while processing current one. */
1082 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1085 * When the next RX descriptor is on a cache-line boundary,
1086 * prefetch the next 4 RX descriptors and the next 8 pointers
1089 if ((rx_id & 0x3) == 0) {
1090 rte_igb_prefetch(&rx_ring[rx_id]);
1091 rte_igb_prefetch(&sw_ring[rx_id]);
1095 * Update RX descriptor with the physical address of the new
1096 * data buffer of the new allocated mbuf.
1100 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1101 rxdp->read.pkt_addr = dma;
1102 rxdp->read.hdr_addr = 0;
1105 * Set data length & data buffer address of mbuf.
1107 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1108 rxm->data_len = data_len;
1109 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1112 * If this is the first buffer of the received packet,
1113 * set the pointer to the first mbuf of the packet and
1114 * initialize its context.
1115 * Otherwise, update the total length and the number of segments
1116 * of the current scattered packet, and update the pointer to
1117 * the last mbuf of the current packet.
1119 if (first_seg == NULL) {
1121 first_seg->pkt_len = data_len;
1122 first_seg->nb_segs = 1;
1124 first_seg->pkt_len += data_len;
1125 first_seg->nb_segs++;
1126 last_seg->next = rxm;
1130 * If this is not the last buffer of the received packet,
1131 * update the pointer to the last mbuf of the current scattered
1132 * packet and continue to parse the RX ring.
1134 if (! (staterr & E1000_RXD_STAT_EOP)) {
1140 * This is the last buffer of the received packet.
1141 * If the CRC is not stripped by the hardware:
1142 * - Subtract the CRC length from the total packet length.
1143 * - If the last buffer only contains the whole CRC or a part
1144 * of it, free the mbuf associated to the last buffer.
1145 * If part of the CRC is also contained in the previous
1146 * mbuf, subtract the length of that CRC part from the
1147 * data length of the previous mbuf.
1150 if (unlikely(rxq->crc_len > 0)) {
1151 first_seg->pkt_len -= ETHER_CRC_LEN;
1152 if (data_len <= ETHER_CRC_LEN) {
1153 rte_pktmbuf_free_seg(rxm);
1154 first_seg->nb_segs--;
1155 last_seg->data_len = (uint16_t)
1156 (last_seg->data_len -
1157 (ETHER_CRC_LEN - data_len));
1158 last_seg->next = NULL;
1161 (uint16_t) (data_len - ETHER_CRC_LEN);
1165 * Initialize the first mbuf of the returned packet:
1166 * - RX port identifier,
1167 * - hardware offload data, if any:
1168 * - RSS flag & hash,
1169 * - IP checksum flag,
1170 * - VLAN TCI, if any,
1173 first_seg->port = rxq->port_id;
1174 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1177 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1178 * set in the pkt_flags field.
1180 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1181 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1182 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1183 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1184 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1185 first_seg->ol_flags = pkt_flags;
1186 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1187 lower.lo_dword.hs_rss.pkt_info);
1189 /* Prefetch data of first segment, if configured to do so. */
1190 rte_packet_prefetch((char *)first_seg->buf_addr +
1191 first_seg->data_off);
1194 * Store the mbuf address into the next entry of the array
1195 * of returned packets.
1197 rx_pkts[nb_rx++] = first_seg;
1200 * Set up the receive context for a new packet.
1206 * Record index of the next RX descriptor to probe.
1208 rxq->rx_tail = rx_id;
1211 * Save receive context.
1213 rxq->pkt_first_seg = first_seg;
1214 rxq->pkt_last_seg = last_seg;
1217 * If the number of free RX descriptors is greater than the RX free
1218 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1220 * Update the RDT with the value of the last processed RX descriptor
1221 * minus 1, to guarantee that the RDT register is never equal to the
1222 * RDH register, which creates a "full" ring situation from the
1223 * hardware point of view...
1225 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1226 if (nb_hold > rxq->rx_free_thresh) {
1227 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1228 "nb_hold=%u nb_rx=%u",
1229 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1230 (unsigned) rx_id, (unsigned) nb_hold,
1232 rx_id = (uint16_t) ((rx_id == 0) ?
1233 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1234 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1237 rxq->nb_rx_hold = nb_hold;
1242 * Maximum number of Ring Descriptors.
1244 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1245 * descriptors should meet the following condition:
1246 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
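 * For example, an advanced RX/TX descriptor is 16 bytes, so the descriptor
 * count has to be a multiple of 128 / 16 = 8, which is what the
 * IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in the queue setup functions below
 * enforce.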
1250 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1254 if (txq->sw_ring != NULL) {
1255 for (i = 0; i < txq->nb_tx_desc; i++) {
1256 if (txq->sw_ring[i].mbuf != NULL) {
1257 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1258 txq->sw_ring[i].mbuf = NULL;
1265 igb_tx_queue_release(struct igb_tx_queue *txq)
1268 igb_tx_queue_release_mbufs(txq);
1269 rte_free(txq->sw_ring);
1275 eth_igb_tx_queue_release(void *txq)
1277 igb_tx_queue_release(txq);
1281 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1286 memset((void*)&txq->ctx_cache, 0,
1287 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1291 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1293 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1294 struct igb_tx_entry *txe = txq->sw_ring;
1296 struct e1000_hw *hw;
1298 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1299 /* Zero out HW ring memory */
1300 for (i = 0; i < txq->nb_tx_desc; i++) {
1301 txq->tx_ring[i] = zeroed_desc;
1304 /* Initialize ring entries */
1305 prev = (uint16_t)(txq->nb_tx_desc - 1);
1306 for (i = 0; i < txq->nb_tx_desc; i++) {
1307 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1309 txd->wb.status = E1000_TXD_STAT_DD;
1312 txe[prev].next_id = i;
1316 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1317 /* 82575 specific, each tx queue will use 2 hw contexts */
1318 if (hw->mac.type == e1000_82575)
1319 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1321 igb_reset_tx_queue_stat(txq);
1325 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1328 unsigned int socket_id,
1329 const struct rte_eth_txconf *tx_conf)
1331 const struct rte_memzone *tz;
1332 struct igb_tx_queue *txq;
1333 struct e1000_hw *hw;
1336 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1339 * Validate number of transmit descriptors.
1340 * It must not exceed the hardware maximum, and must be a multiple
1343 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1344 (nb_desc > E1000_MAX_RING_DESC) ||
1345 (nb_desc < E1000_MIN_RING_DESC)) {
1350 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1353 if (tx_conf->tx_free_thresh != 0)
1354 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1355 "used for the 1G driver.");
1356 if (tx_conf->tx_rs_thresh != 0)
1357 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1358 "used for the 1G driver.");
1359 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1360 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1361 "consider setting the TX WTHRESH value to 4, 8, "
1364 /* Free memory prior to re-allocation if needed */
1365 if (dev->data->tx_queues[queue_idx] != NULL) {
1366 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1367 dev->data->tx_queues[queue_idx] = NULL;
1370 /* First allocate the tx queue data structure */
1371 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1372 RTE_CACHE_LINE_SIZE);
1377 * Allocate TX ring hardware descriptors. A memzone large enough to
1378 * handle the maximum ring size is allocated in order to allow for
1379 * resizing in later calls to the queue setup function.
1381 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1382 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1383 E1000_ALIGN, socket_id);
1385 igb_tx_queue_release(txq);
1389 txq->nb_tx_desc = nb_desc;
1390 txq->pthresh = tx_conf->tx_thresh.pthresh;
1391 txq->hthresh = tx_conf->tx_thresh.hthresh;
1392 txq->wthresh = tx_conf->tx_thresh.wthresh;
1393 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1395 txq->queue_id = queue_idx;
1396 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1397 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1398 txq->port_id = dev->data->port_id;
1400 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1401 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1403 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1404 /* Allocate software ring */
1405 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1406 sizeof(struct igb_tx_entry) * nb_desc,
1407 RTE_CACHE_LINE_SIZE);
1408 if (txq->sw_ring == NULL) {
1409 igb_tx_queue_release(txq);
1412 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1413 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1415 igb_reset_tx_queue(txq, dev);
1416 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1417 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1418 dev->data->tx_queues[queue_idx] = txq;
1424 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1428 if (rxq->sw_ring != NULL) {
1429 for (i = 0; i < rxq->nb_rx_desc; i++) {
1430 if (rxq->sw_ring[i].mbuf != NULL) {
1431 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1432 rxq->sw_ring[i].mbuf = NULL;
1439 igb_rx_queue_release(struct igb_rx_queue *rxq)
1442 igb_rx_queue_release_mbufs(rxq);
1443 rte_free(rxq->sw_ring);
1449 eth_igb_rx_queue_release(void *rxq)
1451 igb_rx_queue_release(rxq);
1455 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1457 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1460 /* Zero out HW ring memory */
1461 for (i = 0; i < rxq->nb_rx_desc; i++) {
1462 rxq->rx_ring[i] = zeroed_desc;
1466 rxq->pkt_first_seg = NULL;
1467 rxq->pkt_last_seg = NULL;
1471 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1474 unsigned int socket_id,
1475 const struct rte_eth_rxconf *rx_conf,
1476 struct rte_mempool *mp)
1478 const struct rte_memzone *rz;
1479 struct igb_rx_queue *rxq;
1480 struct e1000_hw *hw;
1483 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1486 * Validate number of receive descriptors.
1487 * It must not exceed the hardware maximum, and must be a multiple
1490 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1491 (nb_desc > E1000_MAX_RING_DESC) ||
1492 (nb_desc < E1000_MIN_RING_DESC)) {
1496 /* Free memory prior to re-allocation if needed */
1497 if (dev->data->rx_queues[queue_idx] != NULL) {
1498 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1499 dev->data->rx_queues[queue_idx] = NULL;
1502 /* First allocate the RX queue data structure. */
1503 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1504 RTE_CACHE_LINE_SIZE);
1508 rxq->nb_rx_desc = nb_desc;
1509 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1510 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1511 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1512 if (rxq->wthresh > 0 &&
1513 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1515 rxq->drop_en = rx_conf->rx_drop_en;
1516 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1517 rxq->queue_id = queue_idx;
1518 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1519 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1520 rxq->port_id = dev->data->port_id;
1521 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1525 * Allocate RX ring hardware descriptors. A memzone large enough to
1526 * handle the maximum ring size is allocated in order to allow for
1527 * resizing in later calls to the queue setup function.
1529 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1530 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1531 E1000_ALIGN, socket_id);
1533 igb_rx_queue_release(rxq);
1536 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1537 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1538 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1539 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1541 /* Allocate software ring. */
1542 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1543 sizeof(struct igb_rx_entry) * nb_desc,
1544 RTE_CACHE_LINE_SIZE);
1545 if (rxq->sw_ring == NULL) {
1546 igb_rx_queue_release(rxq);
1549 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1550 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1552 dev->data->rx_queues[queue_idx] = rxq;
1553 igb_reset_rx_queue(rxq);
1559 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1561 #define IGB_RXQ_SCAN_INTERVAL 4
1562 volatile union e1000_adv_rx_desc *rxdp;
1563 struct igb_rx_queue *rxq;
1566 if (rx_queue_id >= dev->data->nb_rx_queues) {
1567 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1571 rxq = dev->data->rx_queues[rx_queue_id];
1572 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1574 while ((desc < rxq->nb_rx_desc) &&
1575 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1576 desc += IGB_RXQ_SCAN_INTERVAL;
1577 rxdp += IGB_RXQ_SCAN_INTERVAL;
1578 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1579 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1580 desc - rxq->nb_rx_desc]);
1587 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1589 volatile union e1000_adv_rx_desc *rxdp;
1590 struct igb_rx_queue *rxq = rx_queue;
1593 if (unlikely(offset >= rxq->nb_rx_desc))
1595 desc = rxq->rx_tail + offset;
1596 if (desc >= rxq->nb_rx_desc)
1597 desc -= rxq->nb_rx_desc;
1599 rxdp = &rxq->rx_ring[desc];
1600 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1604 igb_dev_clear_queues(struct rte_eth_dev *dev)
1607 struct igb_tx_queue *txq;
1608 struct igb_rx_queue *rxq;
1610 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1611 txq = dev->data->tx_queues[i];
1613 igb_tx_queue_release_mbufs(txq);
1614 igb_reset_tx_queue(txq, dev);
1618 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1619 rxq = dev->data->rx_queues[i];
1621 igb_rx_queue_release_mbufs(rxq);
1622 igb_reset_rx_queue(rxq);
1628 igb_dev_free_queues(struct rte_eth_dev *dev)
1632 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1633 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1634 dev->data->rx_queues[i] = NULL;
1636 dev->data->nb_rx_queues = 0;
1638 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1639 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1640 dev->data->tx_queues[i] = NULL;
1642 dev->data->nb_tx_queues = 0;
1646 * Receive Side Scaling (RSS).
1647 * See section 7.1.1.7 in the following document:
1648 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1651 * The source and destination IP addresses of the IP header and the source and
1652 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1653 * against a configurable random key to compute a 32-bit RSS hash result.
1654 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1655 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1656 * RSS output index which is used as the RX queue index where to store the
1658 * The following output is supplied in the RX write-back descriptor:
1659 * - 32-bit result of the Microsoft RSS hash function,
1660 * - 4-bit RSS type field.
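 * In other words (illustration only), the receive queue is selected
 * roughly as: rx_queue = RETA[rss_hash & 0x7F], where each of the 128
 * RETA entries holds a 3-bit queue index.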
1664 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1665 * Used as the default key.
1667 static uint8_t rss_intel_key[40] = {
1668 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1669 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1670 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1671 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1672 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1676 igb_rss_disable(struct rte_eth_dev *dev)
1678 struct e1000_hw *hw;
1681 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1682 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1683 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1684 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1688 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1696 hash_key = rss_conf->rss_key;
1697 if (hash_key != NULL) {
1698 /* Fill in RSS hash key */
1699 for (i = 0; i < 10; i++) {
1700 rss_key = hash_key[(i * 4)];
1701 rss_key |= hash_key[(i * 4) + 1] << 8;
1702 rss_key |= hash_key[(i * 4) + 2] << 16;
1703 rss_key |= hash_key[(i * 4) + 3] << 24;
1704 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1708 /* Set configured hashing protocols in MRQC register */
1709 rss_hf = rss_conf->rss_hf;
1710 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1711 if (rss_hf & ETH_RSS_IPV4)
1712 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1713 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1714 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1715 if (rss_hf & ETH_RSS_IPV6)
1716 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1717 if (rss_hf & ETH_RSS_IPV6_EX)
1718 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1719 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1720 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1721 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1722 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1723 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1724 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1725 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1726 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1727 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1728 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1729 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1733 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1734 struct rte_eth_rss_conf *rss_conf)
1736 struct e1000_hw *hw;
1740 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1743 * Before changing anything, first check that the update RSS operation
1744 * does not attempt to disable RSS, if RSS was enabled at
1745 * initialization time, or does not attempt to enable RSS, if RSS was
1746 * disabled at initialization time.
1748 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1749 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1750 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1751 if (rss_hf != 0) /* Enable RSS */
1753 return 0; /* Nothing to do */
1756 if (rss_hf == 0) /* Disable RSS */
1758 igb_hw_rss_hash_set(hw, rss_conf);
1762 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1763 struct rte_eth_rss_conf *rss_conf)
1765 struct e1000_hw *hw;
1772 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773 hash_key = rss_conf->rss_key;
1774 if (hash_key != NULL) {
1775 /* Return RSS hash key */
1776 for (i = 0; i < 10; i++) {
1777 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1778 hash_key[(i * 4)] = rss_key & 0x000000FF;
1779 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1780 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1781 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1785 /* Get RSS functions configured in MRQC register */
1786 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1787 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1788 rss_conf->rss_hf = 0;
1792 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1793 rss_hf |= ETH_RSS_IPV4;
1794 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1795 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1796 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1797 rss_hf |= ETH_RSS_IPV6;
1798 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1799 rss_hf |= ETH_RSS_IPV6_EX;
1800 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1801 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1802 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1803 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1804 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1805 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1806 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1807 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1808 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1809 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1810 rss_conf->rss_hf = rss_hf;
1815 igb_rss_configure(struct rte_eth_dev *dev)
1817 struct rte_eth_rss_conf rss_conf;
1818 struct e1000_hw *hw;
1822 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1824 /* Fill in redirection table. */
1825 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1826 for (i = 0; i < 128; i++) {
1833 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1834 i % dev->data->nb_rx_queues : 0);
1835 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1837 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1841 * Configure the RSS key and the RSS protocols used to compute
1842 * the RSS hash of input packets.
1844 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1845 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1846 igb_rss_disable(dev);
1849 if (rss_conf.rss_key == NULL)
1850 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1851 igb_hw_rss_hash_set(hw, &rss_conf);
1855 * Check whether the MAC type supports VMDq.
1856 * Return 1 if it does, otherwise return 0.
1859 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1861 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1863 switch (hw->mac.type) {
1884 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1890 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1892 struct rte_eth_vmdq_rx_conf *cfg;
1893 struct e1000_hw *hw;
1894 uint32_t mrqc, vt_ctl, vmolr, rctl;
1897 PMD_INIT_FUNC_TRACE();
1899 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1900 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1902 /* Check if the MAC type can support VMDq; a return value of 0 means no support */
1903 if (igb_is_vmdq_supported(dev) == 0)
1906 igb_rss_disable(dev);
1908 /* RCTL: enable VLAN filter */
1909 rctl = E1000_READ_REG(hw, E1000_RCTL);
1910 rctl |= E1000_RCTL_VFE;
1911 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1913 /* MRQC: enable vmdq */
1914 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1915 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1916 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1918 /* VTCTL: pool selection according to VLAN tag */
1919 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1920 if (cfg->enable_default_pool)
1921 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1922 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1923 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1925 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1926 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1927 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1928 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1931 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1932 vmolr |= E1000_VMOLR_AUPE;
1933 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1934 vmolr |= E1000_VMOLR_ROMPE;
1935 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1936 vmolr |= E1000_VMOLR_ROPE;
1937 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1938 vmolr |= E1000_VMOLR_BAM;
1939 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1940 vmolr |= E1000_VMOLR_MPME;
1942 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1946 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1947 * Both 82576 and 82580 support it.
1949 if (hw->mac.type != e1000_i350) {
1950 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1951 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1952 vmolr |= E1000_VMOLR_STRVLAN;
1953 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1957 /* VFTA - enable all vlan filters */
1958 for (i = 0; i < IGB_VFTA_SIZE; i++)
1959 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1961 /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1962 if (hw->mac.type != e1000_82580)
1963 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1966 * RAH/RAL - allow pools to read specific mac addresses
1967 * In this case, all pools should be able to read from mac addr 0
1969 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1970 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1972 /* VLVF: set up filters for vlan tags as configured */
1973 for (i = 0; i < cfg->nb_pool_maps; i++) {
1974 /* set vlan id in VF register and set the valid bit */
1975 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1976 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1977 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1978 E1000_VLVF_POOLSEL_MASK)));
1981 E1000_WRITE_FLUSH(hw);
1987 /*********************************************************************
1989 * Enable receive unit.
1991 **********************************************************************/
1994 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1996 struct igb_rx_entry *rxe = rxq->sw_ring;
2000 /* Initialize software ring entries. */
2001 for (i = 0; i < rxq->nb_rx_desc; i++) {
2002 volatile union e1000_adv_rx_desc *rxd;
2003 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2006 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2007 "queue_id=%hu", rxq->queue_id);
2011 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
2012 rxd = &rxq->rx_ring[i];
2013 rxd->read.hdr_addr = 0;
2014 rxd->read.pkt_addr = dma_addr;
2021 #define E1000_MRQC_DEF_Q_SHIFT (3)
2023 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2025 struct e1000_hw *hw =
2026 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2029 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2031 * SRIOV active scheme
2032 * FIXME if support RSS together with VMDq & SRIOV
2034 mrqc = E1000_MRQC_ENABLE_VMDQ;
2035 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2036 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2037 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2038 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2040 * SRIOV inactive scheme
2042 switch (dev->data->dev_conf.rxmode.mq_mode) {
2044 igb_rss_configure(dev);
2046 case ETH_MQ_RX_VMDQ_ONLY:
2047 /*Configure general VMDQ only RX parameters*/
2048 igb_vmdq_rx_hw_configure(dev);
2050 case ETH_MQ_RX_NONE:
2051 /* if mq_mode is none, disable RSS mode. */
		default:
			igb_rss_disable(dev);
			break;
		}
	}

	return 0;
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t rctl, rxcsum, srrctl;
	uint16_t buf_size, rctl_bsize;
	uint16_t i;
	int ret;
2074 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
2081 rctl = E1000_READ_REG(hw, E1000_RCTL);
2082 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* Configure support of jumbo frames, if any. */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		rctl |= E1000_RCTL_LPE;

		/*
		 * Set maximum packet length by default, and might be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;
2100 /* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
2103 for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];
		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
					0 : ETHER_CRC_LEN);
2122 bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
2126 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2127 (uint32_t)(bus_addr >> 32));
2128 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2130 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
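		/*
		 * Advanced one-buffer descriptors: each frame is written to a
		 * single data buffer (no header split).  Frames larger than
		 * that buffer are handled by the scattered receive path set
		 * up below.
		 */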
		/* Configure RX buffer size. */
2135 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2136 RTE_PKTMBUF_HEADROOM);
2137 if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
2145 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2146 E1000_SRRCTL_BSIZEPKT_MASK);
2147 buf_size = (uint16_t) ((srrctl &
2148 E1000_SRRCTL_BSIZEPKT_MASK) <<
2149 E1000_SRRCTL_BSIZEPKT_SHIFT);
2151 /* It adds dual VLAN length for supporting dual VLAN */
2152 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2153 2 * VLAN_TAG_SIZE) > buf_size){
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG,
					     "forcing scatter mode");
2157 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2165 rctl_bsize = buf_size;
2166 if (!dev->data->scattered_rx)
2167 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2168 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;
2176 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2178 /* Enable this RX queue. */
2179 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2180 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2181 rxdctl &= 0xFFF00000;
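		/*
		 * The low RXDCTL bits hold the descriptor thresholds:
		 * PTHRESH (prefetch) in bits [4:0], HTHRESH (host) in
		 * bits [12:8] and WTHRESH (write-back) in bits [20:16];
		 * they are cleared above and refilled from the queue config.
		 */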
2182 rxdctl |= (rxq->pthresh & 0x1F);
2183 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2184 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
2189 if (!dev->data->scattered_rx)
2190 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2191 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup BSIZE field of RCTL register, if needed.
2197 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2198 * register, since the code above configures the SRRCTL register of
2199 * the RX queue in such a case.
2200 * All configurable sizes are:
2201 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2202 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2203 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2204 * 2048: rctl |= E1000_RCTL_SZ_2048;
2205 * 1024: rctl |= E1000_RCTL_SZ_1024;
2206 * 512: rctl |= E1000_RCTL_SZ_512;
	 *  256: rctl |= E1000_RCTL_SZ_256;
	 */
2209 if (rctl_bsize > 0) {
2210 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2211 rctl |= E1000_RCTL_SZ_512;
2212 else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
	}

	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
2219 igb_dev_mq_rx_configure(dev);
2221 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2222 rctl |= E1000_READ_REG(hw, E1000_RCTL);
	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
2228 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2229 rxcsum |= E1000_RXCSUM_PCSD;
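	/*
	 * PCSD (packet checksum disable) must stay set when RSS is used,
	 * since the descriptor field that would carry the packet checksum
	 * is used to report the RSS hash instead.
	 */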
2231 /* Enable both L3/L4 rx checksum offload */
2232 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
	else
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2236 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2238 /* Setup the Receive Control Register. */
2239 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2240 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2242 /* set STRCRC bit in all queues */
2243 if (hw->mac.type == e1000_i350 ||
2244 hw->mac.type == e1000_i210 ||
2245 hw->mac.type == e1000_i211 ||
2246 hw->mac.type == e1000_i354) {
2247 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2248 rxq = dev->data->rx_queues[i];
2249 uint32_t dvmolr = E1000_READ_REG(hw,
2250 E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
						dvmolr);
			}
		}
	} else {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2258 /* clear STRCRC bit in all queues */
2259 if (hw->mac.type == e1000_i350 ||
2260 hw->mac.type == e1000_i210 ||
2261 hw->mac.type == e1000_i211 ||
2262 hw->mac.type == e1000_i354) {
2263 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2264 rxq = dev->data->rx_queues[i];
2265 uint32_t dvmolr = E1000_READ_REG(hw,
2266 E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx),
						dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2274 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2275 E1000_RCTL_RDMTS_HALF |
2276 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
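	/*
	 * BAM accepts broadcast frames, LBM_NO disables MAC loopback,
	 * RDMTS_HALF raises the descriptor-minimum threshold interrupt at
	 * half the ring, and MO positions the multicast filter offset
	 * according to the multicast filter type chosen for this MAC.
	 */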
2278 /* Make sure VLAN Filters are off. */
2279 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2280 rctl &= ~E1000_RCTL_VFE;
2281 /* Don't store bad packets. */
2282 rctl &= ~E1000_RCTL_SBP;
2284 /* Enable Receives. */
2285 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
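	/*
	 * Head (RDH) and tail (RDT) delimit the descriptors owned by
	 * hardware; initialising RDT to nb_rx_desc - 1 hands all but one
	 * descriptor to the NIC, and the receive path advances RDT as
	 * mbufs are replenished.
	 */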
2291 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2292 rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx),
				rxq->nb_rx_desc - 1);
	}

	return 0;
}
2300 /*********************************************************************
2302 * Enable transmit unit.
2304 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
2308 struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;
2314 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2316 /* Setup the Base and Length of the Tx Descriptor Rings. */
2317 for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
2320 bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
2325 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2326 (uint32_t)(bus_addr >> 32));
2327 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2329 /* Setup the HW Tx Head and Tail descriptor pointers. */
2330 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2331 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
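		/*
		 * Head == tail means the ring starts out empty; the transmit
		 * path bumps TDT as descriptors are handed to hardware.
		 */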
2333 /* Setup Transmit threshold registers. */
2334 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2335 txdctl |= txq->pthresh & 0x1F;
2336 txdctl |= ((txq->hthresh & 0x1F) << 8);
2337 txdctl |= ((txq->wthresh & 0x1F) << 16);
2338 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}
2342 /* Program the Transmit Control Register. */
2343 tctl = E1000_READ_REG(hw, E1000_TCTL);
2344 tctl &= ~E1000_TCTL_CT;
2345 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2346 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
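	/*
	 * PSP pads frames shorter than the minimum Ethernet size, RTLC
	 * enables retransmission on late collisions, and CT holds the
	 * collision threshold used on half-duplex links.
	 */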
2348 e1000_config_collision_dist(hw);
2350 /* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
2354 /*********************************************************************
2356 * Enable VF receive unit.
2358 **********************************************************************/
int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
2362 struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t srrctl;
	uint16_t buf_size, rctl_bsize;
	uint16_t i;
	int ret;
2370 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));
2377 /* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
2380 for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];
		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;
2391 bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
2395 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2396 (uint32_t)(bus_addr >> 32));
2397 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2399 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
		/* Configure RX buffer size. */
2404 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2405 RTE_PKTMBUF_HEADROOM);
2406 if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
2414 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2415 E1000_SRRCTL_BSIZEPKT_MASK);
2416 buf_size = (uint16_t) ((srrctl &
2417 E1000_SRRCTL_BSIZEPKT_MASK) <<
2418 E1000_SRRCTL_BSIZEPKT_SHIFT);
2420 /* It adds dual VLAN length for supporting dual VLAN */
2421 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2422 2 * VLAN_TAG_SIZE) > buf_size){
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG,
					     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
2433 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2434 rctl_bsize = buf_size;
2435 if (!dev->data->scattered_rx)
2436 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2437 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;
2445 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2447 /* Enable this RX queue. */
2448 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2449 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2450 rxdctl &= 0xFFF00000;
2451 rxdctl |= (rxq->pthresh & 0x1F);
2452 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2453 if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
2468 if (!dev->data->scattered_rx)
2469 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2470 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
2478 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2479 rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}
2487 /*********************************************************************
2489 * Enable VF transmit unit.
2491 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
2495 struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;
2500 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2502 /* Setup the Base and Length of the Tx Descriptor Rings. */
2503 for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
2507 bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
2511 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2512 (uint32_t)(bus_addr >> 32));
2513 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2515 /* Setup the HW Tx Head and Tail descriptor pointers. */
2516 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2517 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2519 /* Setup Transmit threshold registers. */
2520 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2521 txdctl |= txq->pthresh & 0x1F;
2522 txdctl |= ((txq->hthresh & 0x1F) << 8);
2523 if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround of 82576 VF Erratum
			 * force set WTHRESH to 1
			 * to avoid Write-Back not triggered sometimes
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
2534 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}

void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
2544 struct igb_rx_queue *rxq;
2546 rxq = dev->data->rx_queues[queue_id];
2548 qinfo->mp = rxq->mb_pool;
2549 qinfo->scattered_rx = dev->data->scattered_rx;
2550 qinfo->nb_desc = rxq->nb_rx_desc;
2552 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
}

void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
2560 struct igb_tx_queue *txq;
2562 txq = dev->data->tx_queues[queue_id];
2564 qinfo->nb_desc = txq->nb_tx_desc;
2566 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2567 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}
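/*
 * Note: applications normally reach the two query helpers above through the
 * generic ethdev API rather than calling them directly, e.g. (illustrative):
 *
 *	struct rte_eth_rxq_info rx_qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
 *		printf("queue %u: %u descriptors\n", queue_id,
 *		       rx_qinfo.nb_desc);
 */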