4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
69 #include <rte_string_fns.h>
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
75 #ifdef RTE_LIBRTE_IEEE1588
76 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
78 #define IGB_TX_IEEE1588_TMST 0
80 /* Bit mask indicating which bits are required for building the TX context */
81 #define IGB_TX_OFFLOAD_MASK ( \
88 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
89 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
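/*
 * Minimal usage sketch (illustration only, not part of the driver): the two
 * masks above split a packet's ol_flags into the offload requests this PMD
 * honours and those it must reject, mirroring what eth_igb_prep_pkts() below
 * does:
 *
 *     uint64_t ol_req = m->ol_flags & IGB_TX_OFFLOAD_MASK;
 *     if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
 *             rte_errno = ENOTSUP;   // at least one requested offload is unsupported
 *             return i;
 *     }
 */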
92 * Structure associated with each descriptor of the RX ring of a RX queue.
95 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
99 * Structure associated with each descriptor of the TX ring of a TX queue.
101 struct igb_tx_entry {
102 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
103 uint16_t next_id; /**< Index of next descriptor in ring. */
104 uint16_t last_id; /**< Index of last scattered descriptor. */
108 * Structure associated with each RX queue.
110 struct igb_rx_queue {
111 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
112 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
113 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
114 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
115 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
116 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
117 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
118 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
119 uint16_t nb_rx_desc; /**< number of RX descriptors. */
120 uint16_t rx_tail; /**< current value of RDT register. */
121 uint16_t nb_rx_hold; /**< number of held free RX desc. */
122 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
123 uint16_t queue_id; /**< RX queue index. */
124 uint16_t reg_idx; /**< RX queue register index. */
125 uint16_t port_id; /**< Device port identifier. */
126 uint8_t pthresh; /**< Prefetch threshold register. */
127 uint8_t hthresh; /**< Host threshold register. */
128 uint8_t wthresh; /**< Write-back threshold register. */
129 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
130 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
134 * Hardware context number
136 enum igb_advctx_num {
137 IGB_CTX_0 = 0, /**< CTX0 */
138 IGB_CTX_1 = 1, /**< CTX1 */
139 IGB_CTX_NUM = 2, /**< CTX_NUM */
142 /** Offload features */
143 union igb_tx_offload {
146 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
147 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
148 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
149 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
150 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
152 /* uint64_t unused:8; */
157 * Compare mask for igb_tx_offload.data;
158 * it must be kept in sync with the igb_tx_offload layout.
160 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
161 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
162 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
163 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
164 /** MAC + IP + TCP + MSS mask. */
165 #define TX_TSO_CMP_MASK \
166 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
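/*
 * Illustrative sketch (not driver code): each compare mask above selects the
 * igb_tx_offload bit-fields that matter for a given offload request, so a
 * cached context can be revalidated with a single masked 64-bit compare.
 * Here "cache" stands for &txq->ctx_cache[txq->ctx_curr]:
 *
 *     union igb_tx_offload cur = { .data = 0 };
 *     cur.l2_len = 14;    // Ethernet header
 *     cur.l3_len = 20;    // IPv4 header
 *     if (cache->flags == ol_flags &&
 *         cache->tx_offload.data == (cache->tx_offload_mask.data & cur.data))
 *             ctx = txq->ctx_curr;    // reuse the cached hardware context
 *
 * See what_advctx_update() below for the actual implementation.
 */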
169 * Structure to check whether a new context descriptor needs to be built
171 struct igb_advctx_info {
172 uint64_t flags; /**< ol_flags related to context build. */
173 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
174 union igb_tx_offload tx_offload;
175 /** compare mask for tx offload. */
176 union igb_tx_offload tx_offload_mask;
180 * Structure associated with each TX queue.
182 struct igb_tx_queue {
183 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
184 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
185 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
186 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
187 uint32_t txd_type; /**< Device-specific TXD type */
188 uint16_t nb_tx_desc; /**< number of TX descriptors. */
189 uint16_t tx_tail; /**< Current value of TDT register. */
191 /**< Index of first used TX descriptor. */
192 uint16_t queue_id; /**< TX queue index. */
193 uint16_t reg_idx; /**< TX queue register index. */
194 uint16_t port_id; /**< Device port identifier. */
195 uint8_t pthresh; /**< Prefetch threshold register. */
196 uint8_t hthresh; /**< Host threshold register. */
197 uint8_t wthresh; /**< Write-back threshold register. */
199 /**< Current used hardware descriptor. */
201 /**< Start context position for transmit queue. */
202 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
203 /**< Hardware context history.*/
207 #define RTE_PMD_USE_PREFETCH
210 #ifdef RTE_PMD_USE_PREFETCH
211 #define rte_igb_prefetch(p) rte_prefetch0(p)
213 #define rte_igb_prefetch(p) do {} while(0)
216 #ifdef RTE_PMD_PACKET_PREFETCH
217 #define rte_packet_prefetch(p) rte_prefetch1(p)
219 #define rte_packet_prefetch(p) do {} while(0)
223 * Macro for VMDq feature for 1 GbE NIC.
225 #define E1000_VMOLR_SIZE (8)
226 #define IGB_TSO_MAX_HDRLEN (512)
227 #define IGB_TSO_MAX_MSS (9216)
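/*
 * Worked example of the TSO limits above (illustration only): a request with
 * l2_len = 14 (Ethernet), l3_len = 20 (IPv4) and l4_len = 20 (TCP) has a
 * 14 + 20 + 20 = 54 byte header, well under IGB_TSO_MAX_HDRLEN (512), so
 * check_tso_para() below keeps PKT_TX_TCP_SEG; a request with
 * tso_segsz > IGB_TSO_MAX_MSS (9216) is instead downgraded to a plain
 * PKT_TX_TCP_CKSUM offload.
 */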
229 /*********************************************************************
233 **********************************************************************/
236 * There are some hardware limitations for TCP segmentation offload (TSO),
237 * so check whether the parameters are valid.
239 static inline uint64_t
240 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
242 if (!(ol_req & PKT_TX_TCP_SEG))
244 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
245 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
246 ol_req &= ~PKT_TX_TCP_SEG;
247 ol_req |= PKT_TX_TCP_CKSUM;
253 * Advanced context descriptors are almost the same between igb and ixgbe.
254 * This is kept as a separate function to allow for future optimization;
255 * rework is required to use the pre-defined values.
259 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
260 volatile struct e1000_adv_tx_context_desc *ctx_txd,
261 uint64_t ol_flags, union igb_tx_offload tx_offload)
263 uint32_t type_tucmd_mlhl;
264 uint32_t mss_l4len_idx;
265 uint32_t ctx_idx, ctx_curr;
266 uint32_t vlan_macip_lens;
267 union igb_tx_offload tx_offload_mask;
269 ctx_curr = txq->ctx_curr;
270 ctx_idx = ctx_curr + txq->ctx_start;
272 tx_offload_mask.data = 0;
275 /* Specify which HW CTX to upload. */
276 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
278 if (ol_flags & PKT_TX_VLAN_PKT)
279 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
281 /* check if TCP segmentation is required for this packet */
282 if (ol_flags & PKT_TX_TCP_SEG) {
283 /* implies IP cksum in IPv4 */
284 if (ol_flags & PKT_TX_IP_CKSUM)
285 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
286 E1000_ADVTXD_TUCMD_L4T_TCP |
287 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
289 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
290 E1000_ADVTXD_TUCMD_L4T_TCP |
291 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293 tx_offload_mask.data |= TX_TSO_CMP_MASK;
294 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
295 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
296 } else { /* no TSO, check if hardware checksum is needed */
297 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
298 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
300 if (ol_flags & PKT_TX_IP_CKSUM)
301 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
303 switch (ol_flags & PKT_TX_L4_MASK) {
304 case PKT_TX_UDP_CKSUM:
305 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
306 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
309 case PKT_TX_TCP_CKSUM:
310 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
311 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
312 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
314 case PKT_TX_SCTP_CKSUM:
315 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
316 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
317 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
320 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
321 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
326 txq->ctx_cache[ctx_curr].flags = ol_flags;
327 txq->ctx_cache[ctx_curr].tx_offload.data =
328 tx_offload_mask.data & tx_offload.data;
329 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
331 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
332 vlan_macip_lens = (uint32_t)tx_offload.data;
333 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
334 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
335 ctx_txd->seqnum_seed = 0;
339 * Check which hardware context can be used. Use the existing match
340 * or create a new context descriptor.
342 static inline uint32_t
343 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
344 union igb_tx_offload tx_offload)
346 /* If match with the current context */
347 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
348 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
349 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
350 return txq->ctx_curr;
353 /* If match with the second context */
355 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
356 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
357 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
358 return txq->ctx_curr;
361 /* Mismatch: a new context descriptor will have to be allocated */
365 static inline uint32_t
366 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
368 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
369 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
372 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
373 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
374 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
378 static inline uint32_t
379 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
382 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
383 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
384 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
385 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
390 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
393 struct igb_tx_queue *txq;
394 struct igb_tx_entry *sw_ring;
395 struct igb_tx_entry *txe, *txn;
396 volatile union e1000_adv_tx_desc *txr;
397 volatile union e1000_adv_tx_desc *txd;
398 struct rte_mbuf *tx_pkt;
399 struct rte_mbuf *m_seg;
400 uint64_t buf_dma_addr;
401 uint32_t olinfo_status;
402 uint32_t cmd_type_len;
411 uint32_t new_ctx = 0;
413 union igb_tx_offload tx_offload = {0};
416 sw_ring = txq->sw_ring;
418 tx_id = txq->tx_tail;
419 txe = &sw_ring[tx_id];
421 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
423 pkt_len = tx_pkt->pkt_len;
425 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
428 * The number of descriptors that must be allocated for a
429 * packet is the number of segments of that packet, plus 1
430 * Context Descriptor for the VLAN Tag Identifier, if any.
431 * Determine the last TX descriptor to allocate in the TX ring
432 * for the packet, starting from the current position (tx_id)
435 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
437 ol_flags = tx_pkt->ol_flags;
438 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
440 /* If a Context Descriptor needs to be built. */
442 tx_offload.l2_len = tx_pkt->l2_len;
443 tx_offload.l3_len = tx_pkt->l3_len;
444 tx_offload.l4_len = tx_pkt->l4_len;
445 tx_offload.vlan_tci = tx_pkt->vlan_tci;
446 tx_offload.tso_segsz = tx_pkt->tso_segsz;
447 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
449 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
450 /* Only allocate a context descriptor if required */
451 new_ctx = (ctx == IGB_CTX_NUM);
452 ctx = txq->ctx_curr + txq->ctx_start;
453 tx_last = (uint16_t) (tx_last + new_ctx);
455 if (tx_last >= txq->nb_tx_desc)
456 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
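/*
 * Worked example (illustration only): with tx_id = 5, a 3-segment packet
 * that also needs a new context descriptor occupies descriptors 5..8, so
 * tx_last = 5 + 3 - 1 + 1 = 8; on an 8-descriptor ring this then wraps
 * back to 0.
 */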
458 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
459 " tx_first=%u tx_last=%u",
460 (unsigned) txq->port_id,
461 (unsigned) txq->queue_id,
467 * Check if there are enough free descriptors in the TX ring
468 * to transmit the next packet.
469 * This operation is based on the two following rules:
471 * 1- Only check that the last needed TX descriptor can be
472 * allocated (by construction, if that descriptor is free,
473 * all intermediate ones are also free).
475 * For this purpose, the index of the last TX descriptor
476 * used for a packet (the "last descriptor" of a packet)
477 * is recorded in the TX entries (the last one included)
478 * that are associated with all TX descriptors allocated
481 * 2- Avoid allocating the last free TX descriptor of the
482 * ring, in order to never set the TDT register with the
483 * same value stored in parallel by the NIC in the TDH
484 * register, which makes the TX engine of the NIC enter
485 * a deadlock situation.
487 * By extension, avoid allocating a free descriptor that
488 * belongs to the last set of free descriptors allocated
489 * to the same packet previously transmitted.
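 *
 * Worked example of rule 2 (illustration only): on an 8-descriptor ring at
 * most 7 descriptors are ever handed to the NIC; if all 8 could be used,
 * the driver would write TDT with the very value the NIC holds in TDH and
 * a completely full ring would be indistinguishable from an empty one.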
493 * The "last descriptor" of the previously sent packet, if any,
494 * which used the last descriptor to allocate.
496 tx_end = sw_ring[tx_last].last_id;
499 * The next descriptor following that "last descriptor" in the
502 tx_end = sw_ring[tx_end].next_id;
505 * The "last descriptor" associated with that next descriptor.
507 tx_end = sw_ring[tx_end].last_id;
510 * Check that this descriptor is free.
512 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
519 * Set common flags of all TX Data Descriptors.
521 * The following bits must be set in all Data Descriptors:
522 * - E1000_ADVTXD_DTYP_DATA
523 * - E1000_ADVTXD_DCMD_DEXT
525 * The following bits must be set in the first Data Descriptor
526 * and are ignored in the other ones:
527 * - E1000_ADVTXD_DCMD_IFCS
528 * - E1000_ADVTXD_MAC_1588
529 * - E1000_ADVTXD_DCMD_VLE
531 * The following bits must only be set in the last Data
533 * - E1000_TXD_CMD_EOP
535 * The following bits can be set in any Data Descriptor, but
536 * are only set in the last Data Descriptor:
539 cmd_type_len = txq->txd_type |
540 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
541 if (tx_ol_req & PKT_TX_TCP_SEG)
542 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
543 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
544 #if defined(RTE_LIBRTE_IEEE1588)
545 if (ol_flags & PKT_TX_IEEE1588_TMST)
546 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
549 /* Setup TX Advanced context descriptor if required */
551 volatile struct e1000_adv_tx_context_desc *
554 ctx_txd = (volatile struct
555 e1000_adv_tx_context_desc *)
558 txn = &sw_ring[txe->next_id];
559 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
561 if (txe->mbuf != NULL) {
562 rte_pktmbuf_free_seg(txe->mbuf);
566 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
568 txe->last_id = tx_last;
569 tx_id = txe->next_id;
573 /* Setup the TX Advanced Data Descriptor */
574 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
575 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
576 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
581 txn = &sw_ring[txe->next_id];
584 if (txe->mbuf != NULL)
585 rte_pktmbuf_free_seg(txe->mbuf);
589 * Set up transmit descriptor.
591 slen = (uint16_t) m_seg->data_len;
592 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
593 txd->read.buffer_addr =
594 rte_cpu_to_le_64(buf_dma_addr);
595 txd->read.cmd_type_len =
596 rte_cpu_to_le_32(cmd_type_len | slen);
597 txd->read.olinfo_status =
598 rte_cpu_to_le_32(olinfo_status);
599 txe->last_id = tx_last;
600 tx_id = txe->next_id;
603 } while (m_seg != NULL);
606 * The last packet data descriptor needs End Of Packet (EOP)
607 * and Report Status (RS).
609 txd->read.cmd_type_len |=
610 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
616 * Set the Transmit Descriptor Tail (TDT).
618 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
619 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
620 (unsigned) txq->port_id, (unsigned) txq->queue_id,
621 (unsigned) tx_id, (unsigned) nb_tx);
622 txq->tx_tail = tx_id;
627 /*********************************************************************
631 **********************************************************************/
633 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
639 for (i = 0; i < nb_pkts; i++) {
642 /* Check some limitations for TSO in hardware */
643 if (m->ol_flags & PKT_TX_TCP_SEG)
644 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
645 (m->l2_len + m->l3_len + m->l4_len >
646 IGB_TSO_MAX_HDRLEN)) {
651 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
652 rte_errno = ENOTSUP; /* rte_errno takes positive errno values */
656 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
657 ret = rte_validate_tx_offload(m);
663 ret = rte_net_intel_cksum_prepare(m);
673 /*********************************************************************
677 **********************************************************************/
678 #define IGB_PACKET_TYPE_IPV4 0X01
679 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
680 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
681 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
682 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
683 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
684 #define IGB_PACKET_TYPE_IPV6 0X04
685 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
686 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
687 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
688 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
689 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
690 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
691 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
692 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
693 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
694 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
695 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
696 #define IGB_PACKET_TYPE_MAX 0X80
697 #define IGB_PACKET_TYPE_MASK 0X7F
698 #define IGB_PACKET_TYPE_SHIFT 0X04
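/*
 * Worked example (illustration only): for a plain IPv4/TCP frame the
 * descriptor's pkt_info field, shifted right by IGB_PACKET_TYPE_SHIFT and
 * masked with IGB_PACKET_TYPE_MASK, equals IGB_PACKET_TYPE_IPV4_TCP (0x11),
 * which the table below translates to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */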
699 static inline uint32_t
700 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
702 static const uint32_t
703 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
704 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
706 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
707 RTE_PTYPE_L3_IPV4_EXT,
708 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
710 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
711 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
712 RTE_PTYPE_INNER_L3_IPV6,
713 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
714 RTE_PTYPE_L3_IPV6_EXT,
715 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
716 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717 RTE_PTYPE_INNER_L3_IPV6_EXT,
718 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
719 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
720 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
721 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
722 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
723 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
724 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
725 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
726 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
727 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
728 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
729 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
730 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
731 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
732 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
733 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
734 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
735 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
736 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
737 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
738 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
739 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
740 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
741 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
742 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
743 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
744 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
745 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
747 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
748 return RTE_PTYPE_UNKNOWN;
750 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
752 return ptype_table[pkt_info];
755 static inline uint64_t
756 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
758 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
760 #if defined(RTE_LIBRTE_IEEE1588)
761 static uint32_t ip_pkt_etqf_map[8] = {
762 0, 0, 0, PKT_RX_IEEE1588_PTP,
766 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
767 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
769 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
770 if (hw->mac.type == e1000_i210)
771 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
773 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
781 static inline uint64_t
782 rx_desc_status_to_pkt_flags(uint32_t rx_status)
786 /* Check if VLAN present */
787 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
788 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
790 #if defined(RTE_LIBRTE_IEEE1588)
791 if (rx_status & E1000_RXD_STAT_TMST)
792 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
797 static inline uint64_t
798 rx_desc_error_to_pkt_flags(uint32_t rx_status)
801 * Bit 30: IPE, IPv4 checksum error
802 * Bit 29: L4I, L4 integrity error
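 *
 * Worked example (illustration only): a status word with IPE set and the
 * L4 error bit clear yields index 2 into the map below, i.e.
 * PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.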
805 static uint64_t error_to_pkt_flags_map[4] = {
806 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
807 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
808 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
809 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
811 return error_to_pkt_flags_map[(rx_status >>
812 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
816 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
819 struct igb_rx_queue *rxq;
820 volatile union e1000_adv_rx_desc *rx_ring;
821 volatile union e1000_adv_rx_desc *rxdp;
822 struct igb_rx_entry *sw_ring;
823 struct igb_rx_entry *rxe;
824 struct rte_mbuf *rxm;
825 struct rte_mbuf *nmb;
826 union e1000_adv_rx_desc rxd;
829 uint32_t hlen_type_rss;
839 rx_id = rxq->rx_tail;
840 rx_ring = rxq->rx_ring;
841 sw_ring = rxq->sw_ring;
842 while (nb_rx < nb_pkts) {
844 * The order of operations here is important as the DD status
845 * bit must not be read after any other descriptor fields.
846 * rx_ring and rxdp are pointing to volatile data so the order
847 * of accesses cannot be reordered by the compiler. If they were
848 * not volatile, they could be reordered which could lead to
849 * using invalid descriptor fields when read from rxd.
851 rxdp = &rx_ring[rx_id];
852 staterr = rxdp->wb.upper.status_error;
853 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
860 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
861 * likely to be invalid and to be dropped by the various
862 * validation checks performed by the network stack.
864 * Allocate a new mbuf to replenish the RX ring descriptor.
865 * If the allocation fails:
866 * - arrange for that RX descriptor to be the first one
867 * being parsed the next time the receive function is
868 * invoked [on the same queue].
870 * - Stop parsing the RX ring and return immediately.
872 * This policy does not drop the packet received in the RX
873 * descriptor for which the allocation of a new mbuf failed.
874 * Thus, it allows that packet to be later retrieved if
875 * mbufs have been freed in the meantime.
876 * As a side effect, holding RX descriptors instead of
877 * systematically giving them back to the NIC may lead to
878 * RX ring exhaustion situations.
879 * However, the NIC can gracefully prevent such situations
880 * from happening by sending specific "back-pressure" flow control
881 * frames to its peer(s).
883 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
884 "staterr=0x%x pkt_len=%u",
885 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
886 (unsigned) rx_id, (unsigned) staterr,
887 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
889 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
891 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
892 "queue_id=%u", (unsigned) rxq->port_id,
893 (unsigned) rxq->queue_id);
894 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
899 rxe = &sw_ring[rx_id];
901 if (rx_id == rxq->nb_rx_desc)
904 /* Prefetch next mbuf while processing current one. */
905 rte_igb_prefetch(sw_ring[rx_id].mbuf);
908 * When the next RX descriptor is on a cache-line boundary,
909 * prefetch the next 4 RX descriptors and the next 8 pointers
912 if ((rx_id & 0x3) == 0) {
913 rte_igb_prefetch(&rx_ring[rx_id]);
914 rte_igb_prefetch(&sw_ring[rx_id]);
920 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
921 rxdp->read.hdr_addr = 0;
922 rxdp->read.pkt_addr = dma_addr;
925 * Initialize the returned mbuf.
926 * 1) setup generic mbuf fields:
927 * - number of segments,
930 * - RX port identifier.
931 * 2) integrate hardware offload data, if any:
933 * - IP checksum flag,
934 * - VLAN TCI, if any,
937 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
939 rxm->data_off = RTE_PKTMBUF_HEADROOM;
940 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
943 rxm->pkt_len = pkt_len;
944 rxm->data_len = pkt_len;
945 rxm->port = rxq->port_id;
947 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
948 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
949 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
950 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
952 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
953 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
954 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
955 rxm->ol_flags = pkt_flags;
956 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
957 lo_dword.hs_rss.pkt_info);
960 * Store the mbuf address into the next entry of the array
961 * of returned packets.
963 rx_pkts[nb_rx++] = rxm;
965 rxq->rx_tail = rx_id;
968 * If the number of free RX descriptors is greater than the RX free
969 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
971 * Update the RDT with the value of the last processed RX descriptor
972 * minus 1, to guarantee that the RDT register is never equal to the
973 * RDH register, which creates a "full" ring situation from the
974 * hardware point of view...
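 *
 * Worked example (illustration only): with rx_free_thresh = 32, once more
 * than 32 descriptors have been processed since the last update, RDT is
 * written with rx_id - 1 (wrapping to nb_rx_desc - 1 when rx_id is 0) and
 * the hold counter is reset.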
976 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
977 if (nb_hold > rxq->rx_free_thresh) {
978 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
979 "nb_hold=%u nb_rx=%u",
980 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
981 (unsigned) rx_id, (unsigned) nb_hold,
983 rx_id = (uint16_t) ((rx_id == 0) ?
984 (rxq->nb_rx_desc - 1) : (rx_id - 1));
985 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
988 rxq->nb_rx_hold = nb_hold;
993 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
996 struct igb_rx_queue *rxq;
997 volatile union e1000_adv_rx_desc *rx_ring;
998 volatile union e1000_adv_rx_desc *rxdp;
999 struct igb_rx_entry *sw_ring;
1000 struct igb_rx_entry *rxe;
1001 struct rte_mbuf *first_seg;
1002 struct rte_mbuf *last_seg;
1003 struct rte_mbuf *rxm;
1004 struct rte_mbuf *nmb;
1005 union e1000_adv_rx_desc rxd;
1006 uint64_t dma; /* Physical address of mbuf data buffer */
1008 uint32_t hlen_type_rss;
1018 rx_id = rxq->rx_tail;
1019 rx_ring = rxq->rx_ring;
1020 sw_ring = rxq->sw_ring;
1023 * Retrieve RX context of current packet, if any.
1025 first_seg = rxq->pkt_first_seg;
1026 last_seg = rxq->pkt_last_seg;
1028 while (nb_rx < nb_pkts) {
1031 * The order of operations here is important as the DD status
1032 * bit must not be read after any other descriptor fields.
1033 * rx_ring and rxdp are pointing to volatile data so the order
1034 * of accesses cannot be reordered by the compiler. If they were
1035 * not volatile, they could be reordered which could lead to
1036 * using invalid descriptor fields when read from rxd.
1038 rxdp = &rx_ring[rx_id];
1039 staterr = rxdp->wb.upper.status_error;
1040 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1047 * Allocate a new mbuf to replenish the RX ring descriptor.
1048 * If the allocation fails:
1049 * - arrange for that RX descriptor to be the first one
1050 * being parsed the next time the receive function is
1051 * invoked [on the same queue].
1053 * - Stop parsing the RX ring and return immediately.
1055 * This policy does not drop the packet received in the RX
1056 * descriptor for which the allocation of a new mbuf failed.
1057 * Thus, it allows that packet to be later retrieved if
1058 * mbufs have been freed in the meantime.
1059 * As a side effect, holding RX descriptors instead of
1060 * systematically giving them back to the NIC may lead to
1061 * RX ring exhaustion situations.
1062 * However, the NIC can gracefully prevent such situations
1063 * from happening by sending specific "back-pressure" flow control
1064 * frames to its peer(s).
1066 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1067 "staterr=0x%x data_len=%u",
1068 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1069 (unsigned) rx_id, (unsigned) staterr,
1070 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1072 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1074 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1075 "queue_id=%u", (unsigned) rxq->port_id,
1076 (unsigned) rxq->queue_id);
1077 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1082 rxe = &sw_ring[rx_id];
1084 if (rx_id == rxq->nb_rx_desc)
1087 /* Prefetch next mbuf while processing current one. */
1088 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1091 * When the next RX descriptor is on a cache-line boundary,
1092 * prefetch the next 4 RX descriptors and the next 8 pointers
1095 if ((rx_id & 0x3) == 0) {
1096 rte_igb_prefetch(&rx_ring[rx_id]);
1097 rte_igb_prefetch(&sw_ring[rx_id]);
1101 * Update RX descriptor with the physical address of the new
1102 * data buffer of the new allocated mbuf.
1106 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1107 rxdp->read.pkt_addr = dma;
1108 rxdp->read.hdr_addr = 0;
1111 * Set data length & data buffer address of mbuf.
1113 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1114 rxm->data_len = data_len;
1115 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1118 * If this is the first buffer of the received packet,
1119 * set the pointer to the first mbuf of the packet and
1120 * initialize its context.
1121 * Otherwise, update the total length and the number of segments
1122 * of the current scattered packet, and update the pointer to
1123 * the last mbuf of the current packet.
1125 if (first_seg == NULL) {
1127 first_seg->pkt_len = data_len;
1128 first_seg->nb_segs = 1;
1130 first_seg->pkt_len += data_len;
1131 first_seg->nb_segs++;
1132 last_seg->next = rxm;
1136 * If this is not the last buffer of the received packet,
1137 * update the pointer to the last mbuf of the current scattered
1138 * packet and continue to parse the RX ring.
1140 if (! (staterr & E1000_RXD_STAT_EOP)) {
1146 * This is the last buffer of the received packet.
1147 * If the CRC is not stripped by the hardware:
1148 * - Subtract the CRC length from the total packet length.
1149 * - If the last buffer only contains the whole CRC or a part
1150 * of it, free the mbuf associated to the last buffer.
1151 * If part of the CRC is also contained in the previous
1152 * mbuf, subtract the length of that CRC part from the
1153 * data length of the previous mbuf.
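 *
 * Worked example (illustration only): if the last mbuf of the chain holds
 * only 2 bytes, it is freed, nb_segs is decremented and the previous
 * mbuf's data_len is reduced by ETHER_CRC_LEN - 2 = 2 bytes; pkt_len is
 * always reduced by the full 4-byte CRC.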
1156 if (unlikely(rxq->crc_len > 0)) {
1157 first_seg->pkt_len -= ETHER_CRC_LEN;
1158 if (data_len <= ETHER_CRC_LEN) {
1159 rte_pktmbuf_free_seg(rxm);
1160 first_seg->nb_segs--;
1161 last_seg->data_len = (uint16_t)
1162 (last_seg->data_len -
1163 (ETHER_CRC_LEN - data_len));
1164 last_seg->next = NULL;
1167 (uint16_t) (data_len - ETHER_CRC_LEN);
1171 * Initialize the first mbuf of the returned packet:
1172 * - RX port identifier,
1173 * - hardware offload data, if any:
1174 * - RSS flag & hash,
1175 * - IP checksum flag,
1176 * - VLAN TCI, if any,
1179 first_seg->port = rxq->port_id;
1180 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1183 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1184 * set in the pkt_flags field.
1186 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1187 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1188 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1189 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1190 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1191 first_seg->ol_flags = pkt_flags;
1192 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1193 lower.lo_dword.hs_rss.pkt_info);
1195 /* Prefetch data of first segment, if configured to do so. */
1196 rte_packet_prefetch((char *)first_seg->buf_addr +
1197 first_seg->data_off);
1200 * Store the mbuf address into the next entry of the array
1201 * of returned packets.
1203 rx_pkts[nb_rx++] = first_seg;
1206 * Setup receipt context for a new packet.
1212 * Record index of the next RX descriptor to probe.
1214 rxq->rx_tail = rx_id;
1217 * Save receive context.
1219 rxq->pkt_first_seg = first_seg;
1220 rxq->pkt_last_seg = last_seg;
1223 * If the number of free RX descriptors is greater than the RX free
1224 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1226 * Update the RDT with the value of the last processed RX descriptor
1227 * minus 1, to guarantee that the RDT register is never equal to the
1228 * RDH register, which creates a "full" ring situation from the
1229 * hardware point of view...
1231 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1232 if (nb_hold > rxq->rx_free_thresh) {
1233 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1234 "nb_hold=%u nb_rx=%u",
1235 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1236 (unsigned) rx_id, (unsigned) nb_hold,
1238 rx_id = (uint16_t) ((rx_id == 0) ?
1239 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1240 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1243 rxq->nb_rx_hold = nb_hold;
1248 * Maximum number of Ring Descriptors.
1250 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1251 * descriptors should meet the following condition:
1252 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
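 *
 * For example, with the 16-byte advanced RX/TX descriptor layout this means
 * the descriptor count must be a multiple of 8, which is what the
 * IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in the queue setup functions below
 * enforce.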
1256 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1260 if (txq->sw_ring != NULL) {
1261 for (i = 0; i < txq->nb_tx_desc; i++) {
1262 if (txq->sw_ring[i].mbuf != NULL) {
1263 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1264 txq->sw_ring[i].mbuf = NULL;
1271 igb_tx_queue_release(struct igb_tx_queue *txq)
1274 igb_tx_queue_release_mbufs(txq);
1275 rte_free(txq->sw_ring);
1281 eth_igb_tx_queue_release(void *txq)
1283 igb_tx_queue_release(txq);
1287 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1289 struct igb_tx_entry *sw_ring;
1290 volatile union e1000_adv_tx_desc *txr;
1291 uint16_t tx_first; /* First segment analyzed. */
1292 uint16_t tx_id; /* Current segment being processed. */
1293 uint16_t tx_last; /* Last segment in the current packet. */
1294 uint16_t tx_next; /* First segment of the next packet. */
1299 sw_ring = txq->sw_ring;
1303 * tx_tail is the last sent packet on the sw_ring. Go to the end
1304 * of that packet (the last segment in the packet chain) and
1305 * then the next segment will be the start of the oldest segment
1306 * in the sw_ring. This is the first packet that will be
1307 * attempted to be freed.
1310 /* Get last segment in most recently added packet. */
1311 tx_first = sw_ring[txq->tx_tail].last_id;
1313 /* Get the next segment, which is the oldest segment in ring. */
1314 tx_first = sw_ring[tx_first].next_id;
1316 /* Set the current index to the first. */
1320 * Loop through each packet. For each packet, verify that an
1321 * mbuf exists and that the last segment is free. If so, free
1325 tx_last = sw_ring[tx_id].last_id;
1327 if (sw_ring[tx_last].mbuf) {
1328 if (txr[tx_last].wb.status &
1329 E1000_TXD_STAT_DD) {
1331 * Increment the number of packets
1336 /* Get the start of the next packet. */
1337 tx_next = sw_ring[tx_last].next_id;
1340 * Loop through all segments in a
1344 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1345 sw_ring[tx_id].mbuf = NULL;
1346 sw_ring[tx_id].last_id = tx_id;
1348 /* Move to the next segment. */
1349 tx_id = sw_ring[tx_id].next_id;
1351 } while (tx_id != tx_next);
1353 if (unlikely(count == (int)free_cnt))
1357 * mbuf still in use, nothing left to
1363 * There are multiple reasons to be here:
1364 * 1) All the packets on the ring have been
1365 * freed - tx_id is equal to tx_first
1366 * and some packets have been freed.
1368 * 2) The interface has not sent a ring's worth of
1369 * packets yet, so the segment after the tail is
1370 * still empty. Or a previous call to this
1371 * function freed some of the segments but
1372 * not all of them, so there is a hole in the list.
1373 * Hopefully this is a rare case.
1374 * - Walk the list and find the next mbuf. If
1375 * there isn't one, then done.
1377 if (likely((tx_id == tx_first) && (count != 0)))
1381 * Walk the list and find the next mbuf, if any.
1384 /* Move to the next segment. */
1385 tx_id = sw_ring[tx_id].next_id;
1387 if (sw_ring[tx_id].mbuf)
1390 } while (tx_id != tx_first);
1393 * Determine why the previous loop bailed. If there
1394 * is no mbuf, then we are done.
1396 if (sw_ring[tx_id].mbuf == NULL)
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1409 return igb_tx_done_cleanup(txq, free_cnt);
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1418 memset((void*)&txq->ctx_cache, 0,
1419 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1425 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426 struct igb_tx_entry *txe = txq->sw_ring;
1428 struct e1000_hw *hw;
1430 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431 /* Zero out HW ring memory */
1432 for (i = 0; i < txq->nb_tx_desc; i++) {
1433 txq->tx_ring[i] = zeroed_desc;
1436 /* Initialize ring entries */
1437 prev = (uint16_t)(txq->nb_tx_desc - 1);
1438 for (i = 0; i < txq->nb_tx_desc; i++) {
1439 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1441 txd->wb.status = E1000_TXD_STAT_DD;
1444 txe[prev].next_id = i;
1448 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449 /* 82575 specific, each tx queue will use 2 hw contexts */
1450 if (hw->mac.type == e1000_82575)
1451 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1453 igb_reset_tx_queue_stat(txq);
1457 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1460 unsigned int socket_id,
1461 const struct rte_eth_txconf *tx_conf)
1463 const struct rte_memzone *tz;
1464 struct igb_tx_queue *txq;
1465 struct e1000_hw *hw;
1468 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1471 * Validate number of transmit descriptors.
1472 * It must not exceed hardware maximum, and must be multiple
1475 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1476 (nb_desc > E1000_MAX_RING_DESC) ||
1477 (nb_desc < E1000_MIN_RING_DESC)) {
1482 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1485 if (tx_conf->tx_free_thresh != 0)
1486 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1487 "used for the 1G driver.");
1488 if (tx_conf->tx_rs_thresh != 0)
1489 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1490 "used for the 1G driver.");
1491 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1492 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1493 "consider setting the TX WTHRESH value to 4, 8, "
1496 /* Free memory prior to re-allocation if needed */
1497 if (dev->data->tx_queues[queue_idx] != NULL) {
1498 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1499 dev->data->tx_queues[queue_idx] = NULL;
1502 /* First allocate the tx queue data structure */
1503 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1504 RTE_CACHE_LINE_SIZE);
1509 * Allocate TX ring hardware descriptors. A memzone large enough to
1510 * handle the maximum ring size is allocated in order to allow for
1511 * resizing in later calls to the queue setup function.
1513 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1514 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1515 E1000_ALIGN, socket_id);
1517 igb_tx_queue_release(txq);
1521 txq->nb_tx_desc = nb_desc;
1522 txq->pthresh = tx_conf->tx_thresh.pthresh;
1523 txq->hthresh = tx_conf->tx_thresh.hthresh;
1524 txq->wthresh = tx_conf->tx_thresh.wthresh;
1525 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1527 txq->queue_id = queue_idx;
1528 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1529 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1530 txq->port_id = dev->data->port_id;
1532 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1533 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1535 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1536 /* Allocate software ring */
1537 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1538 sizeof(struct igb_tx_entry) * nb_desc,
1539 RTE_CACHE_LINE_SIZE);
1540 if (txq->sw_ring == NULL) {
1541 igb_tx_queue_release(txq);
1544 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1545 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1547 igb_reset_tx_queue(txq, dev);
1548 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1549 dev->tx_pkt_prepare = ð_igb_prep_pkts;
1550 dev->data->tx_queues[queue_idx] = txq;
1556 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1560 if (rxq->sw_ring != NULL) {
1561 for (i = 0; i < rxq->nb_rx_desc; i++) {
1562 if (rxq->sw_ring[i].mbuf != NULL) {
1563 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1564 rxq->sw_ring[i].mbuf = NULL;
1571 igb_rx_queue_release(struct igb_rx_queue *rxq)
1574 igb_rx_queue_release_mbufs(rxq);
1575 rte_free(rxq->sw_ring);
1581 eth_igb_rx_queue_release(void *rxq)
1583 igb_rx_queue_release(rxq);
1587 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1589 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1592 /* Zero out HW ring memory */
1593 for (i = 0; i < rxq->nb_rx_desc; i++) {
1594 rxq->rx_ring[i] = zeroed_desc;
1598 rxq->pkt_first_seg = NULL;
1599 rxq->pkt_last_seg = NULL;
1603 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1606 unsigned int socket_id,
1607 const struct rte_eth_rxconf *rx_conf,
1608 struct rte_mempool *mp)
1610 const struct rte_memzone *rz;
1611 struct igb_rx_queue *rxq;
1612 struct e1000_hw *hw;
1615 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1618 * Validate number of receive descriptors.
1619 * It must not exceed hardware maximum, and must be multiple
1622 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1623 (nb_desc > E1000_MAX_RING_DESC) ||
1624 (nb_desc < E1000_MIN_RING_DESC)) {
1628 /* Free memory prior to re-allocation if needed */
1629 if (dev->data->rx_queues[queue_idx] != NULL) {
1630 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1631 dev->data->rx_queues[queue_idx] = NULL;
1634 /* First allocate the RX queue data structure. */
1635 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1636 RTE_CACHE_LINE_SIZE);
1640 rxq->nb_rx_desc = nb_desc;
1641 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1642 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1643 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1644 if (rxq->wthresh > 0 &&
1645 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1647 rxq->drop_en = rx_conf->rx_drop_en;
1648 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1649 rxq->queue_id = queue_idx;
1650 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1651 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1652 rxq->port_id = dev->data->port_id;
1653 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1657 * Allocate RX ring hardware descriptors. A memzone large enough to
1658 * handle the maximum ring size is allocated in order to allow for
1659 * resizing in later calls to the queue setup function.
1661 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1662 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1663 E1000_ALIGN, socket_id);
1665 igb_rx_queue_release(rxq);
1668 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1669 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1670 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1671 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1673 /* Allocate software ring. */
1674 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1675 sizeof(struct igb_rx_entry) * nb_desc,
1676 RTE_CACHE_LINE_SIZE);
1677 if (rxq->sw_ring == NULL) {
1678 igb_rx_queue_release(rxq);
1681 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1682 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1684 dev->data->rx_queues[queue_idx] = rxq;
1685 igb_reset_rx_queue(rxq);
1691 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1693 #define IGB_RXQ_SCAN_INTERVAL 4
1694 volatile union e1000_adv_rx_desc *rxdp;
1695 struct igb_rx_queue *rxq;
1698 rxq = dev->data->rx_queues[rx_queue_id];
1699 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1701 while ((desc < rxq->nb_rx_desc) &&
1702 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1703 desc += IGB_RXQ_SCAN_INTERVAL;
1704 rxdp += IGB_RXQ_SCAN_INTERVAL;
1705 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1706 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1707 desc - rxq->nb_rx_desc]);
1714 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1716 volatile union e1000_adv_rx_desc *rxdp;
1717 struct igb_rx_queue *rxq = rx_queue;
1720 if (unlikely(offset >= rxq->nb_rx_desc))
1722 desc = rxq->rx_tail + offset;
1723 if (desc >= rxq->nb_rx_desc)
1724 desc -= rxq->nb_rx_desc;
1726 rxdp = &rxq->rx_ring[desc];
1727 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1731 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1733 struct igb_rx_queue *rxq = rx_queue;
1734 volatile uint32_t *status;
1737 if (unlikely(offset >= rxq->nb_rx_desc))
1740 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1741 return RTE_ETH_RX_DESC_UNAVAIL;
1743 desc = rxq->rx_tail + offset;
1744 if (desc >= rxq->nb_rx_desc)
1745 desc -= rxq->nb_rx_desc;
1747 status = &rxq->rx_ring[desc].wb.upper.status_error;
1748 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1749 return RTE_ETH_RX_DESC_DONE;
1751 return RTE_ETH_RX_DESC_AVAIL;
1755 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1757 struct igb_tx_queue *txq = tx_queue;
1758 volatile uint32_t *status;
1761 if (unlikely(offset >= txq->nb_tx_desc))
1764 desc = txq->tx_tail + offset;
1765 if (desc >= txq->nb_tx_desc)
1766 desc -= txq->nb_tx_desc;
1768 status = &txq->tx_ring[desc].wb.status;
1769 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1770 return RTE_ETH_TX_DESC_DONE;
1772 return RTE_ETH_TX_DESC_FULL;
1776 igb_dev_clear_queues(struct rte_eth_dev *dev)
1779 struct igb_tx_queue *txq;
1780 struct igb_rx_queue *rxq;
1782 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1783 txq = dev->data->tx_queues[i];
1785 igb_tx_queue_release_mbufs(txq);
1786 igb_reset_tx_queue(txq, dev);
1790 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1791 rxq = dev->data->rx_queues[i];
1793 igb_rx_queue_release_mbufs(rxq);
1794 igb_reset_rx_queue(rxq);
1800 igb_dev_free_queues(struct rte_eth_dev *dev)
1804 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1805 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1806 dev->data->rx_queues[i] = NULL;
1808 dev->data->nb_rx_queues = 0;
1810 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1811 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1812 dev->data->tx_queues[i] = NULL;
1814 dev->data->nb_tx_queues = 0;
1818 * Receive Side Scaling (RSS).
1819 * See section 7.1.1.7 in the following document:
1820 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1823 * The source and destination IP addresses of the IP header and the source and
1824 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1825 * against a configurable random key to compute a 32-bit RSS hash result.
1826 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1827 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1828 * RSS output index which is used as the RX queue index where to store the
1830 * The following output is supplied in the RX write-back descriptor:
1831 * - 32-bit result of the Microsoft RSS hash function,
1832 * - 4-bit RSS type field.
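 *
 * Minimal sketch of the resulting queue selection (illustration only,
 * reta[] stands for the hardware redirection table programmed by
 * igb_rss_configure() below):
 *
 *     uint32_t hash  = rxd.wb.lower.hi_dword.rss;  // 32-bit RSS hash result
 *     uint8_t  queue = reta[hash & 0x7F];          // 7 LSBs index the RETA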
1836 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1837 * Used as the default key.
1839 static uint8_t rss_intel_key[40] = {
1840 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1841 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1842 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1843 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1844 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1848 igb_rss_disable(struct rte_eth_dev *dev)
1850 struct e1000_hw *hw;
1853 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1854 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1855 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1856 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1860 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1868 hash_key = rss_conf->rss_key;
1869 if (hash_key != NULL) {
1870 /* Fill in RSS hash key */
1871 for (i = 0; i < 10; i++) {
1872 rss_key = hash_key[(i * 4)];
1873 rss_key |= hash_key[(i * 4) + 1] << 8;
1874 rss_key |= hash_key[(i * 4) + 2] << 16;
1875 rss_key |= hash_key[(i * 4) + 3] << 24;
1876 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1880 /* Set configured hashing protocols in MRQC register */
1881 rss_hf = rss_conf->rss_hf;
1882 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1883 if (rss_hf & ETH_RSS_IPV4)
1884 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1885 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1886 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1887 if (rss_hf & ETH_RSS_IPV6)
1888 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1889 if (rss_hf & ETH_RSS_IPV6_EX)
1890 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1891 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1892 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1893 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1894 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1895 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1896 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1897 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1898 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1899 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1900 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1901 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1905 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1906 struct rte_eth_rss_conf *rss_conf)
1908 struct e1000_hw *hw;
1912 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 * Before changing anything, first check that the update RSS operation
1916 * does not attempt to disable RSS, if RSS was enabled at
1917 * initialization time, or does not attempt to enable RSS, if RSS was
1918 * disabled at initialization time.
1920 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1921 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1922 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1923 if (rss_hf != 0) /* Enable RSS */
1925 return 0; /* Nothing to do */
1928 if (rss_hf == 0) /* Disable RSS */
1930 igb_hw_rss_hash_set(hw, rss_conf);
1934 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1935 struct rte_eth_rss_conf *rss_conf)
1937 struct e1000_hw *hw;
1944 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1945 hash_key = rss_conf->rss_key;
1946 if (hash_key != NULL) {
1947 /* Return RSS hash key */
1948 for (i = 0; i < 10; i++) {
1949 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1950 hash_key[(i * 4)] = rss_key & 0x000000FF;
1951 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1952 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1953 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1957 /* Get RSS functions configured in MRQC register */
1958 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1959 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1960 rss_conf->rss_hf = 0;
1964 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1965 rss_hf |= ETH_RSS_IPV4;
1966 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1967 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1968 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1969 rss_hf |= ETH_RSS_IPV6;
1970 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1971 rss_hf |= ETH_RSS_IPV6_EX;
1972 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1973 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1974 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1975 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1976 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1977 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1978 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1979 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1980 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1981 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1982 rss_conf->rss_hf = rss_hf;
1987 igb_rss_configure(struct rte_eth_dev *dev)
1989 struct rte_eth_rss_conf rss_conf;
1990 struct e1000_hw *hw;
1994 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1996 /* Fill in redirection table. */
1997 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1998 for (i = 0; i < 128; i++) {
2005 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2006 i % dev->data->nb_rx_queues : 0);
2007 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2009 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
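/*
 * Worked example (illustration only): with 4 RX queues the 128 RETA entries
 * map to queues 0,1,2,3,0,1,... and one 32-bit RETA register is written for
 * every fourth entry, i.e. 32 register writes in total.
 */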
2013 * Configure the RSS key and the RSS protocols used to compute
2014 * the RSS hash of input packets.
2016 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2017 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2018 igb_rss_disable(dev);
2021 if (rss_conf.rss_key == NULL)
2022 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2023 igb_hw_rss_hash_set(hw, &rss_conf);
2027 * Check whether the MAC type supports VMDq.
2028 * Return 1 if it does, otherwise return 0.
2031 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2033 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2035 switch (hw->mac.type) {
2056 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2062 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2064 struct rte_eth_vmdq_rx_conf *cfg;
2065 struct e1000_hw *hw;
2066 uint32_t mrqc, vt_ctl, vmolr, rctl;
2069 PMD_INIT_FUNC_TRACE();
2071 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2074 /* Check if the MAC type supports VMDq; a return value of 0 means it does not */
2075 if (igb_is_vmdq_supported(dev) == 0)
2078 igb_rss_disable(dev);
2080 /* RCTL: enable VLAN filtering */
2081 rctl = E1000_READ_REG(hw, E1000_RCTL);
2082 rctl |= E1000_RCTL_VFE;
2083 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2085 /* MRQC: enable vmdq */
2086 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2087 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2088 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2090 /* VTCTL: pool selection according to VLAN tag */
2091 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2092 if (cfg->enable_default_pool)
2093 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2094 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2095 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2097 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2098 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2099 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2100 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2103 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2104 vmolr |= E1000_VMOLR_AUPE;
2105 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2106 vmolr |= E1000_VMOLR_ROMPE;
2107 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2108 vmolr |= E1000_VMOLR_ROPE;
2109 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2110 vmolr |= E1000_VMOLR_BAM;
2111 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2112 vmolr |= E1000_VMOLR_MPME;
2114 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
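/*
 * Example (illustrative): a VMDq RX configuration with
 * rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST sets
 * AUPE and BAM in every pool's VMOLR, so untagged and broadcast frames
 * are accepted while hash-matched unicast/multicast acceptance stays
 * disabled.
 */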
2118 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2119 * Both the 82576 and the 82580 support it.
2121 if (hw->mac.type != e1000_i350) {
2122 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2123 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2124 vmolr |= E1000_VMOLR_STRVLAN;
2125 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2129 /* VFTA - enable all vlan filters */
2130 for (i = 0; i < IGB_VFTA_SIZE; i++)
2131 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2133 /* VFRE: enable RX for 8 pools; both the 82576 and the i350 support it */
2134 if (hw->mac.type != e1000_82580)
2135 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2138 * RAH/RAL - allow pools to read specific MAC addresses.
2139 * In this case, all pools should be able to read from MAC addr 0.
2141 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2142 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2144 /* VLVF: set up filters for vlan tags as configured */
2145 for (i = 0; i < cfg->nb_pool_maps; i++) {
2146 /* set the VLAN ID in the VLVF register and set the valid bit */
2147 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2148 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2149 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2150 E1000_VLVF_POOLSEL_MASK)));
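/*
 * Example (illustrative): pool_map[0] = { .vlan_id = 100, .pools = 0x3 }
 * programs VLVF(0) with VLAN ID 100, the valid bit, and pool-select bits
 * for pools 0 and 1, so frames tagged with VLAN 100 are steered to those
 * two pools.
 */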
2153 E1000_WRITE_FLUSH(hw);
2159 /*********************************************************************
2161 * Enable receive unit.
2163 **********************************************************************/
2166 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2168 struct igb_rx_entry *rxe = rxq->sw_ring;
2172 /* Initialize software ring entries. */
2173 for (i = 0; i < rxq->nb_rx_desc; i++) {
2174 volatile union e1000_adv_rx_desc *rxd;
2175 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2178 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2179 "queue_id=%hu", rxq->queue_id);
2183 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
2184 rxd = &rxq->rx_ring[i];
2185 rxd->read.hdr_addr = 0;
2186 rxd->read.pkt_addr = dma_addr;
2193 #define E1000_MRQC_DEF_Q_SHIFT (3)
2195 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2197 struct e1000_hw *hw =
2198 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2201 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2203 * SRIOV active scheme
2204 * FIXME: add support for RSS together with VMDq & SRIOV
2206 mrqc = E1000_MRQC_ENABLE_VMDQ;
2207 /* 011b: the Def_Q field is ignored; the default pool comes from VT_CTL.DEF_PL */
2208 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2209 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2210 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2212 * SRIOV inactive scheme
2214 switch (dev->data->dev_conf.rxmode.mq_mode) {
2216 igb_rss_configure(dev);
2218 case ETH_MQ_RX_VMDQ_ONLY:
2219 /* Configure general VMDq-only RX parameters */
2220 igb_vmdq_rx_hw_configure(dev);
2222 case ETH_MQ_RX_NONE:
2223 /* If mq_mode is none, disable RSS. */
2225 igb_rss_disable(dev);
2234 eth_igb_rx_init(struct rte_eth_dev *dev)
2236 struct e1000_hw *hw;
2237 struct igb_rx_queue *rxq;
2242 uint16_t rctl_bsize;
2246 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2250 * Make sure receives are disabled while setting
2251 * up the descriptor ring.
2253 rctl = E1000_READ_REG(hw, E1000_RCTL);
2254 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2257 * Configure jumbo frame support, if enabled.
2259 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2260 rctl |= E1000_RCTL_LPE;
2263 * Set the maximum packet length by default; it may be updated
2264 * later when dual VLAN is enabled or disabled.
2266 E1000_WRITE_REG(hw, E1000_RLPML,
2267 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2270 rctl &= ~E1000_RCTL_LPE;
2272 /* Configure and enable each RX queue. */
2274 dev->rx_pkt_burst = eth_igb_recv_pkts;
2275 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2279 rxq = dev->data->rx_queues[i];
2281 /* Allocate buffers for descriptor rings and set up queue */
2282 ret = igb_alloc_rx_queue_mbufs(rxq);
2287 * Reset crc_len in case it was changed after queue setup by a
2291 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2294 bus_addr = rxq->rx_ring_phys_addr;
2295 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2297 sizeof(union e1000_adv_rx_desc));
2298 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2299 (uint32_t)(bus_addr >> 32));
2300 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
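/*
 * Worked example (illustrative): a ring of 512 advanced RX descriptors
 * (16 bytes each) gives RDLEN = 512 * 16 = 8192 bytes; the 64-bit DMA
 * address of the ring is split into RDBAH (upper 32 bits) and RDBAL
 * (lower 32 bits).
 */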
2302 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2305 * Configure RX buffer size.
2307 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2308 RTE_PKTMBUF_HEADROOM);
2309 if (buf_size >= 1024) {
2311 * Configure the BSIZEPACKET field of the SRRCTL
2312 * register of the queue.
2313 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2314 * If this field is equal to 0b, then RCTL.BSIZE
2315 * determines the RX packet buffer size.
2317 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2318 E1000_SRRCTL_BSIZEPKT_MASK);
2319 buf_size = (uint16_t) ((srrctl &
2320 E1000_SRRCTL_BSIZEPKT_MASK) <<
2321 E1000_SRRCTL_BSIZEPKT_SHIFT);
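/*
 * Worked example (illustrative): with a mempool data room of 3000 bytes
 * and the default headroom of 128, buf_size starts at 2872;
 * 2872 >> 10 = 2, so BSIZEPACKET is programmed to 2 and buf_size is
 * rounded down to the 2048 bytes the hardware will actually use per
 * buffer.
 */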
2323 /* Add the dual VLAN tag length to support dual VLAN */
2324 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2325 2 * VLAN_TAG_SIZE) > buf_size){
2326 if (!dev->data->scattered_rx)
2328 "forcing scatter mode");
2329 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2330 dev->data->scattered_rx = 1;
2334 * Use BSIZE field of the device RCTL register.
2336 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2337 rctl_bsize = buf_size;
2338 if (!dev->data->scattered_rx)
2339 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2340 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2341 dev->data->scattered_rx = 1;
2344 /* Drop packets when no RX descriptors are available, if enabled */
2346 srrctl |= E1000_SRRCTL_DROP_EN;
2348 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2350 /* Enable this RX queue. */
2351 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2352 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2353 rxdctl &= 0xFFF00000;
2354 rxdctl |= (rxq->pthresh & 0x1F);
2355 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2356 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2357 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
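/*
 * Example (illustrative): with pthresh = 8, hthresh = 8 and wthresh = 4
 * the threshold fields above yield 0x8 | (0x8 << 8) | (0x4 << 16) =
 * 0x00040808, which is OR-ed with E1000_RXDCTL_QUEUE_ENABLE before being
 * written to RXDCTL.
 */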
2360 if (dev->data->dev_conf.rxmode.enable_scatter) {
2361 if (!dev->data->scattered_rx)
2362 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2363 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2364 dev->data->scattered_rx = 1;
2368 * Set up the BSIZE field of the RCTL register, if needed.
2369 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2370 * register, since the code above configures the SRRCTL register of
2371 * the RX queue in such a case.
2372 * All configurable sizes are:
2373 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2374 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2375 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2376 * 2048: rctl |= E1000_RCTL_SZ_2048;
2377 * 1024: rctl |= E1000_RCTL_SZ_1024;
2378 * 512: rctl |= E1000_RCTL_SZ_512;
2379 * 256: rctl |= E1000_RCTL_SZ_256;
2381 if (rctl_bsize > 0) {
2382 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2383 rctl |= E1000_RCTL_SZ_512;
2384 else /* 256 <= buf_size < 512 - use 256 */
2385 rctl |= E1000_RCTL_SZ_256;
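/*
 * Example (illustrative): rctl_bsize is only non-zero here when the
 * smallest queue buffer is below 1 KB (larger buffers were programmed
 * per queue through SRRCTL above), so a 600-byte buffer selects SZ_512
 * and a 300-byte buffer selects SZ_256.
 */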
2389 * Configure RSS if device configured with multiple RX queues.
2391 igb_dev_mq_rx_configure(dev);
2393 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2394 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2397 * Setup the Checksum Register.
2398 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2400 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2401 rxcsum |= E1000_RXCSUM_PCSD;
2403 /* Enable both L3/L4 rx checksum offload */
2404 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2405 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2406 E1000_RXCSUM_CRCOFL);
2408 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2409 E1000_RXCSUM_CRCOFL);
2410 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2412 /* Setup the Receive Control Register. */
2413 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2414 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2416 /* set STRCRC bit in all queues */
2417 if (hw->mac.type == e1000_i350 ||
2418 hw->mac.type == e1000_i210 ||
2419 hw->mac.type == e1000_i211 ||
2420 hw->mac.type == e1000_i354) {
2421 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2422 rxq = dev->data->rx_queues[i];
2423 uint32_t dvmolr = E1000_READ_REG(hw,
2424 E1000_DVMOLR(rxq->reg_idx));
2425 dvmolr |= E1000_DVMOLR_STRCRC;
2426 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2430 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2432 /* clear STRCRC bit in all queues */
2433 if (hw->mac.type == e1000_i350 ||
2434 hw->mac.type == e1000_i210 ||
2435 hw->mac.type == e1000_i211 ||
2436 hw->mac.type == e1000_i354) {
2437 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2438 rxq = dev->data->rx_queues[i];
2439 uint32_t dvmolr = E1000_READ_REG(hw,
2440 E1000_DVMOLR(rxq->reg_idx));
2441 dvmolr &= ~E1000_DVMOLR_STRCRC;
2442 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2447 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2448 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2449 E1000_RCTL_RDMTS_HALF |
2450 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2452 /* Make sure VLAN Filters are off. */
2453 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2454 rctl &= ~E1000_RCTL_VFE;
2455 /* Don't store bad packets. */
2456 rctl &= ~E1000_RCTL_SBP;
2458 /* Enable Receives. */
2459 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2462 * Set up the HW RX Head and Tail Descriptor Pointers.
2463 * This needs to be done after the receive unit is enabled.
2465 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2466 rxq = dev->data->rx_queues[i];
2467 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2468 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2474 /*********************************************************************
2476 * Enable transmit unit.
2478 **********************************************************************/
2480 eth_igb_tx_init(struct rte_eth_dev *dev)
2482 struct e1000_hw *hw;
2483 struct igb_tx_queue *txq;
2488 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2490 /* Setup the Base and Length of the Tx Descriptor Rings. */
2491 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2493 txq = dev->data->tx_queues[i];
2494 bus_addr = txq->tx_ring_phys_addr;
2496 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2498 sizeof(union e1000_adv_tx_desc));
2499 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2500 (uint32_t)(bus_addr >> 32));
2501 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2503 /* Setup the HW Tx Head and Tail descriptor pointers. */
2504 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2505 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2507 /* Setup Transmit threshold registers. */
2508 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2509 txdctl |= txq->pthresh & 0x1F;
2510 txdctl |= ((txq->hthresh & 0x1F) << 8);
2511 txdctl |= ((txq->wthresh & 0x1F) << 16);
2512 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2513 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2516 /* Program the Transmit Control Register. */
2517 tctl = E1000_READ_REG(hw, E1000_TCTL);
2518 tctl &= ~E1000_TCTL_CT;
2519 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2520 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2522 e1000_config_collision_dist(hw);
2524 /* This write will effectively turn on the transmit unit. */
2525 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2528 /*********************************************************************
2530 * Enable VF receive unit.
2532 **********************************************************************/
2534 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2536 struct e1000_hw *hw;
2537 struct igb_rx_queue *rxq;
2540 uint16_t rctl_bsize;
2544 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2547 e1000_rlpml_set_vf(hw,
2548 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2551 /* Configure and enable each RX queue. */
2553 dev->rx_pkt_burst = eth_igb_recv_pkts;
2554 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2558 rxq = dev->data->rx_queues[i];
2560 /* Allocate buffers for descriptor rings and set up queue */
2561 ret = igb_alloc_rx_queue_mbufs(rxq);
2565 bus_addr = rxq->rx_ring_phys_addr;
2566 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2568 sizeof(union e1000_adv_rx_desc));
2569 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2570 (uint32_t)(bus_addr >> 32));
2571 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2573 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2576 * Configure RX buffer size.
2578 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2579 RTE_PKTMBUF_HEADROOM);
2580 if (buf_size >= 1024) {
2582 * Configure the BSIZEPACKET field of the SRRCTL
2583 * register of the queue.
2584 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2585 * If this field is equal to 0b, then RCTL.BSIZE
2586 * determines the RX packet buffer size.
2588 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2589 E1000_SRRCTL_BSIZEPKT_MASK);
2590 buf_size = (uint16_t) ((srrctl &
2591 E1000_SRRCTL_BSIZEPKT_MASK) <<
2592 E1000_SRRCTL_BSIZEPKT_SHIFT);
2594 /* Add the dual VLAN tag length to support dual VLAN */
2595 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2596 2 * VLAN_TAG_SIZE) > buf_size){
2597 if (!dev->data->scattered_rx)
2599 "forcing scatter mode");
2600 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2601 dev->data->scattered_rx = 1;
2605 * Use BSIZE field of the device RCTL register.
2607 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2608 rctl_bsize = buf_size;
2609 if (!dev->data->scattered_rx)
2610 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2611 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2612 dev->data->scattered_rx = 1;
2615 /* Drop packets when no RX descriptors are available, if enabled */
2617 srrctl |= E1000_SRRCTL_DROP_EN;
2619 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2621 /* Enable this RX queue. */
2622 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2623 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2624 rxdctl &= 0xFFF00000;
2625 rxdctl |= (rxq->pthresh & 0x1F);
2626 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2627 if (hw->mac.type == e1000_vfadapt) {
2629 * Workaround for the 82576 VF erratum:
2630 * force WTHRESH to 1
2631 * to avoid descriptor write-back sometimes not being triggered
2634 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2637 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2638 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2641 if (dev->data->dev_conf.rxmode.enable_scatter) {
2642 if (!dev->data->scattered_rx)
2643 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2644 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2645 dev->data->scattered_rx = 1;
2649 * Set up the HW RX Head and Tail Descriptor Pointers.
2650 * This needs to be done after the queues are enabled.
2652 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2653 rxq = dev->data->rx_queues[i];
2654 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2655 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2661 /*********************************************************************
2663 * Enable VF transmit unit.
2665 **********************************************************************/
2667 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2669 struct e1000_hw *hw;
2670 struct igb_tx_queue *txq;
2674 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2676 /* Setup the Base and Length of the Tx Descriptor Rings. */
2677 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2680 txq = dev->data->tx_queues[i];
2681 bus_addr = txq->tx_ring_phys_addr;
2682 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2684 sizeof(union e1000_adv_tx_desc));
2685 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2686 (uint32_t)(bus_addr >> 32));
2687 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2689 /* Setup the HW Tx Head and Tail descriptor pointers. */
2690 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2691 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2693 /* Setup Transmit threshold registers. */
2694 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2695 txdctl |= txq->pthresh & 0x1F;
2696 txdctl |= ((txq->hthresh & 0x1F) << 8);
2697 if (hw->mac.type == e1000_82576) {
2699 * Workaround for the 82576 VF erratum:
2700 * force WTHRESH to 1
2701 * to avoid descriptor write-back sometimes not being triggered
2704 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2707 txdctl |= ((txq->wthresh & 0x1F) << 16);
2708 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2709 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2715 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2716 struct rte_eth_rxq_info *qinfo)
2718 struct igb_rx_queue *rxq;
2720 rxq = dev->data->rx_queues[queue_id];
2722 qinfo->mp = rxq->mb_pool;
2723 qinfo->scattered_rx = dev->data->scattered_rx;
2724 qinfo->nb_desc = rxq->nb_rx_desc;
2726 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2727 qinfo->conf.rx_drop_en = rxq->drop_en;
2731 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2732 struct rte_eth_txq_info *qinfo)
2734 struct igb_tx_queue *txq;
2736 txq = dev->data->tx_queues[queue_id];
2738 qinfo->nb_desc = txq->nb_tx_desc;
2740 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2741 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2742 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
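/*
 * Illustrative usage sketch (not part of the driver): applications reach
 * these helpers through the generic ethdev queue-info API.  port_id and
 * queue 0 are assumptions for the example.
 *
 *	struct rte_eth_rxq_info rx_qinfo;
 *	struct rte_eth_txq_info tx_qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
 *		printf("RX queue 0: %u descriptors\n", rx_qinfo.nb_desc);
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
 *		printf("TX queue 0: wthresh %u\n",
 *		       tx_qinfo.conf.tx_thresh.wthresh);
 */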