1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
40 #include <rte_string_fns.h>
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
49 #define IGB_TX_IEEE1588_TMST 0
51 /* Bit mask to indicate which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK ( \
63 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
64 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
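/*
 * Any offload flag that belongs to the generic PKT_TX_OFFLOAD_MASK but is
 * not listed in IGB_TX_OFFLOAD_MASK therefore ends up in
 * IGB_TX_OFFLOAD_NOTSUP_MASK; eth_igb_prep_pkts() uses this mask to detect
 * packets requesting offloads this driver cannot perform.
 */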
67 * Structure associated with each descriptor of the RX ring of an RX queue.
70 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
74 * Structure associated with each descriptor of the TX ring of a TX queue.
77 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
78 uint16_t next_id; /**< Index of next descriptor in ring. */
79 uint16_t last_id; /**< Index of last scattered descriptor. */
86 IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
90 * Structure associated with each RX queue.
93 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
94 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
95 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
96 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
97 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
98 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
99 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
100 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
101 uint16_t nb_rx_desc; /**< number of RX descriptors. */
102 uint16_t rx_tail; /**< current value of RDT register. */
103 uint16_t nb_rx_hold; /**< number of held free RX desc. */
104 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
105 uint16_t queue_id; /**< RX queue index. */
106 uint16_t reg_idx; /**< RX queue register index. */
107 uint16_t port_id; /**< Device port identifier. */
108 uint8_t pthresh; /**< Prefetch threshold register. */
109 uint8_t hthresh; /**< Host threshold register. */
110 uint8_t wthresh; /**< Write-back threshold register. */
111 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
112 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
113 uint32_t flags; /**< RX flags. */
114 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
115 const struct rte_memzone *mz;
119 * Hardware context number
121 enum igb_advctx_num {
122 IGB_CTX_0 = 0, /**< CTX0 */
123 IGB_CTX_1 = 1, /**< CTX1 */
124 IGB_CTX_NUM = 2, /**< CTX_NUM */
127 /** Offload features */
128 union igb_tx_offload {
131 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
132 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
133 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
134 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
135 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
137 /* uint64_t unused:8; */
142 * Compare mask for igb_tx_offload.data,
143 * should be in sync with igb_tx_offload layout.
145 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
146 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
147 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
148 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
149 /** Mac + IP + TCP + Mss mask. */
150 #define TX_TSO_CMP_MASK \
151 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
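/*
 * Derived from the igb_tx_offload bit-field layout above:
 *   bits  0-15: l3_len + l2_len  -> TX_MACIP_LEN_CMP_MASK
 *   bits 16-31: vlan_tci         -> TX_VLAN_CMP_MASK
 *   bits 32-39: l4_len           -> TX_TCP_LEN_CMP_MASK
 *   bits 40-55: tso_segsz        -> TX_TSO_MSS_CMP_MASK
 */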
154 * Structure to check whether a new context needs to be built
156 struct igb_advctx_info {
157 uint64_t flags; /**< ol_flags related to context build. */
158 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
159 union igb_tx_offload tx_offload;
160 /** compare mask for tx offload. */
161 union igb_tx_offload tx_offload_mask;
165 * Structure associated with each TX queue.
167 struct igb_tx_queue {
168 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
169 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
170 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
171 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
172 uint32_t txd_type; /**< Device-specific TXD type */
173 uint16_t nb_tx_desc; /**< number of TX descriptors. */
174 uint16_t tx_tail; /**< Current value of TDT register. */
176 /**< Index of first used TX descriptor. */
177 uint16_t queue_id; /**< TX queue index. */
178 uint16_t reg_idx; /**< TX queue register index. */
179 uint16_t port_id; /**< Device port identifier. */
180 uint8_t pthresh; /**< Prefetch threshold register. */
181 uint8_t hthresh; /**< Host threshold register. */
182 uint8_t wthresh; /**< Write-back threshold register. */
184 /**< Currently used hardware context. */
186 /**< Start context position for transmit queue. */
187 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
188 /**< Hardware context history. */
189 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
190 const struct rte_memzone *mz;
194 #define RTE_PMD_USE_PREFETCH
197 #ifdef RTE_PMD_USE_PREFETCH
198 #define rte_igb_prefetch(p) rte_prefetch0(p)
200 #define rte_igb_prefetch(p) do {} while(0)
203 #ifdef RTE_PMD_PACKET_PREFETCH
204 #define rte_packet_prefetch(p) rte_prefetch1(p)
206 #define rte_packet_prefetch(p) do {} while(0)
210 * Macro for VMDq feature for 1 GbE NIC.
212 #define E1000_VMOLR_SIZE (8)
213 #define IGB_TSO_MAX_HDRLEN (512)
214 #define IGB_TSO_MAX_MSS (9216)
216 /*********************************************************************
220 **********************************************************************/
223 * There are some limitations in hardware for TCP segmentation offload. We
224 * should check whether the parameters are valid.
226 static inline uint64_t
227 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
229 if (!(ol_req & PKT_TX_TCP_SEG))
231 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
232 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
233 ol_req &= ~PKT_TX_TCP_SEG;
234 ol_req |= PKT_TX_TCP_CKSUM;
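/*
 * The TSO request is downgraded to a plain TCP checksum offload
 * when the MSS or the total header length exceeds what the
 * hardware supports.
 */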
240 * Advanced context descriptors are almost the same between igb/ixgbe.
241 * This is kept as a separate function, as there may be optimization opportunities here.
242 * Rework is required to go with the pre-defined values.
246 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
247 volatile struct e1000_adv_tx_context_desc *ctx_txd,
248 uint64_t ol_flags, union igb_tx_offload tx_offload)
250 uint32_t type_tucmd_mlhl;
251 uint32_t mss_l4len_idx;
252 uint32_t ctx_idx, ctx_curr;
253 uint32_t vlan_macip_lens;
254 union igb_tx_offload tx_offload_mask;
256 ctx_curr = txq->ctx_curr;
257 ctx_idx = ctx_curr + txq->ctx_start;
259 tx_offload_mask.data = 0;
262 /* Specify which HW CTX to upload. */
263 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
265 if (ol_flags & PKT_TX_VLAN_PKT)
266 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
268 /* check if TCP segmentation is required for this packet */
269 if (ol_flags & PKT_TX_TCP_SEG) {
270 /* implies IP cksum in IPv4 */
271 if (ol_flags & PKT_TX_IP_CKSUM)
272 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
273 E1000_ADVTXD_TUCMD_L4T_TCP |
274 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
276 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
277 E1000_ADVTXD_TUCMD_L4T_TCP |
278 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
280 tx_offload_mask.data |= TX_TSO_CMP_MASK;
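/*
 * For TSO, the MSS and the L4 header length are packed, together with
 * the context index set above, into the single mss_l4len_idx field of
 * the context descriptor.
 */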
281 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
282 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
283 } else { /* no TSO, check if hardware checksum is needed */
284 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
285 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
287 if (ol_flags & PKT_TX_IP_CKSUM)
288 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
290 switch (ol_flags & PKT_TX_L4_MASK) {
291 case PKT_TX_UDP_CKSUM:
292 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
293 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
294 mss_l4len_idx |= sizeof(struct rte_udp_hdr)
295 << E1000_ADVTXD_L4LEN_SHIFT;
297 case PKT_TX_TCP_CKSUM:
298 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
299 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
300 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
301 << E1000_ADVTXD_L4LEN_SHIFT;
303 case PKT_TX_SCTP_CKSUM:
304 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
305 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
306 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
307 << E1000_ADVTXD_L4LEN_SHIFT;
310 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
311 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
316 txq->ctx_cache[ctx_curr].flags = ol_flags;
317 txq->ctx_cache[ctx_curr].tx_offload.data =
318 tx_offload_mask.data & tx_offload.data;
319 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
321 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
322 vlan_macip_lens = (uint32_t)tx_offload.data;
323 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
324 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
325 ctx_txd->u.seqnum_seed = 0;
329 * Check which hardware context can be used. Use the existing match
330 * or create a new context descriptor.
332 static inline uint32_t
333 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
334 union igb_tx_offload tx_offload)
336 /* If match with the current context */
337 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
338 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
339 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
340 return txq->ctx_curr;
343 /* If match with the second context */
345 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
346 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
347 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
348 return txq->ctx_curr;
351 /* Mismatch: reuse the older context slot to build a new context */
355 static inline uint32_t
356 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
358 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
359 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
362 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
363 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
364 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
368 static inline uint32_t
369 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
372 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
373 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
374 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
375 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
380 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
383 struct igb_tx_queue *txq;
384 struct igb_tx_entry *sw_ring;
385 struct igb_tx_entry *txe, *txn;
386 volatile union e1000_adv_tx_desc *txr;
387 volatile union e1000_adv_tx_desc *txd;
388 struct rte_mbuf *tx_pkt;
389 struct rte_mbuf *m_seg;
390 uint64_t buf_dma_addr;
391 uint32_t olinfo_status;
392 uint32_t cmd_type_len;
401 uint32_t new_ctx = 0;
403 union igb_tx_offload tx_offload = {0};
406 sw_ring = txq->sw_ring;
408 tx_id = txq->tx_tail;
409 txe = &sw_ring[tx_id];
411 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
413 pkt_len = tx_pkt->pkt_len;
415 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
418 * The number of descriptors that must be allocated for a
419 * packet is the number of segments of that packet, plus 1
420 * Context Descriptor for the VLAN Tag Identifier, if any.
421 * Determine the last TX descriptor to allocate in the TX ring
422 * for the packet, starting from the current position (tx_id)
425 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
427 ol_flags = tx_pkt->ol_flags;
428 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
430 /* If a Context Descriptor needs to be built. */
432 tx_offload.l2_len = tx_pkt->l2_len;
433 tx_offload.l3_len = tx_pkt->l3_len;
434 tx_offload.l4_len = tx_pkt->l4_len;
435 tx_offload.vlan_tci = tx_pkt->vlan_tci;
436 tx_offload.tso_segsz = tx_pkt->tso_segsz;
437 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
439 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
440 /* Only allocate a context descriptor if required */
441 new_ctx = (ctx == IGB_CTX_NUM);
442 ctx = txq->ctx_curr + txq->ctx_start;
443 tx_last = (uint16_t) (tx_last + new_ctx);
445 if (tx_last >= txq->nb_tx_desc)
446 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
448 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
449 " tx_first=%u tx_last=%u",
450 (unsigned) txq->port_id,
451 (unsigned) txq->queue_id,
457 * Check if there are enough free descriptors in the TX ring
458 * to transmit the next packet.
459 * This operation is based on the two following rules:
461 * 1- Only check that the last needed TX descriptor can be
462 * allocated (by construction, if that descriptor is free,
463 * all intermediate ones are also free).
465 * For this purpose, the index of the last TX descriptor
466 * used for a packet (the "last descriptor" of a packet)
467 * is recorded in the TX entries (the last one included)
468 * that are associated with all TX descriptors allocated
471 * 2- Avoid allocating the last free TX descriptor of the
472 * ring, in order to never set the TDT register with the
473 * same value stored in parallel by the NIC in the TDH
474 * register, which would make the TX engine of the NIC enter
475 * a deadlock situation.
477 * By extension, avoid allocating a free descriptor that
478 * belongs to the last set of free descriptors allocated
479 * to the same packet previously transmitted.
483 * The "last descriptor" of the previously sent packet, if any,
484 * which used the last descriptor to allocate.
486 tx_end = sw_ring[tx_last].last_id;
489 * The next descriptor following that "last descriptor" in the
492 tx_end = sw_ring[tx_end].next_id;
495 * The "last descriptor" associated with that next descriptor.
497 tx_end = sw_ring[tx_end].last_id;
500 * Check that this descriptor is free.
502 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
509 * Set common flags of all TX Data Descriptors.
511 * The following bits must be set in all Data Descriptors:
512 * - E1000_ADVTXD_DTYP_DATA
513 * - E1000_ADVTXD_DCMD_DEXT
515 * The following bits must be set in the first Data Descriptor
516 * and are ignored in the other ones:
517 * - E1000_ADVTXD_DCMD_IFCS
518 * - E1000_ADVTXD_MAC_1588
519 * - E1000_ADVTXD_DCMD_VLE
521 * The following bits must only be set in the last Data
523 * - E1000_TXD_CMD_EOP
525 * The following bits can be set in any Data Descriptor, but
526 * are only set in the last Data Descriptor:
529 cmd_type_len = txq->txd_type |
530 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
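/*
 * For TSO, the PAYLEN field must carry only the L4 payload, so the
 * L2 + L3 + L4 header bytes are removed from pkt_len before it is
 * written to olinfo_status below.
 */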
531 if (tx_ol_req & PKT_TX_TCP_SEG)
532 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
533 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
534 #if defined(RTE_LIBRTE_IEEE1588)
535 if (ol_flags & PKT_TX_IEEE1588_TMST)
536 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
539 /* Setup TX Advanced context descriptor if required */
541 volatile struct e1000_adv_tx_context_desc *
544 ctx_txd = (volatile struct
545 e1000_adv_tx_context_desc *)
548 txn = &sw_ring[txe->next_id];
549 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
551 if (txe->mbuf != NULL) {
552 rte_pktmbuf_free_seg(txe->mbuf);
556 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
558 txe->last_id = tx_last;
559 tx_id = txe->next_id;
563 /* Setup the TX Advanced Data Descriptor */
564 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
565 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
566 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
571 txn = &sw_ring[txe->next_id];
574 if (txe->mbuf != NULL)
575 rte_pktmbuf_free_seg(txe->mbuf);
579 * Set up transmit descriptor.
581 slen = (uint16_t) m_seg->data_len;
582 buf_dma_addr = rte_mbuf_data_iova(m_seg);
583 txd->read.buffer_addr =
584 rte_cpu_to_le_64(buf_dma_addr);
585 txd->read.cmd_type_len =
586 rte_cpu_to_le_32(cmd_type_len | slen);
587 txd->read.olinfo_status =
588 rte_cpu_to_le_32(olinfo_status);
589 txe->last_id = tx_last;
590 tx_id = txe->next_id;
593 } while (m_seg != NULL);
596 * The last packet data descriptor needs End Of Packet (EOP)
597 * and Report Status (RS).
599 txd->read.cmd_type_len |=
600 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
606 * Set the Transmit Descriptor Tail (TDT).
608 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
609 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
610 (unsigned) txq->port_id, (unsigned) txq->queue_id,
611 (unsigned) tx_id, (unsigned) nb_tx);
612 txq->tx_tail = tx_id;
617 /*********************************************************************
621 **********************************************************************/
623 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
629 for (i = 0; i < nb_pkts; i++) {
632 /* Check some limitations for TSO in hardware */
633 if (m->ol_flags & PKT_TX_TCP_SEG)
634 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
635 (m->l2_len + m->l3_len + m->l4_len >
636 IGB_TSO_MAX_HDRLEN)) {
641 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
646 #ifdef RTE_ETHDEV_DEBUG_TX
647 ret = rte_validate_tx_offload(m);
653 ret = rte_net_intel_cksum_prepare(m);
663 /*********************************************************************
667 **********************************************************************/
668 #define IGB_PACKET_TYPE_IPV4 0X01
669 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
670 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
671 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
672 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
673 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
674 #define IGB_PACKET_TYPE_IPV6 0X04
675 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
676 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
677 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
678 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
679 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
680 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
681 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
682 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
683 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
684 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
685 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
686 #define IGB_PACKET_TYPE_MAX 0X80
687 #define IGB_PACKET_TYPE_MASK 0X7F
688 #define IGB_PACKET_TYPE_SHIFT 0X04
689 static inline uint32_t
690 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
692 static const uint32_t
693 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
694 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
696 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
697 RTE_PTYPE_L3_IPV4_EXT,
698 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
700 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
701 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702 RTE_PTYPE_INNER_L3_IPV6,
703 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
704 RTE_PTYPE_L3_IPV6_EXT,
705 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
706 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
707 RTE_PTYPE_INNER_L3_IPV6_EXT,
708 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
709 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
710 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
711 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
712 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
713 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
714 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
715 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
716 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
717 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
718 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
719 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
720 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
721 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
722 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
723 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
724 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
725 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
726 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
727 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
728 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
729 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
730 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
731 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
732 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
733 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
734 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
735 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
737 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
738 return RTE_PTYPE_UNKNOWN;
740 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
742 return ptype_table[pkt_info];
745 static inline uint64_t
746 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
748 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
750 #if defined(RTE_LIBRTE_IEEE1588)
751 static uint32_t ip_pkt_etqf_map[8] = {
752 0, 0, 0, PKT_RX_IEEE1588_PTP,
756 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
757 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
759 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
760 if (hw->mac.type == e1000_i210)
761 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
763 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
771 static inline uint64_t
772 rx_desc_status_to_pkt_flags(uint32_t rx_status)
776 /* Check if VLAN present */
777 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
778 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
780 #if defined(RTE_LIBRTE_IEEE1588)
781 if (rx_status & E1000_RXD_STAT_TMST)
782 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
787 static inline uint64_t
788 rx_desc_error_to_pkt_flags(uint32_t rx_status)
791 * Bit 30: IPE, IPv4 checksum error
792 * Bit 29: L4I, L4I integrity error
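 * After shifting by E1000_RXD_ERR_CKSUM_BIT, these two bits form the
 * index into error_to_pkt_flags_map[] below: bit 0 reports an L4
 * checksum error and bit 1 an IP checksum error.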
795 static uint64_t error_to_pkt_flags_map[4] = {
796 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
797 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
798 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
799 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
801 return error_to_pkt_flags_map[(rx_status >>
802 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
806 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
809 struct igb_rx_queue *rxq;
810 volatile union e1000_adv_rx_desc *rx_ring;
811 volatile union e1000_adv_rx_desc *rxdp;
812 struct igb_rx_entry *sw_ring;
813 struct igb_rx_entry *rxe;
814 struct rte_mbuf *rxm;
815 struct rte_mbuf *nmb;
816 union e1000_adv_rx_desc rxd;
819 uint32_t hlen_type_rss;
829 rx_id = rxq->rx_tail;
830 rx_ring = rxq->rx_ring;
831 sw_ring = rxq->sw_ring;
832 while (nb_rx < nb_pkts) {
834 * The order of operations here is important as the DD status
835 * bit must not be read after any other descriptor fields.
836 * rx_ring and rxdp are pointing to volatile data so the order
837 * of accesses cannot be reordered by the compiler. If they were
838 * not volatile, they could be reordered which could lead to
839 * using invalid descriptor fields when read from rxd.
841 rxdp = &rx_ring[rx_id];
842 staterr = rxdp->wb.upper.status_error;
843 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
850 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
851 * likely to be invalid and to be dropped by the various
852 * validation checks performed by the network stack.
854 * Allocate a new mbuf to replenish the RX ring descriptor.
855 * If the allocation fails:
856 * - arrange for that RX descriptor to be the first one
857 * being parsed the next time the receive function is
858 * invoked [on the same queue].
860 * - Stop parsing the RX ring and return immediately.
862 * This policy does not drop the packet received in the RX
863 * descriptor for which the allocation of a new mbuf failed.
864 * Thus, it allows that packet to be retrieved later, once
865 * mbufs have been freed in the meantime.
866 * As a side effect, holding RX descriptors instead of
867 * systematically giving them back to the NIC may lead to
868 * RX ring exhaustion situations.
869 * However, the NIC can gracefully prevent such situations
870 * from happening by sending specific "back-pressure" flow control
871 * frames to its peer(s).
873 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
874 "staterr=0x%x pkt_len=%u",
875 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
876 (unsigned) rx_id, (unsigned) staterr,
877 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
879 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
881 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
882 "queue_id=%u", (unsigned) rxq->port_id,
883 (unsigned) rxq->queue_id);
884 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
889 rxe = &sw_ring[rx_id];
891 if (rx_id == rxq->nb_rx_desc)
894 /* Prefetch next mbuf while processing current one. */
895 rte_igb_prefetch(sw_ring[rx_id].mbuf);
898 * When next RX descriptor is on a cache-line boundary,
899 * prefetch the next 4 RX descriptors and the next 8 pointers
902 if ((rx_id & 0x3) == 0) {
903 rte_igb_prefetch(&rx_ring[rx_id]);
904 rte_igb_prefetch(&sw_ring[rx_id]);
910 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
911 rxdp->read.hdr_addr = 0;
912 rxdp->read.pkt_addr = dma_addr;
915 * Initialize the returned mbuf.
916 * 1) setup generic mbuf fields:
917 * - number of segments,
920 * - RX port identifier.
921 * 2) integrate hardware offload data, if any:
923 * - IP checksum flag,
924 * - VLAN TCI, if any,
927 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
929 rxm->data_off = RTE_PKTMBUF_HEADROOM;
930 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
933 rxm->pkt_len = pkt_len;
934 rxm->data_len = pkt_len;
935 rxm->port = rxq->port_id;
937 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
938 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
941 * The vlan_tci field is only valid when PKT_RX_VLAN is
942 * set in the pkt_flags field and must be in CPU byte order.
944 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
945 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
946 rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
948 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
950 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
951 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
952 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
953 rxm->ol_flags = pkt_flags;
954 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
955 lo_dword.hs_rss.pkt_info);
958 * Store the mbuf address into the next entry of the array
959 * of returned packets.
961 rx_pkts[nb_rx++] = rxm;
963 rxq->rx_tail = rx_id;
966 * If the number of free RX descriptors is greater than the RX free
967 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
969 * Update the RDT with the value of the last processed RX descriptor
970 * minus 1, to guarantee that the RDT register is never equal to the
971 * RDH register, which creates a "full" ring situation from the
972 * hardware point of view...
974 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
975 if (nb_hold > rxq->rx_free_thresh) {
976 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
977 "nb_hold=%u nb_rx=%u",
978 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
979 (unsigned) rx_id, (unsigned) nb_hold,
981 rx_id = (uint16_t) ((rx_id == 0) ?
982 (rxq->nb_rx_desc - 1) : (rx_id - 1));
983 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
986 rxq->nb_rx_hold = nb_hold;
991 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
994 struct igb_rx_queue *rxq;
995 volatile union e1000_adv_rx_desc *rx_ring;
996 volatile union e1000_adv_rx_desc *rxdp;
997 struct igb_rx_entry *sw_ring;
998 struct igb_rx_entry *rxe;
999 struct rte_mbuf *first_seg;
1000 struct rte_mbuf *last_seg;
1001 struct rte_mbuf *rxm;
1002 struct rte_mbuf *nmb;
1003 union e1000_adv_rx_desc rxd;
1004 uint64_t dma; /* Physical address of mbuf data buffer */
1006 uint32_t hlen_type_rss;
1016 rx_id = rxq->rx_tail;
1017 rx_ring = rxq->rx_ring;
1018 sw_ring = rxq->sw_ring;
1021 * Retrieve RX context of current packet, if any.
1023 first_seg = rxq->pkt_first_seg;
1024 last_seg = rxq->pkt_last_seg;
1026 while (nb_rx < nb_pkts) {
1029 * The order of operations here is important as the DD status
1030 * bit must not be read after any other descriptor fields.
1031 * rx_ring and rxdp are pointing to volatile data so the order
1032 * of accesses cannot be reordered by the compiler. If they were
1033 * not volatile, they could be reordered which could lead to
1034 * using invalid descriptor fields when read from rxd.
1036 rxdp = &rx_ring[rx_id];
1037 staterr = rxdp->wb.upper.status_error;
1038 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1045 * Allocate a new mbuf to replenish the RX ring descriptor.
1046 * If the allocation fails:
1047 * - arrange for that RX descriptor to be the first one
1048 * being parsed the next time the receive function is
1049 * invoked [on the same queue].
1051 * - Stop parsing the RX ring and return immediately.
1053 * This policy does not drop the packet received in the RX
1054 * descriptor for which the allocation of a new mbuf failed.
1055 * Thus, it allows that packet to be retrieved later, once
1056 * mbufs have been freed in the meantime.
1057 * As a side effect, holding RX descriptors instead of
1058 * systematically giving them back to the NIC may lead to
1059 * RX ring exhaustion situations.
1060 * However, the NIC can gracefully prevent such situations
1061 * from happening by sending specific "back-pressure" flow control
1062 * frames to its peer(s).
1064 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1065 "staterr=0x%x data_len=%u",
1066 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1067 (unsigned) rx_id, (unsigned) staterr,
1068 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1070 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1072 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1073 "queue_id=%u", (unsigned) rxq->port_id,
1074 (unsigned) rxq->queue_id);
1075 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1080 rxe = &sw_ring[rx_id];
1082 if (rx_id == rxq->nb_rx_desc)
1085 /* Prefetch next mbuf while processing current one. */
1086 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1089 * When next RX descriptor is on a cache-line boundary,
1090 * prefetch the next 4 RX descriptors and the next 8 pointers
1093 if ((rx_id & 0x3) == 0) {
1094 rte_igb_prefetch(&rx_ring[rx_id]);
1095 rte_igb_prefetch(&sw_ring[rx_id]);
1099 * Update RX descriptor with the physical address of the new
1100 * data buffer of the newly allocated mbuf.
1104 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1105 rxdp->read.pkt_addr = dma;
1106 rxdp->read.hdr_addr = 0;
1109 * Set data length & data buffer address of mbuf.
1111 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1112 rxm->data_len = data_len;
1113 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1116 * If this is the first buffer of the received packet,
1117 * set the pointer to the first mbuf of the packet and
1118 * initialize its context.
1119 * Otherwise, update the total length and the number of segments
1120 * of the current scattered packet, and update the pointer to
1121 * the last mbuf of the current packet.
1123 if (first_seg == NULL) {
1125 first_seg->pkt_len = data_len;
1126 first_seg->nb_segs = 1;
1128 first_seg->pkt_len += data_len;
1129 first_seg->nb_segs++;
1130 last_seg->next = rxm;
1134 * If this is not the last buffer of the received packet,
1135 * update the pointer to the last mbuf of the current scattered
1136 * packet and continue to parse the RX ring.
1138 if (! (staterr & E1000_RXD_STAT_EOP)) {
1144 * This is the last buffer of the received packet.
1145 * If the CRC is not stripped by the hardware:
1146 * - Subtract the CRC length from the total packet length.
1147 * - If the last buffer only contains the whole CRC or a part
1148 * of it, free the mbuf associated to the last buffer.
1149 * If part of the CRC is also contained in the previous
1150 * mbuf, subtract the length of that CRC part from the
1151 * data length of the previous mbuf.
1154 if (unlikely(rxq->crc_len > 0)) {
1155 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1156 if (data_len <= RTE_ETHER_CRC_LEN) {
1157 rte_pktmbuf_free_seg(rxm);
1158 first_seg->nb_segs--;
1159 last_seg->data_len = (uint16_t)
1160 (last_seg->data_len -
1161 (RTE_ETHER_CRC_LEN - data_len));
1162 last_seg->next = NULL;
1164 rxm->data_len = (uint16_t)
1165 (data_len - RTE_ETHER_CRC_LEN);
1169 * Initialize the first mbuf of the returned packet:
1170 * - RX port identifier,
1171 * - hardware offload data, if any:
1172 * - RSS flag & hash,
1173 * - IP checksum flag,
1174 * - VLAN TCI, if any,
1177 first_seg->port = rxq->port_id;
1178 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1181 * The vlan_tci field is only valid when PKT_RX_VLAN is
1182 * set in the pkt_flags field and must be in CPU byte order.
1184 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1185 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1186 first_seg->vlan_tci =
1187 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1189 first_seg->vlan_tci =
1190 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1192 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1193 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1194 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1195 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1196 first_seg->ol_flags = pkt_flags;
1197 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1198 lower.lo_dword.hs_rss.pkt_info);
1200 /* Prefetch data of first segment, if configured to do so. */
1201 rte_packet_prefetch((char *)first_seg->buf_addr +
1202 first_seg->data_off);
1205 * Store the mbuf address into the next entry of the array
1206 * of returned packets.
1208 rx_pkts[nb_rx++] = first_seg;
1211 * Setup receipt context for a new packet.
1217 * Record index of the next RX descriptor to probe.
1219 rxq->rx_tail = rx_id;
1222 * Save receive context.
1224 rxq->pkt_first_seg = first_seg;
1225 rxq->pkt_last_seg = last_seg;
1228 * If the number of free RX descriptors is greater than the RX free
1229 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1231 * Update the RDT with the value of the last processed RX descriptor
1232 * minus 1, to guarantee that the RDT register is never equal to the
1233 * RDH register, which creates a "full" ring situation from the
1234 * hardware point of view...
1236 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1237 if (nb_hold > rxq->rx_free_thresh) {
1238 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1239 "nb_hold=%u nb_rx=%u",
1240 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1241 (unsigned) rx_id, (unsigned) nb_hold,
1243 rx_id = (uint16_t) ((rx_id == 0) ?
1244 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1245 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1248 rxq->nb_rx_hold = nb_hold;
1253 * Maximum number of Ring Descriptors.
1255 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1256 * descriptors should meet the following condition:
1257 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
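 * With the 16-byte advanced RX/TX descriptors used by this driver, this
 * works out to a descriptor count that is a multiple of 8, which is what
 * the IGB_RXD_ALIGN/IGB_TXD_ALIGN checks in the queue setup functions
 * below enforce.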
1261 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1265 if (txq->sw_ring != NULL) {
1266 for (i = 0; i < txq->nb_tx_desc; i++) {
1267 if (txq->sw_ring[i].mbuf != NULL) {
1268 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1269 txq->sw_ring[i].mbuf = NULL;
1276 igb_tx_queue_release(struct igb_tx_queue *txq)
1279 igb_tx_queue_release_mbufs(txq);
1280 rte_free(txq->sw_ring);
1281 rte_memzone_free(txq->mz);
1287 eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1289 igb_tx_queue_release(dev->data->tx_queues[qid]);
1293 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1295 struct igb_tx_entry *sw_ring;
1296 volatile union e1000_adv_tx_desc *txr;
1297 uint16_t tx_first; /* First segment analyzed. */
1298 uint16_t tx_id; /* Current segment being processed. */
1299 uint16_t tx_last; /* Last segment in the current packet. */
1300 uint16_t tx_next; /* First segment of the next packet. */
1306 sw_ring = txq->sw_ring;
1309 /* tx_tail is the last sent packet on the sw_ring. Go to the end
1310 * of that packet (the last segment in the packet chain) and
1311 * then the next segment will be the start of the oldest segment
1312 * in the sw_ring. This is the first packet that will be
1313 * attempted to be freed.
1316 /* Get last segment in most recently added packet. */
1317 tx_first = sw_ring[txq->tx_tail].last_id;
1319 /* Get the next segment, which is the oldest segment in ring. */
1320 tx_first = sw_ring[tx_first].next_id;
1322 /* Set the current index to the first. */
1325 /* Loop through each packet. For each packet, verify that an
1326 * mbuf exists and that the last segment is free. If so, free
1330 tx_last = sw_ring[tx_id].last_id;
1332 if (sw_ring[tx_last].mbuf) {
1333 if (txr[tx_last].wb.status &
1334 E1000_TXD_STAT_DD) {
1335 /* Increment the number of packets
1340 /* Get the start of the next packet. */
1341 tx_next = sw_ring[tx_last].next_id;
1343 /* Loop through all segments in a
1347 if (sw_ring[tx_id].mbuf) {
1348 rte_pktmbuf_free_seg(
1349 sw_ring[tx_id].mbuf);
1350 sw_ring[tx_id].mbuf = NULL;
1351 sw_ring[tx_id].last_id = tx_id;
1354 /* Move to the next segment. */
1355 tx_id = sw_ring[tx_id].next_id;
1357 } while (tx_id != tx_next);
1359 if (unlikely(count == (int)free_cnt))
1362 /* mbuf still in use, nothing left to
1368 /* There are multiple reasons to be here:
1369 * 1) All the packets on the ring have been
1370 * freed - tx_id is equal to tx_first
1371 * and some packets have been freed.
1373 * 2) The interface has not sent a ring's worth of
1374 * packets yet, so the segment after tail is
1375 * still empty. Or a previous call to this
1376 * function freed some of the segments but
1377 * not all so there is a hole in the list.
1378 * Hopefully this is a rare case.
1379 * - Walk the list and find the next mbuf. If
1380 * there isn't one, then done.
1382 if (likely(tx_id == tx_first && count != 0))
1385 /* Walk the list and find the next mbuf, if any. */
1387 /* Move to the next segment. */
1388 tx_id = sw_ring[tx_id].next_id;
1390 if (sw_ring[tx_id].mbuf)
1393 } while (tx_id != tx_first);
1395 /* Determine why the previous loop bailed. If there
1396 * is no mbuf, we are done.
1398 if (!sw_ring[tx_id].mbuf)
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1409 return igb_tx_done_cleanup(txq, free_cnt);
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1418 memset((void*)&txq->ctx_cache, 0,
1419 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1425 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426 struct igb_tx_entry *txe = txq->sw_ring;
1428 struct e1000_hw *hw;
1430 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431 /* Zero out HW ring memory */
1432 for (i = 0; i < txq->nb_tx_desc; i++) {
1433 txq->tx_ring[i] = zeroed_desc;
1436 /* Initialize ring entries */
1437 prev = (uint16_t)(txq->nb_tx_desc - 1);
1438 for (i = 0; i < txq->nb_tx_desc; i++) {
1439 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1441 txd->wb.status = E1000_TXD_STAT_DD;
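/*
 * With DD already set, every descriptor reports as completed, so the
 * transmit path sees an entirely free ring right after the reset.
 */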
1444 txe[prev].next_id = i;
1448 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449 /* 82575 specific, each tx queue will use 2 hw contexts */
1450 if (hw->mac.type == e1000_82575)
1451 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
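/*
 * For other MAC types ctx_start is left at zero (the queue structure
 * was zeroed at allocation), so the context index written into the
 * descriptors starts at 0 for every queue.
 */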
1453 igb_reset_tx_queue_stat(txq);
1457 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1459 uint64_t tx_offload_capa;
1462 tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1463 DEV_TX_OFFLOAD_IPV4_CKSUM |
1464 DEV_TX_OFFLOAD_UDP_CKSUM |
1465 DEV_TX_OFFLOAD_TCP_CKSUM |
1466 DEV_TX_OFFLOAD_SCTP_CKSUM |
1467 DEV_TX_OFFLOAD_TCP_TSO |
1468 DEV_TX_OFFLOAD_MULTI_SEGS;
1470 return tx_offload_capa;
1474 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1476 uint64_t tx_queue_offload_capa;
1478 tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1480 return tx_queue_offload_capa;
1484 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1487 unsigned int socket_id,
1488 const struct rte_eth_txconf *tx_conf)
1490 const struct rte_memzone *tz;
1491 struct igb_tx_queue *txq;
1492 struct e1000_hw *hw;
1496 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1498 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1501 * Validate number of transmit descriptors.
1502 * It must not exceed the hardware maximum, and must be a multiple
1505 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1506 (nb_desc > E1000_MAX_RING_DESC) ||
1507 (nb_desc < E1000_MIN_RING_DESC)) {
1512 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1515 if (tx_conf->tx_free_thresh != 0)
1516 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1517 "used for the 1G driver.");
1518 if (tx_conf->tx_rs_thresh != 0)
1519 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1520 "used for the 1G driver.");
1521 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1522 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1523 "consider setting the TX WTHRESH value to 4, 8, "
1526 /* Free memory prior to re-allocation if needed */
1527 if (dev->data->tx_queues[queue_idx] != NULL) {
1528 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1529 dev->data->tx_queues[queue_idx] = NULL;
1532 /* First allocate the tx queue data structure */
1533 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1534 RTE_CACHE_LINE_SIZE);
1539 * Allocate TX ring hardware descriptors. A memzone large enough to
1540 * handle the maximum ring size is allocated in order to allow for
1541 * resizing in later calls to the queue setup function.
1543 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1544 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1545 E1000_ALIGN, socket_id);
1547 igb_tx_queue_release(txq);
1552 txq->nb_tx_desc = nb_desc;
1553 txq->pthresh = tx_conf->tx_thresh.pthresh;
1554 txq->hthresh = tx_conf->tx_thresh.hthresh;
1555 txq->wthresh = tx_conf->tx_thresh.wthresh;
1556 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1558 txq->queue_id = queue_idx;
1559 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1560 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1561 txq->port_id = dev->data->port_id;
1563 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1564 txq->tx_ring_phys_addr = tz->iova;
1566 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1567 /* Allocate software ring */
1568 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1569 sizeof(struct igb_tx_entry) * nb_desc,
1570 RTE_CACHE_LINE_SIZE);
1571 if (txq->sw_ring == NULL) {
1572 igb_tx_queue_release(txq);
1575 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1576 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1578 igb_reset_tx_queue(txq, dev);
1579 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1580 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1581 dev->data->tx_queues[queue_idx] = txq;
1582 txq->offloads = offloads;
1588 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1592 if (rxq->sw_ring != NULL) {
1593 for (i = 0; i < rxq->nb_rx_desc; i++) {
1594 if (rxq->sw_ring[i].mbuf != NULL) {
1595 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1596 rxq->sw_ring[i].mbuf = NULL;
1603 igb_rx_queue_release(struct igb_rx_queue *rxq)
1606 igb_rx_queue_release_mbufs(rxq);
1607 rte_free(rxq->sw_ring);
1608 rte_memzone_free(rxq->mz);
1614 eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1616 igb_rx_queue_release(dev->data->rx_queues[qid]);
1620 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1622 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1625 /* Zero out HW ring memory */
1626 for (i = 0; i < rxq->nb_rx_desc; i++) {
1627 rxq->rx_ring[i] = zeroed_desc;
1631 rxq->pkt_first_seg = NULL;
1632 rxq->pkt_last_seg = NULL;
1636 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1638 uint64_t rx_offload_capa;
1639 struct e1000_hw *hw;
1641 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1643 rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1644 DEV_RX_OFFLOAD_VLAN_FILTER |
1645 DEV_RX_OFFLOAD_IPV4_CKSUM |
1646 DEV_RX_OFFLOAD_UDP_CKSUM |
1647 DEV_RX_OFFLOAD_TCP_CKSUM |
1648 DEV_RX_OFFLOAD_JUMBO_FRAME |
1649 DEV_RX_OFFLOAD_KEEP_CRC |
1650 DEV_RX_OFFLOAD_SCATTER |
1651 DEV_RX_OFFLOAD_RSS_HASH;
1653 if (hw->mac.type == e1000_i350 ||
1654 hw->mac.type == e1000_i210 ||
1655 hw->mac.type == e1000_i211)
1656 rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
1658 return rx_offload_capa;
1662 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1664 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1665 uint64_t rx_queue_offload_capa;
1667 switch (hw->mac.type) {
1668 case e1000_vfadapt_i350:
1670 * As only one Rx queue can be used, let the per-queue offloading
1671 * capability be the same as the per-port offloading capability
1672 * for convenience.
1674 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1677 rx_queue_offload_capa = 0;
1679 return rx_queue_offload_capa;
1683 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1686 unsigned int socket_id,
1687 const struct rte_eth_rxconf *rx_conf,
1688 struct rte_mempool *mp)
1690 const struct rte_memzone *rz;
1691 struct igb_rx_queue *rxq;
1692 struct e1000_hw *hw;
1696 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1698 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1701 * Validate number of receive descriptors.
1702 * It must not exceed the hardware maximum, and must be a multiple
1705 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1706 (nb_desc > E1000_MAX_RING_DESC) ||
1707 (nb_desc < E1000_MIN_RING_DESC)) {
1711 /* Free memory prior to re-allocation if needed */
1712 if (dev->data->rx_queues[queue_idx] != NULL) {
1713 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1714 dev->data->rx_queues[queue_idx] = NULL;
1717 /* First allocate the RX queue data structure. */
1718 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1719 RTE_CACHE_LINE_SIZE);
1722 rxq->offloads = offloads;
1724 rxq->nb_rx_desc = nb_desc;
1725 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1726 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1727 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1728 if (rxq->wthresh > 0 &&
1729 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1731 rxq->drop_en = rx_conf->rx_drop_en;
1732 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1733 rxq->queue_id = queue_idx;
1734 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1735 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1736 rxq->port_id = dev->data->port_id;
1737 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1738 rxq->crc_len = RTE_ETHER_CRC_LEN;
1743 * Allocate RX ring hardware descriptors. A memzone large enough to
1744 * handle the maximum ring size is allocated in order to allow for
1745 * resizing in later calls to the queue setup function.
1747 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1748 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1749 E1000_ALIGN, socket_id);
1751 igb_rx_queue_release(rxq);
1756 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1757 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1758 rxq->rx_ring_phys_addr = rz->iova;
1759 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1761 /* Allocate software ring. */
1762 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1763 sizeof(struct igb_rx_entry) * nb_desc,
1764 RTE_CACHE_LINE_SIZE);
1765 if (rxq->sw_ring == NULL) {
1766 igb_rx_queue_release(rxq);
1769 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1770 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1772 dev->data->rx_queues[queue_idx] = rxq;
1773 igb_reset_rx_queue(rxq);
1779 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1781 #define IGB_RXQ_SCAN_INTERVAL 4
1782 volatile union e1000_adv_rx_desc *rxdp;
1783 struct igb_rx_queue *rxq;
1786 rxq = dev->data->rx_queues[rx_queue_id];
1787 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1789 while ((desc < rxq->nb_rx_desc) &&
1790 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1791 desc += IGB_RXQ_SCAN_INTERVAL;
1792 rxdp += IGB_RXQ_SCAN_INTERVAL;
1793 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1794 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1795 desc - rxq->nb_rx_desc]);
1802 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1804 struct igb_rx_queue *rxq = rx_queue;
1805 volatile uint32_t *status;
1808 if (unlikely(offset >= rxq->nb_rx_desc))
1811 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1812 return RTE_ETH_RX_DESC_UNAVAIL;
1814 desc = rxq->rx_tail + offset;
1815 if (desc >= rxq->nb_rx_desc)
1816 desc -= rxq->nb_rx_desc;
1818 status = &rxq->rx_ring[desc].wb.upper.status_error;
1819 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1820 return RTE_ETH_RX_DESC_DONE;
1822 return RTE_ETH_RX_DESC_AVAIL;
1826 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1828 struct igb_tx_queue *txq = tx_queue;
1829 volatile uint32_t *status;
1832 if (unlikely(offset >= txq->nb_tx_desc))
1835 desc = txq->tx_tail + offset;
1836 if (desc >= txq->nb_tx_desc)
1837 desc -= txq->nb_tx_desc;
1839 status = &txq->tx_ring[desc].wb.status;
1840 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1841 return RTE_ETH_TX_DESC_DONE;
1843 return RTE_ETH_TX_DESC_FULL;
1847 igb_dev_clear_queues(struct rte_eth_dev *dev)
1850 struct igb_tx_queue *txq;
1851 struct igb_rx_queue *rxq;
1853 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1854 txq = dev->data->tx_queues[i];
1856 igb_tx_queue_release_mbufs(txq);
1857 igb_reset_tx_queue(txq, dev);
1861 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1862 rxq = dev->data->rx_queues[i];
1864 igb_rx_queue_release_mbufs(rxq);
1865 igb_reset_rx_queue(rxq);
1871 igb_dev_free_queues(struct rte_eth_dev *dev)
1875 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1876 eth_igb_rx_queue_release(dev, i);
1877 dev->data->rx_queues[i] = NULL;
1879 dev->data->nb_rx_queues = 0;
1881 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1882 eth_igb_tx_queue_release(dev, i);
1883 dev->data->tx_queues[i] = NULL;
1885 dev->data->nb_tx_queues = 0;
1889 * Receive Side Scaling (RSS).
1890 * See section 7.1.1.7 in the following document:
1891 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1894 * The source and destination IP addresses of the IP header and the source and
1895 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1896 * against a configurable random key to compute a 32-bit RSS hash result.
1897 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1898 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1899 * RSS output index which is used as the RX queue index where to store the
1901 * The following output is supplied in the RX write-back descriptor:
1902 * - 32-bit result of the Microsoft RSS hash function,
1903 * - 4-bit RSS type field.
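 * In other words, the receive queue for a packet is effectively
 * reta[rss_hash & 0x7F]; igb_rss_configure() below fills that table
 * round-robin over the configured RX queues.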
1907 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1908 * Used as the default key.
1910 static uint8_t rss_intel_key[40] = {
1911 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1912 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1913 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1914 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1915 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1919 igb_rss_disable(struct rte_eth_dev *dev)
1921 struct e1000_hw *hw;
1924 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1925 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1926 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1927 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1931 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1939 hash_key = rss_conf->rss_key;
1940 if (hash_key != NULL) {
1941 /* Fill in RSS hash key */
1942 for (i = 0; i < 10; i++) {
1943 rss_key = hash_key[(i * 4)];
1944 rss_key |= hash_key[(i * 4) + 1] << 8;
1945 rss_key |= hash_key[(i * 4) + 2] << 16;
1946 rss_key |= hash_key[(i * 4) + 3] << 24;
1947 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1951 /* Set configured hashing protocols in MRQC register */
1952 rss_hf = rss_conf->rss_hf;
1953 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1954 if (rss_hf & ETH_RSS_IPV4)
1955 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1956 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1957 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1958 if (rss_hf & ETH_RSS_IPV6)
1959 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1960 if (rss_hf & ETH_RSS_IPV6_EX)
1961 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1962 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1963 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1964 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1965 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1966 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1967 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1968 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1969 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1970 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1971 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1972 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1976 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1977 struct rte_eth_rss_conf *rss_conf)
1979 struct e1000_hw *hw;
1983 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1986 * Before changing anything, first check that the update RSS operation
1987 * does not attempt to disable RSS, if RSS was enabled at
1988 * initialization time, or does not attempt to enable RSS, if RSS was
1989 * disabled at initialization time.
1991 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1992 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1993 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1994 if (rss_hf != 0) /* Enable RSS */
1996 return 0; /* Nothing to do */
1999 if (rss_hf == 0) /* Disable RSS */
2001 igb_hw_rss_hash_set(hw, rss_conf);
2005 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2006 struct rte_eth_rss_conf *rss_conf)
2008 struct e1000_hw *hw;
2015 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2016 hash_key = rss_conf->rss_key;
2017 if (hash_key != NULL) {
2018 /* Return RSS hash key */
2019 for (i = 0; i < 10; i++) {
2020 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2021 hash_key[(i * 4)] = rss_key & 0x000000FF;
2022 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2023 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2024 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2028 /* Get RSS functions configured in MRQC register */
2029 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2030 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2031 rss_conf->rss_hf = 0;
2035 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2036 rss_hf |= ETH_RSS_IPV4;
2037 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2038 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2039 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2040 rss_hf |= ETH_RSS_IPV6;
2041 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2042 rss_hf |= ETH_RSS_IPV6_EX;
2043 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2044 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2045 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2046 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2047 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2048 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2049 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2050 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2051 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2052 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2053 rss_conf->rss_hf = rss_hf;
2058 igb_rss_configure(struct rte_eth_dev *dev)
2060 struct rte_eth_rss_conf rss_conf;
2061 struct e1000_hw *hw;
2065 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2067 /* Fill in redirection table. */
2068 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2069 for (i = 0; i < 128; i++) {
2076 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2077 i % dev->data->nb_rx_queues : 0);
2078 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2080 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
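/*
 * Each 32-bit RETA register packs four one-byte entries, so entry i
 * lands in register i >> 2 at byte i & 3; on 82575 the queue index is
 * additionally shifted left by 6 bits (see "shift" above).
 */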
2084 * Configure the RSS key and the RSS protocols used to compute
2085 * the RSS hash of input packets.
2087 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2088 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2089 igb_rss_disable(dev);
2092 if (rss_conf.rss_key == NULL)
2093 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2094 igb_hw_rss_hash_set(hw, &rss_conf);
2098 * Check whether the mac type supports VMDq or not.
2099 * Return 1 if it does, otherwise return 0.
2102 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2104 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2106 switch (hw->mac.type) {
2127		PMD_INIT_LOG(ERR, "VMDq feature is not supported");
2133 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2135 struct rte_eth_vmdq_rx_conf *cfg;
2136 struct e1000_hw *hw;
2137 uint32_t mrqc, vt_ctl, vmolr, rctl;
2140 PMD_INIT_FUNC_TRACE();
2142 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2143 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2145	/* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2146 if (igb_is_vmdq_supported(dev) == 0)
2149 igb_rss_disable(dev);
2151	/* RCTL: enable VLAN filter */
2152 rctl = E1000_READ_REG(hw, E1000_RCTL);
2153 rctl |= E1000_RCTL_VFE;
2154 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2156 /* MRQC: enable vmdq */
2157 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2158 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2159 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2161 /* VTCTL: pool selection according to VLAN tag */
2162 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2163 if (cfg->enable_default_pool)
2164 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2165 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2166 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2168 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2169 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2170 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2171 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2174 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2175 vmolr |= E1000_VMOLR_AUPE;
2176 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2177 vmolr |= E1000_VMOLR_ROMPE;
2178 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2179 vmolr |= E1000_VMOLR_ROPE;
2180 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2181 vmolr |= E1000_VMOLR_BAM;
2182 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2183 vmolr |= E1000_VMOLR_MPME;
2185 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2189	 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2190	 * Both 82576 and 82580 support it.
2192 if (hw->mac.type != e1000_i350) {
2193 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2194 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2195 vmolr |= E1000_VMOLR_STRVLAN;
2196 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2200 /* VFTA - enable all vlan filters */
2201 for (i = 0; i < IGB_VFTA_SIZE; i++)
2202 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2204	/* VFRE: enable all 8 pools for RX; both 82576 and i350 support it */
2205 if (hw->mac.type != e1000_82580)
2206 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2209	 * RAH/RAL - allow pools to read specific MAC addresses.
2210	 * In this case, all pools should be able to read from MAC address 0.
2212 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2213 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2215 /* VLVF: set up filters for vlan tags as configured */
2216 for (i = 0; i < cfg->nb_pool_maps; i++) {
2217 /* set vlan id in VF register and set the valid bit */
2218 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2219 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2220 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2221 E1000_VLVF_POOLSEL_MASK)));
2224 E1000_WRITE_FLUSH(hw);
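/*
 * Illustrative sketch (not part of this PMD, guarded by a macro that is
 * never defined): a VMDq RX configuration an application might place in
 * dev_conf.rx_adv_conf.vmdq_rx_conf together with mq_mode =
 * ETH_MQ_RX_VMDQ_ONLY, which is what drives igb_vmdq_rx_hw_configure()
 * above. The VLAN ids and pool bitmaps are assumptions for the example.
 */
#ifdef IGB_RXTX_EXAMPLES
static void
igb_example_fill_vmdq_conf(struct rte_eth_vmdq_rx_conf *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->nb_queue_pools = ETH_8_POOLS;	/* igb exposes up to 8 pools */
	cfg->enable_default_pool = 0;
	cfg->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;

	/* Steer VLAN 100 to pool 0 and VLAN 200 to pool 1 (VLVF entries) */
	cfg->nb_pool_maps = 2;
	cfg->pool_map[0].vlan_id = 100;
	cfg->pool_map[0].pools = 1ULL << 0;
	cfg->pool_map[1].vlan_id = 200;
	cfg->pool_map[1].pools = 1ULL << 1;
}
#endif /* IGB_RXTX_EXAMPLES */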
2230 /*********************************************************************
2232 * Enable receive unit.
2234 **********************************************************************/
2237 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2239 struct igb_rx_entry *rxe = rxq->sw_ring;
2243 /* Initialize software ring entries. */
2244 for (i = 0; i < rxq->nb_rx_desc; i++) {
2245 volatile union e1000_adv_rx_desc *rxd;
2246 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2249 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2250 "queue_id=%hu", rxq->queue_id);
2254 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2255 rxd = &rxq->rx_ring[i];
2256 rxd->read.hdr_addr = 0;
2257 rxd->read.pkt_addr = dma_addr;
2264 #define E1000_MRQC_DEF_Q_SHIFT (3)
2266 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2268 struct e1000_hw *hw =
2269 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2272 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2274 * SRIOV active scheme
2275		 * FIXME: RSS together with VMDq & SRIOV is not supported yet
2277 mrqc = E1000_MRQC_ENABLE_VMDQ;
2278		/* 011b: ignore Def_Q, use VT_CTL.DEF_PL instead */
2279 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2280 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2281 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2283 * SRIOV inactive scheme
2285 switch (dev->data->dev_conf.rxmode.mq_mode) {
2287 igb_rss_configure(dev);
2289 case ETH_MQ_RX_VMDQ_ONLY:
2290 /*Configure general VMDQ only RX parameters*/
2291 igb_vmdq_rx_hw_configure(dev);
2293 case ETH_MQ_RX_NONE:
2294 /* if mq_mode is none, disable rss mode.*/
2296 igb_rss_disable(dev);
2305 eth_igb_rx_init(struct rte_eth_dev *dev)
2307 struct rte_eth_rxmode *rxmode;
2308 struct e1000_hw *hw;
2309 struct igb_rx_queue *rxq;
2314 uint16_t rctl_bsize;
2318 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2322 * Make sure receives are disabled while setting
2323 * up the descriptor ring.
2325 rctl = E1000_READ_REG(hw, E1000_RCTL);
2326 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2328 rxmode = &dev->data->dev_conf.rxmode;
2331 * Configure support of jumbo frames, if any.
2333 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2334 uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
2336 rctl |= E1000_RCTL_LPE;
2339		 * Set the maximum packet length by default; it may be updated
2340		 * later when dual VLAN is enabled or disabled.
2342 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2343 max_len += VLAN_TAG_SIZE;
2345 E1000_WRITE_REG(hw, E1000_RLPML, max_len);
2347 rctl &= ~E1000_RCTL_LPE;
2349 /* Configure and enable each RX queue. */
2351 dev->rx_pkt_burst = eth_igb_recv_pkts;
2352 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2356 rxq = dev->data->rx_queues[i];
2360 * i350 and i354 vlan packets have vlan tags byte swapped.
2362 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2363 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2364 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2366 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2369 /* Allocate buffers for descriptor rings and set up queue */
2370 ret = igb_alloc_rx_queue_mbufs(rxq);
2375		 * Reset crc_len in case it was changed after queue setup by a call to configure.
2378 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2379 rxq->crc_len = RTE_ETHER_CRC_LEN;
2383 bus_addr = rxq->rx_ring_phys_addr;
2384 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2386 sizeof(union e1000_adv_rx_desc));
2387 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2388 (uint32_t)(bus_addr >> 32));
2389 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2391 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2394 * Configure RX buffer size.
2396 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2397 RTE_PKTMBUF_HEADROOM);
2398 if (buf_size >= 1024) {
2400 * Configure the BSIZEPACKET field of the SRRCTL
2401 * register of the queue.
2402 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2403 * If this field is equal to 0b, then RCTL.BSIZE
2404 * determines the RX packet buffer size.
2406 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2407 E1000_SRRCTL_BSIZEPKT_MASK);
2408 buf_size = (uint16_t) ((srrctl &
2409 E1000_SRRCTL_BSIZEPKT_MASK) <<
2410 E1000_SRRCTL_BSIZEPKT_SHIFT);
2412		/* Add the dual VLAN tag length to support dual VLAN */
2413 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2414 2 * VLAN_TAG_SIZE) > buf_size){
2415			if (!dev->data->scattered_rx)
2416				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2418 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2419 dev->data->scattered_rx = 1;
2423 * Use BSIZE field of the device RCTL register.
2425 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2426 rctl_bsize = buf_size;
2427 if (!dev->data->scattered_rx)
2428 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2429 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2430 dev->data->scattered_rx = 1;
2433 /* Set if packets are dropped when no descriptors available */
2435 srrctl |= E1000_SRRCTL_DROP_EN;
2437 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2439 /* Enable this RX queue. */
2440 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2441 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2442 rxdctl &= 0xFFF00000;
2443 rxdctl |= (rxq->pthresh & 0x1F);
2444 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2445 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2446 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2449 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2450 if (!dev->data->scattered_rx)
2451 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2452 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2453 dev->data->scattered_rx = 1;
2457	 * Set up the BSIZE field of the RCTL register, if needed.
2458	 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2459	 * register, since the code above configures the SRRCTL register of
2460	 * the RX queue in such a case.
2461 * All configurable sizes are:
2462 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2463 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2464 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2465 * 2048: rctl |= E1000_RCTL_SZ_2048;
2466 * 1024: rctl |= E1000_RCTL_SZ_1024;
2467 * 512: rctl |= E1000_RCTL_SZ_512;
2468 * 256: rctl |= E1000_RCTL_SZ_256;
2470 if (rctl_bsize > 0) {
2471 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2472 rctl |= E1000_RCTL_SZ_512;
2473 else /* 256 <= buf_size < 512 - use 256 */
2474 rctl |= E1000_RCTL_SZ_256;
2478 * Configure RSS if device configured with multiple RX queues.
2480 igb_dev_mq_rx_configure(dev);
2482 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2483 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2486 * Setup the Checksum Register.
2487 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2489 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2490 rxcsum |= E1000_RXCSUM_PCSD;
2492 /* Enable both L3/L4 rx checksum offload */
2493 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2494 rxcsum |= E1000_RXCSUM_IPOFL;
2496 rxcsum &= ~E1000_RXCSUM_IPOFL;
2497 if (rxmode->offloads &
2498 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2499 rxcsum |= E1000_RXCSUM_TUOFL;
2501 rxcsum &= ~E1000_RXCSUM_TUOFL;
2502 if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2503 rxcsum |= E1000_RXCSUM_CRCOFL;
2505 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2507 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2509 /* Setup the Receive Control Register. */
2510 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2511 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2513 /* clear STRCRC bit in all queues */
2514 if (hw->mac.type == e1000_i350 ||
2515 hw->mac.type == e1000_i210 ||
2516 hw->mac.type == e1000_i211 ||
2517 hw->mac.type == e1000_i354) {
2518 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2519 rxq = dev->data->rx_queues[i];
2520 uint32_t dvmolr = E1000_READ_REG(hw,
2521 E1000_DVMOLR(rxq->reg_idx));
2522 dvmolr &= ~E1000_DVMOLR_STRCRC;
2523 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2527 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2529 /* set STRCRC bit in all queues */
2530 if (hw->mac.type == e1000_i350 ||
2531 hw->mac.type == e1000_i210 ||
2532 hw->mac.type == e1000_i211 ||
2533 hw->mac.type == e1000_i354) {
2534 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2535 rxq = dev->data->rx_queues[i];
2536 uint32_t dvmolr = E1000_READ_REG(hw,
2537 E1000_DVMOLR(rxq->reg_idx));
2538 dvmolr |= E1000_DVMOLR_STRCRC;
2539 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2544 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2545 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2546 E1000_RCTL_RDMTS_HALF |
2547 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2549 /* Make sure VLAN Filters are off. */
2550 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2551 rctl &= ~E1000_RCTL_VFE;
2552 /* Don't store bad packets. */
2553 rctl &= ~E1000_RCTL_SBP;
2555 /* Enable Receives. */
2556 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2559 * Setup the HW Rx Head and Tail Descriptor Pointers.
2560 * This needs to be done after enable.
2562 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2563 rxq = dev->data->rx_queues[i];
2564 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2565 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
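/*
 * Worked example (sketch, not part of this PMD, guarded by a macro that is
 * never defined) of the SRRCTL.BSIZEPACKET arithmetic used in
 * eth_igb_rx_init() above. With the common default of a 2048-byte data room
 * plus 128 bytes of headroom, buf_size is 2048, the 1 KB-granular field
 * value is 2, and the size advertised to the hardware rounds back to 2048.
 */
#ifdef IGB_RXTX_EXAMPLES
static uint16_t
igb_example_srrctl_buf_size(struct rte_mempool *mb_pool)
{
	/* e.g. 2176 - 128 = 2048 bytes usable per RX buffer */
	uint16_t buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mb_pool) -
				       RTE_PKTMBUF_HEADROOM);
	/* 2048 >> 10 = 2, masked to the 7-bit BSIZEPACKET field */
	uint32_t bsizepkt = (buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
			    E1000_SRRCTL_BSIZEPKT_MASK;

	/* Granularity the NIC actually uses: 2 << 10 = 2048 bytes */
	return (uint16_t)(bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT);
}
#endif /* IGB_RXTX_EXAMPLES */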
2571 /*********************************************************************
2573 * Enable transmit unit.
2575 **********************************************************************/
2577 eth_igb_tx_init(struct rte_eth_dev *dev)
2579 struct e1000_hw *hw;
2580 struct igb_tx_queue *txq;
2585 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2587 /* Setup the Base and Length of the Tx Descriptor Rings. */
2588 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2590 txq = dev->data->tx_queues[i];
2591 bus_addr = txq->tx_ring_phys_addr;
2593 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2595 sizeof(union e1000_adv_tx_desc));
2596 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2597 (uint32_t)(bus_addr >> 32));
2598 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2600 /* Setup the HW Tx Head and Tail descriptor pointers. */
2601 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2602 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2604 /* Setup Transmit threshold registers. */
2605 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2606 txdctl |= txq->pthresh & 0x1F;
2607 txdctl |= ((txq->hthresh & 0x1F) << 8);
2608 txdctl |= ((txq->wthresh & 0x1F) << 16);
2609 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2610 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2613 /* Program the Transmit Control Register. */
2614 tctl = E1000_READ_REG(hw, E1000_TCTL);
2615 tctl &= ~E1000_TCTL_CT;
2616 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2617 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2619 e1000_config_collision_dist(hw);
2621 /* This write will effectively turn on the transmit unit. */
2622 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
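/*
 * Illustrative sketch (not part of this PMD, guarded by a macro that is
 * never defined): how the per-queue prefetch/host/write-back thresholds end
 * up packed into one TXDCTL value, mirroring the shifts used in
 * eth_igb_tx_init() above. The threshold values themselves come from the
 * tx_conf supplied at queue setup time.
 */
#ifdef IGB_RXTX_EXAMPLES
static uint32_t
igb_example_pack_txdctl(uint8_t pthresh, uint8_t hthresh, uint8_t wthresh)
{
	uint32_t txdctl = 0;

	txdctl |= pthresh & 0x1F;			/* bits  4:0  */
	txdctl |= (uint32_t)(hthresh & 0x1F) << 8;	/* bits 12:8  */
	txdctl |= (uint32_t)(wthresh & 0x1F) << 16;	/* bits 20:16 */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;		/* enable bit  */
	return txdctl;
}
#endif /* IGB_RXTX_EXAMPLES */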
2625 /*********************************************************************
2627 * Enable VF receive unit.
2629 **********************************************************************/
2631 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2633 struct e1000_hw *hw;
2634 struct igb_rx_queue *rxq;
2637 uint16_t rctl_bsize;
2641 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2644 e1000_rlpml_set_vf(hw,
2645 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2648 /* Configure and enable each RX queue. */
2650 dev->rx_pkt_burst = eth_igb_recv_pkts;
2651 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2655 rxq = dev->data->rx_queues[i];
2659 * i350VF LB vlan packets have vlan tags byte swapped.
2661 if (hw->mac.type == e1000_vfadapt_i350) {
2662 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2663 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2665 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2668 /* Allocate buffers for descriptor rings and set up queue */
2669 ret = igb_alloc_rx_queue_mbufs(rxq);
2673 bus_addr = rxq->rx_ring_phys_addr;
2674 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2676 sizeof(union e1000_adv_rx_desc));
2677 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2678 (uint32_t)(bus_addr >> 32));
2679 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2681 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2684 * Configure RX buffer size.
2686 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2687 RTE_PKTMBUF_HEADROOM);
2688 if (buf_size >= 1024) {
2690 * Configure the BSIZEPACKET field of the SRRCTL
2691 * register of the queue.
2692 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2693 * If this field is equal to 0b, then RCTL.BSIZE
2694 * determines the RX packet buffer size.
2696 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2697 E1000_SRRCTL_BSIZEPKT_MASK);
2698 buf_size = (uint16_t) ((srrctl &
2699 E1000_SRRCTL_BSIZEPKT_MASK) <<
2700 E1000_SRRCTL_BSIZEPKT_SHIFT);
2702		/* Add the dual VLAN tag length to support dual VLAN */
2703 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2704 2 * VLAN_TAG_SIZE) > buf_size){
2705			if (!dev->data->scattered_rx)
2706				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2708 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2709 dev->data->scattered_rx = 1;
2713 * Use BSIZE field of the device RCTL register.
2715 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2716 rctl_bsize = buf_size;
2717 if (!dev->data->scattered_rx)
2718 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2719 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2720 dev->data->scattered_rx = 1;
2723 /* Set if packets are dropped when no descriptors available */
2725 srrctl |= E1000_SRRCTL_DROP_EN;
2727 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2729 /* Enable this RX queue. */
2730 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2731 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2732 rxdctl &= 0xFFF00000;
2733 rxdctl |= (rxq->pthresh & 0x1F);
2734 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2735 if (hw->mac.type == e1000_vfadapt) {
2737			 * Workaround for the 82576 VF erratum:
2738			 * force WTHRESH to 1 to avoid write-back
2739			 * occasionally not being triggered
2742 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2745 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2746 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2749 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2750 if (!dev->data->scattered_rx)
2751 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2752 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2753 dev->data->scattered_rx = 1;
2757 * Setup the HW Rx Head and Tail Descriptor Pointers.
2758 * This needs to be done after enable.
2760 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2761 rxq = dev->data->rx_queues[i];
2762 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2763 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2769 /*********************************************************************
2771 * Enable VF transmit unit.
2773 **********************************************************************/
2775 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2777 struct e1000_hw *hw;
2778 struct igb_tx_queue *txq;
2782 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2784 /* Setup the Base and Length of the Tx Descriptor Rings. */
2785 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2788 txq = dev->data->tx_queues[i];
2789 bus_addr = txq->tx_ring_phys_addr;
2790 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2792 sizeof(union e1000_adv_tx_desc));
2793 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2794 (uint32_t)(bus_addr >> 32));
2795 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2797 /* Setup the HW Tx Head and Tail descriptor pointers. */
2798 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2799 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2801 /* Setup Transmit threshold registers. */
2802 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2803 txdctl |= txq->pthresh & 0x1F;
2804 txdctl |= ((txq->hthresh & 0x1F) << 8);
2805 if (hw->mac.type == e1000_82576) {
2807			 * Workaround for the 82576 VF erratum:
2808			 * force WTHRESH to 1 to avoid write-back
2809			 * occasionally not being triggered
2812 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2815 txdctl |= ((txq->wthresh & 0x1F) << 16);
2816 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2817 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2823 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2824 struct rte_eth_rxq_info *qinfo)
2826 struct igb_rx_queue *rxq;
2828 rxq = dev->data->rx_queues[queue_id];
2830 qinfo->mp = rxq->mb_pool;
2831 qinfo->scattered_rx = dev->data->scattered_rx;
2832 qinfo->nb_desc = rxq->nb_rx_desc;
2834 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2835 qinfo->conf.rx_drop_en = rxq->drop_en;
2836 qinfo->conf.offloads = rxq->offloads;
2840 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2841 struct rte_eth_txq_info *qinfo)
2843 struct igb_tx_queue *txq;
2845 txq = dev->data->tx_queues[queue_id];
2847 qinfo->nb_desc = txq->nb_tx_desc;
2849 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2850 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2851 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2852 qinfo->conf.offloads = txq->offloads;
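/*
 * Illustrative sketch (not part of this PMD, guarded by a macro that is
 * never defined): querying the data exported by the two callbacks above
 * through the generic ethdev API. Port id 0 and queue id 0 are assumptions
 * for the example only.
 */
#ifdef IGB_RXTX_EXAMPLES
static void
igb_example_dump_queue_info(void)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	if (rte_eth_rx_queue_info_get(0, 0, &rx_qinfo) == 0)
		PMD_INIT_LOG(DEBUG, "rxq 0: %u descriptors, scattered=%u",
			     rx_qinfo.nb_desc, rx_qinfo.scattered_rx);
	if (rte_eth_tx_queue_info_get(0, 0, &tx_qinfo) == 0)
		PMD_INIT_LOG(DEBUG, "txq 0: %u descriptors, wthresh=%u",
			     tx_qinfo.nb_desc, tx_qinfo.conf.tx_thresh.wthresh);
}
#endif /* IGB_RXTX_EXAMPLES */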
2856 igb_rss_conf_init(struct rte_eth_dev *dev,
2857 struct igb_rte_flow_rss_conf *out,
2858 const struct rte_flow_action_rss *in)
2860 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2862 if (in->key_len > RTE_DIM(out->key) ||
2863 ((hw->mac.type == e1000_82576) &&
2864 (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
2865 ((hw->mac.type != e1000_82576) &&
2866 (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
2868 out->conf = (struct rte_flow_action_rss){
2872 .key_len = in->key_len,
2873 .queue_num = in->queue_num,
2874 .key = memcpy(out->key, in->key, in->key_len),
2875 .queue = memcpy(out->queue, in->queue,
2876 sizeof(*in->queue) * in->queue_num),
2882 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2883 const struct rte_flow_action_rss *with)
2885 return (comp->func == with->func &&
2886 comp->level == with->level &&
2887 comp->types == with->types &&
2888 comp->key_len == with->key_len &&
2889 comp->queue_num == with->queue_num &&
2890 !memcmp(comp->key, with->key, with->key_len) &&
2891 !memcmp(comp->queue, with->queue,
2892 sizeof(*with->queue) * with->queue_num));
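/*
 * Illustrative sketch (not part of this PMD, guarded by a macro that is
 * never defined): the kind of rte_flow RSS action an application could
 * validate so that the flow layer eventually calls igb_config_rss_filter()
 * below. It assumes <rte_flow.h> is visible through the existing includes;
 * the pattern, queue list and port id are assumptions for the example only.
 */
#ifdef IGB_RXTX_EXAMPLES
static int
igb_example_validate_rss_flow(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
		.key_len = 0,			/* keep the default key */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}
#endif /* IGB_RXTX_EXAMPLES */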
2896 igb_config_rss_filter(struct rte_eth_dev *dev,
2897 struct igb_rte_flow_rss_conf *conf, bool add)
2901 struct rte_eth_rss_conf rss_conf = {
2902 .rss_key = conf->conf.key_len ?
2903 (void *)(uintptr_t)conf->conf.key : NULL,
2904 .rss_key_len = conf->conf.key_len,
2905 .rss_hf = conf->conf.types,
2907 struct e1000_filter_info *filter_info =
2908 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2909 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2914 if (igb_action_rss_same(&filter_info->rss_info.conf,
2916 igb_rss_disable(dev);
2917 memset(&filter_info->rss_info, 0,
2918 sizeof(struct igb_rte_flow_rss_conf));
2924 if (filter_info->rss_info.conf.queue_num)
2927 /* Fill in redirection table. */
2928 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2929 for (i = 0, j = 0; i < 128; i++, j++) {
2936 if (j == conf->conf.queue_num)
2938 q_idx = conf->conf.queue[j];
2939 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2941 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2944 /* Configure the RSS key and the RSS protocols used to compute
2945 * the RSS hash of input packets.
2947 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2948 igb_rss_disable(dev);
2951 if (rss_conf.rss_key == NULL)
2952 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2953 igb_hw_rss_hash_set(hw, &rss_conf);
2955 if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))