1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
40 #include <rte_string_fns.h>
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
49 #define IGB_TX_IEEE1588_TMST 0
51 /* Bit mask to indicate which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK ( \
59 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
60 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
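/*
 * IGB_TX_OFFLOAD_NOTSUP_MASK is the complement of the supported offload
 * mask within PKT_TX_OFFLOAD_MASK; eth_igb_prep_pkts() uses it to reject
 * mbufs whose ol_flags request an offload this driver cannot perform.
 */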
63 * Structure associated with each descriptor of the RX ring of a RX queue.
66 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
70 * Structure associated with each descriptor of the TX ring of a TX queue.
73 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
74 uint16_t next_id; /**< Index of next descriptor in ring. */
75 uint16_t last_id; /**< Index of last scattered descriptor. */
82 IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
86 * Structure associated with each RX queue.
89 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
90 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
91 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
92 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
93 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
94 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
95 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
96 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
97 uint16_t nb_rx_desc; /**< number of RX descriptors. */
98 uint16_t rx_tail; /**< current value of RDT register. */
99 uint16_t nb_rx_hold; /**< number of held free RX desc. */
100 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
101 uint16_t queue_id; /**< RX queue index. */
102 uint16_t reg_idx; /**< RX queue register index. */
103 uint16_t port_id; /**< Device port identifier. */
104 uint8_t pthresh; /**< Prefetch threshold register. */
105 uint8_t hthresh; /**< Host threshold register. */
106 uint8_t wthresh; /**< Write-back threshold register. */
107 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
108 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
109 uint32_t flags; /**< RX flags. */
110 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
114 * Hardware context number
116 enum igb_advctx_num {
117 IGB_CTX_0 = 0, /**< CTX0 */
118 IGB_CTX_1 = 1, /**< CTX1 */
119 IGB_CTX_NUM = 2, /**< CTX_NUM */
122 /** Offload features */
123 union igb_tx_offload {
126 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
127 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
128 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */
129 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
130 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
132 /* uint64_t unused:8; */
137 * Compare mask for igb_tx_offload.data,
138 * should be in sync with igb_tx_offload layout.
140 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
141 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
142 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
143 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
144 /** Mac + IP + TCP + Mss mask. */
145 #define TX_TSO_CMP_MASK \
146 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
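/*
 * A sketch of how these compare masks line up with the igb_tx_offload
 * bit-field layout above (bit positions within .data):
 *   bits  0..15: l3_len + l2_len  -> TX_MACIP_LEN_CMP_MASK
 *   bits 16..31: vlan_tci         -> TX_VLAN_CMP_MASK
 *   bits 32..39: l4_len           -> TX_TCP_LEN_CMP_MASK
 *   bits 40..55: tso_segsz        -> TX_TSO_MSS_CMP_MASK
 */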
149 * Structure to check if a new context needs to be built
151 struct igb_advctx_info {
152 uint64_t flags; /**< ol_flags related to context build. */
153 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
154 union igb_tx_offload tx_offload;
155 /** compare mask for tx offload. */
156 union igb_tx_offload tx_offload_mask;
160 * Structure associated with each TX queue.
162 struct igb_tx_queue {
163 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
164 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
165 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
166 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
167 uint32_t txd_type; /**< Device-specific TXD type */
168 uint16_t nb_tx_desc; /**< number of TX descriptors. */
169 uint16_t tx_tail; /**< Current value of TDT register. */
171 /**< Index of first used TX descriptor. */
172 uint16_t queue_id; /**< TX queue index. */
173 uint16_t reg_idx; /**< TX queue register index. */
174 uint16_t port_id; /**< Device port identifier. */
175 uint8_t pthresh; /**< Prefetch threshold register. */
176 uint8_t hthresh; /**< Host threshold register. */
177 uint8_t wthresh; /**< Write-back threshold register. */
179 /**< Current used hardware descriptor. */
181 /**< Start context position for transmit queue. */
182 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
183 /**< Hardware context history.*/
184 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
188 #define RTE_PMD_USE_PREFETCH
191 #ifdef RTE_PMD_USE_PREFETCH
192 #define rte_igb_prefetch(p) rte_prefetch0(p)
194 #define rte_igb_prefetch(p) do {} while(0)
197 #ifdef RTE_PMD_PACKET_PREFETCH
198 #define rte_packet_prefetch(p) rte_prefetch1(p)
200 #define rte_packet_prefetch(p) do {} while(0)
204 * Macro for VMDq feature for 1 GbE NIC.
206 #define E1000_VMOLR_SIZE (8)
207 #define IGB_TSO_MAX_HDRLEN (512)
208 #define IGB_TSO_MAX_MSS (9216)
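/*
 * TSO requests whose total header length exceeds IGB_TSO_MAX_HDRLEN or
 * whose MSS exceeds IGB_TSO_MAX_MSS are downgraded by check_tso_para()
 * to a plain TCP checksum offload on the transmit path.
 */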
210 /*********************************************************************
214 **********************************************************************/
217 * There are some hardware limitations for TCP segmentation offload. We
218 * should check whether the parameters are valid.
220 static inline uint64_t
221 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
223 if (!(ol_req & PKT_TX_TCP_SEG))
225 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
226 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
227 ol_req &= ~PKT_TX_TCP_SEG;
228 ol_req |= PKT_TX_TCP_CKSUM;
234 * Advanced context descriptors are almost the same between igb and ixgbe.
235 * This is kept as a separate function, leaving room for optimization here.
236 * Rework is required to go with the pre-defined values.
240 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
241 volatile struct e1000_adv_tx_context_desc *ctx_txd,
242 uint64_t ol_flags, union igb_tx_offload tx_offload)
244 uint32_t type_tucmd_mlhl;
245 uint32_t mss_l4len_idx;
246 uint32_t ctx_idx, ctx_curr;
247 uint32_t vlan_macip_lens;
248 union igb_tx_offload tx_offload_mask;
250 ctx_curr = txq->ctx_curr;
251 ctx_idx = ctx_curr + txq->ctx_start;
253 tx_offload_mask.data = 0;
256 /* Specify which HW CTX to upload. */
257 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
259 if (ol_flags & PKT_TX_VLAN_PKT)
260 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
262 /* check if TCP segmentation is required for this packet */
263 if (ol_flags & PKT_TX_TCP_SEG) {
264 /* implies IP cksum in IPv4 */
265 if (ol_flags & PKT_TX_IP_CKSUM)
266 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
267 E1000_ADVTXD_TUCMD_L4T_TCP |
268 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
270 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
271 E1000_ADVTXD_TUCMD_L4T_TCP |
272 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274 tx_offload_mask.data |= TX_TSO_CMP_MASK;
275 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
276 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
277 } else { /* no TSO, check if hardware checksum is needed */
278 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
279 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
281 if (ol_flags & PKT_TX_IP_CKSUM)
282 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
284 switch (ol_flags & PKT_TX_L4_MASK) {
285 case PKT_TX_UDP_CKSUM:
286 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
287 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
290 case PKT_TX_TCP_CKSUM:
291 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
292 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
295 case PKT_TX_SCTP_CKSUM:
296 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
297 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
301 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
302 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307 txq->ctx_cache[ctx_curr].flags = ol_flags;
308 txq->ctx_cache[ctx_curr].tx_offload.data =
309 tx_offload_mask.data & tx_offload.data;
310 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
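	/*
	 * The masked offload data cached above is what what_advctx_update()
	 * compares against, so a later packet with the same relevant fields
	 * can reuse this hardware context instead of building a new one.
	 */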
312 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
313 vlan_macip_lens = (uint32_t)tx_offload.data;
314 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
315 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
316 ctx_txd->seqnum_seed = 0;
320 * Check which hardware context can be used. Use the existing match
321 * or create a new context descriptor.
323 static inline uint32_t
324 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
325 union igb_tx_offload tx_offload)
327 /* If match with the current context */
328 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
329 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
330 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
331 return txq->ctx_curr;
334 /* If match with the second context */
336 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
337 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
338 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
339 return txq->ctx_curr;
342 /* Mismatch: a new context descriptor has to be built */
346 static inline uint32_t
347 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
349 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
350 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
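	/* Index with a 0/1 comparison result to keep the translation branch-free. */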
353 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
354 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
355 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
359 static inline uint32_t
360 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
363 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
364 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
365 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
366 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
371 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
374 struct igb_tx_queue *txq;
375 struct igb_tx_entry *sw_ring;
376 struct igb_tx_entry *txe, *txn;
377 volatile union e1000_adv_tx_desc *txr;
378 volatile union e1000_adv_tx_desc *txd;
379 struct rte_mbuf *tx_pkt;
380 struct rte_mbuf *m_seg;
381 uint64_t buf_dma_addr;
382 uint32_t olinfo_status;
383 uint32_t cmd_type_len;
392 uint32_t new_ctx = 0;
394 union igb_tx_offload tx_offload = {0};
397 sw_ring = txq->sw_ring;
399 tx_id = txq->tx_tail;
400 txe = &sw_ring[tx_id];
402 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
404 pkt_len = tx_pkt->pkt_len;
406 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
409 * The number of descriptors that must be allocated for a
410 * packet is the number of segments of that packet, plus 1
411 * Context Descriptor for the VLAN Tag Identifier, if any.
412 * Determine the last TX descriptor to allocate in the TX ring
413 * for the packet, starting from the current position (tx_id)
416 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
418 ol_flags = tx_pkt->ol_flags;
419 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
421 /* If a Context Descriptor needs to be built. */
423 tx_offload.l2_len = tx_pkt->l2_len;
424 tx_offload.l3_len = tx_pkt->l3_len;
425 tx_offload.l4_len = tx_pkt->l4_len;
426 tx_offload.vlan_tci = tx_pkt->vlan_tci;
427 tx_offload.tso_segsz = tx_pkt->tso_segsz;
428 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
430 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
431 /* Only allocate a context descriptor if required. */
432 new_ctx = (ctx == IGB_CTX_NUM);
433 ctx = txq->ctx_curr + txq->ctx_start;
434 tx_last = (uint16_t) (tx_last + new_ctx);
436 if (tx_last >= txq->nb_tx_desc)
437 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
439 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
440 " tx_first=%u tx_last=%u",
441 (unsigned) txq->port_id,
442 (unsigned) txq->queue_id,
448 * Check if there are enough free descriptors in the TX ring
449 * to transmit the next packet.
450 * This operation is based on the two following rules:
452 * 1- Only check that the last needed TX descriptor can be
453 * allocated (by construction, if that descriptor is free,
454 * all intermediate ones are also free).
456 * For this purpose, the index of the last TX descriptor
457 * used for a packet (the "last descriptor" of a packet)
458 * is recorded in the TX entries (the last one included)
459 * that are associated with all TX descriptors allocated
462 * 2- Avoid allocating the last free TX descriptor of the
463 * ring, in order to never set the TDT register with the
464 * same value stored in parallel by the NIC in the TDH
465 * register, which makes the TX engine of the NIC enter
466 * in a deadlock situation.
468 * By extension, avoid allocating a free descriptor that
469 * belongs to the last set of free descriptors allocated
470 * to the same packet previously transmitted.
474 * The "last descriptor" of the previously sent packet, if any,
475 * which used the last descriptor to allocate.
477 tx_end = sw_ring[tx_last].last_id;
480 * The next descriptor following that "last descriptor" in the
483 tx_end = sw_ring[tx_end].next_id;
486 * The "last descriptor" associated with that next descriptor.
488 tx_end = sw_ring[tx_end].last_id;
491 * Check that this descriptor is free.
493 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
500 * Set common flags of all TX Data Descriptors.
502 * The following bits must be set in all Data Descriptors:
503 * - E1000_ADVTXD_DTYP_DATA
504 * - E1000_ADVTXD_DCMD_DEXT
506 * The following bits must be set in the first Data Descriptor
507 * and are ignored in the other ones:
508 * - E1000_ADVTXD_DCMD_IFCS
509 * - E1000_ADVTXD_MAC_1588
510 * - E1000_ADVTXD_DCMD_VLE
512 * The following bits must only be set in the last Data
514 * - E1000_TXD_CMD_EOP
516 * The following bits can be set in any Data Descriptor, but
517 * are only set in the last Data Descriptor:
520 cmd_type_len = txq->txd_type |
521 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
522 if (tx_ol_req & PKT_TX_TCP_SEG)
523 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
524 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
525 #if defined(RTE_LIBRTE_IEEE1588)
526 if (ol_flags & PKT_TX_IEEE1588_TMST)
527 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
530 /* Setup TX Advanced context descriptor if required */
532 volatile struct e1000_adv_tx_context_desc *
535 ctx_txd = (volatile struct
536 e1000_adv_tx_context_desc *)
539 txn = &sw_ring[txe->next_id];
540 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
542 if (txe->mbuf != NULL) {
543 rte_pktmbuf_free_seg(txe->mbuf);
547 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
549 txe->last_id = tx_last;
550 tx_id = txe->next_id;
554 /* Setup the TX Advanced Data Descriptor */
555 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
556 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
557 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
562 txn = &sw_ring[txe->next_id];
565 if (txe->mbuf != NULL)
566 rte_pktmbuf_free_seg(txe->mbuf);
570 * Set up transmit descriptor.
572 slen = (uint16_t) m_seg->data_len;
573 buf_dma_addr = rte_mbuf_data_iova(m_seg);
574 txd->read.buffer_addr =
575 rte_cpu_to_le_64(buf_dma_addr);
576 txd->read.cmd_type_len =
577 rte_cpu_to_le_32(cmd_type_len | slen);
578 txd->read.olinfo_status =
579 rte_cpu_to_le_32(olinfo_status);
580 txe->last_id = tx_last;
581 tx_id = txe->next_id;
584 } while (m_seg != NULL);
587 * The last packet data descriptor needs End Of Packet (EOP)
588 * and Report Status (RS).
590 txd->read.cmd_type_len |=
591 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
597 * Set the Transmit Descriptor Tail (TDT).
599 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
600 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
601 (unsigned) txq->port_id, (unsigned) txq->queue_id,
602 (unsigned) tx_id, (unsigned) nb_tx);
603 txq->tx_tail = tx_id;
608 /*********************************************************************
612 **********************************************************************/
614 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
620 for (i = 0; i < nb_pkts; i++) {
623 /* Check some limitations for TSO in hardware */
624 if (m->ol_flags & PKT_TX_TCP_SEG)
625 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
626 (m->l2_len + m->l3_len + m->l4_len >
627 IGB_TSO_MAX_HDRLEN)) {
632 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
633 rte_errno = ENOTSUP;
637 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
638 ret = rte_validate_tx_offload(m);
644 ret = rte_net_intel_cksum_prepare(m);
654 /*********************************************************************
658 **********************************************************************/
659 #define IGB_PACKET_TYPE_IPV4 0X01
660 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
661 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
662 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
663 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
664 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
665 #define IGB_PACKET_TYPE_IPV6 0X04
666 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
667 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
668 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
669 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
670 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
671 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
672 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
673 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
674 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
675 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
676 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
677 #define IGB_PACKET_TYPE_MAX 0X80
678 #define IGB_PACKET_TYPE_MASK 0X7F
679 #define IGB_PACKET_TYPE_SHIFT 0X04
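/*
 * The hardware pkt_info field is shifted right by IGB_PACKET_TYPE_SHIFT
 * and masked with IGB_PACKET_TYPE_MASK before being used as an index
 * into ptype_table[] below.
 */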
680 static inline uint32_t
681 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
683 static const uint32_t
684 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
685 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
687 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
688 RTE_PTYPE_L3_IPV4_EXT,
689 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
691 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
692 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
693 RTE_PTYPE_INNER_L3_IPV6,
694 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
695 RTE_PTYPE_L3_IPV6_EXT,
696 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
697 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
698 RTE_PTYPE_INNER_L3_IPV6_EXT,
699 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
700 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
701 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
702 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
703 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
704 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
705 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
706 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
707 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
708 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
709 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
710 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
711 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
712 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
713 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
714 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
715 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
716 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
718 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
719 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
720 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
721 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
722 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
723 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
724 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
725 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
726 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
728 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
729 return RTE_PTYPE_UNKNOWN;
731 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
733 return ptype_table[pkt_info];
736 static inline uint64_t
737 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
739 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
741 #if defined(RTE_LIBRTE_IEEE1588)
742 static uint32_t ip_pkt_etqf_map[8] = {
743 0, 0, 0, PKT_RX_IEEE1588_PTP,
747 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
748 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
750 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
751 if (hw->mac.type == e1000_i210)
752 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
754 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
762 static inline uint64_t
763 rx_desc_status_to_pkt_flags(uint32_t rx_status)
767 /* Check if VLAN present */
768 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
769 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
771 #if defined(RTE_LIBRTE_IEEE1588)
772 if (rx_status & E1000_RXD_STAT_TMST)
773 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
778 static inline uint64_t
779 rx_desc_error_to_pkt_flags(uint32_t rx_status)
782 * Bit 30: IPE, IPv4 checksum error
783 * Bit 29: L4I, L4I integrity error
786 static uint64_t error_to_pkt_flags_map[4] = {
787 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
788 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
789 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
790 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
792 return error_to_pkt_flags_map[(rx_status >>
793 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
797 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
800 struct igb_rx_queue *rxq;
801 volatile union e1000_adv_rx_desc *rx_ring;
802 volatile union e1000_adv_rx_desc *rxdp;
803 struct igb_rx_entry *sw_ring;
804 struct igb_rx_entry *rxe;
805 struct rte_mbuf *rxm;
806 struct rte_mbuf *nmb;
807 union e1000_adv_rx_desc rxd;
810 uint32_t hlen_type_rss;
820 rx_id = rxq->rx_tail;
821 rx_ring = rxq->rx_ring;
822 sw_ring = rxq->sw_ring;
823 while (nb_rx < nb_pkts) {
825 * The order of operations here is important as the DD status
826 * bit must not be read after any other descriptor fields.
827 * rx_ring and rxdp are pointing to volatile data so the order
828 * of accesses cannot be reordered by the compiler. If they were
829 * not volatile, they could be reordered which could lead to
830 * using invalid descriptor fields when read from rxd.
832 rxdp = &rx_ring[rx_id];
833 staterr = rxdp->wb.upper.status_error;
834 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
841 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
842 * likely to be invalid and to be dropped by the various
843 * validation checks performed by the network stack.
845 * Allocate a new mbuf to replenish the RX ring descriptor.
846 * If the allocation fails:
847 * - arrange for that RX descriptor to be the first one
848 * being parsed the next time the receive function is
849 * invoked [on the same queue].
851 * - Stop parsing the RX ring and return immediately.
853 * This policy does not drop the packet received in the RX
854 * descriptor for which the allocation of a new mbuf failed.
855 * Thus, it allows that packet to be later retrieved if
856 * mbufs have been freed in the meantime.
857 * As a side effect, holding RX descriptors instead of
858 * systematically giving them back to the NIC may lead to
859 * RX ring exhaustion situations.
860 * However, the NIC can gracefully prevent such situations
861 * from happening by sending specific "back-pressure" flow control
862 * frames to its peer(s).
864 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
865 "staterr=0x%x pkt_len=%u",
866 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
867 (unsigned) rx_id, (unsigned) staterr,
868 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
870 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
872 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
873 "queue_id=%u", (unsigned) rxq->port_id,
874 (unsigned) rxq->queue_id);
875 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
880 rxe = &sw_ring[rx_id];
882 if (rx_id == rxq->nb_rx_desc)
885 /* Prefetch next mbuf while processing current one. */
886 rte_igb_prefetch(sw_ring[rx_id].mbuf);
889 * When next RX descriptor is on a cache-line boundary,
890 * prefetch the next 4 RX descriptors and the next 8 pointers
893 if ((rx_id & 0x3) == 0) {
894 rte_igb_prefetch(&rx_ring[rx_id]);
895 rte_igb_prefetch(&sw_ring[rx_id]);
901 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
902 rxdp->read.hdr_addr = 0;
903 rxdp->read.pkt_addr = dma_addr;
906 * Initialize the returned mbuf.
907 * 1) setup generic mbuf fields:
908 * - number of segments,
911 * - RX port identifier.
912 * 2) integrate hardware offload data, if any:
914 * - IP checksum flag,
915 * - VLAN TCI, if any,
918 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
920 rxm->data_off = RTE_PKTMBUF_HEADROOM;
921 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
924 rxm->pkt_len = pkt_len;
925 rxm->data_len = pkt_len;
926 rxm->port = rxq->port_id;
928 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
929 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
932 * The vlan_tci field is only valid when PKT_RX_VLAN is
933 * set in the pkt_flags field and must be in CPU byte order.
935 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
936 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
937 rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
939 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
941 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
942 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
943 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
944 rxm->ol_flags = pkt_flags;
945 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
946 lo_dword.hs_rss.pkt_info);
949 * Store the mbuf address into the next entry of the array
950 * of returned packets.
952 rx_pkts[nb_rx++] = rxm;
954 rxq->rx_tail = rx_id;
957 * If the number of free RX descriptors is greater than the RX free
958 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
960 * Update the RDT with the value of the last processed RX descriptor
961 * minus 1, to guarantee that the RDT register is never equal to the
962 * RDH register, which creates a "full" ring situation from the
963 * hardware point of view...
965 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
966 if (nb_hold > rxq->rx_free_thresh) {
967 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
968 "nb_hold=%u nb_rx=%u",
969 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
970 (unsigned) rx_id, (unsigned) nb_hold,
972 rx_id = (uint16_t) ((rx_id == 0) ?
973 (rxq->nb_rx_desc - 1) : (rx_id - 1));
974 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
977 rxq->nb_rx_hold = nb_hold;
982 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
985 struct igb_rx_queue *rxq;
986 volatile union e1000_adv_rx_desc *rx_ring;
987 volatile union e1000_adv_rx_desc *rxdp;
988 struct igb_rx_entry *sw_ring;
989 struct igb_rx_entry *rxe;
990 struct rte_mbuf *first_seg;
991 struct rte_mbuf *last_seg;
992 struct rte_mbuf *rxm;
993 struct rte_mbuf *nmb;
994 union e1000_adv_rx_desc rxd;
995 uint64_t dma; /* Physical address of mbuf data buffer */
997 uint32_t hlen_type_rss;
1007 rx_id = rxq->rx_tail;
1008 rx_ring = rxq->rx_ring;
1009 sw_ring = rxq->sw_ring;
1012 * Retrieve RX context of current packet, if any.
1014 first_seg = rxq->pkt_first_seg;
1015 last_seg = rxq->pkt_last_seg;
1017 while (nb_rx < nb_pkts) {
1020 * The order of operations here is important as the DD status
1021 * bit must not be read after any other descriptor fields.
1022 * rx_ring and rxdp are pointing to volatile data so the order
1023 * of accesses cannot be reordered by the compiler. If they were
1024 * not volatile, they could be reordered which could lead to
1025 * using invalid descriptor fields when read from rxd.
1027 rxdp = &rx_ring[rx_id];
1028 staterr = rxdp->wb.upper.status_error;
1029 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1036 * Allocate a new mbuf to replenish the RX ring descriptor.
1037 * If the allocation fails:
1038 * - arrange for that RX descriptor to be the first one
1039 * being parsed the next time the receive function is
1040 * invoked [on the same queue].
1042 * - Stop parsing the RX ring and return immediately.
1044 * This policy does not drop the packet received in the RX
1045 * descriptor for which the allocation of a new mbuf failed.
1046 * Thus, it allows that packet to be later retrieved if
1047 * mbufs have been freed in the meantime.
1048 * As a side effect, holding RX descriptors instead of
1049 * systematically giving them back to the NIC may lead to
1050 * RX ring exhaustion situations.
1051 * However, the NIC can gracefully prevent such situations
1052 * from happening by sending specific "back-pressure" flow control
1053 * frames to its peer(s).
1055 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1056 "staterr=0x%x data_len=%u",
1057 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1058 (unsigned) rx_id, (unsigned) staterr,
1059 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1061 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1063 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1064 "queue_id=%u", (unsigned) rxq->port_id,
1065 (unsigned) rxq->queue_id);
1066 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1071 rxe = &sw_ring[rx_id];
1073 if (rx_id == rxq->nb_rx_desc)
1076 /* Prefetch next mbuf while processing current one. */
1077 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1080 * When next RX descriptor is on a cache-line boundary,
1081 * prefetch the next 4 RX descriptors and the next 8 pointers
1084 if ((rx_id & 0x3) == 0) {
1085 rte_igb_prefetch(&rx_ring[rx_id]);
1086 rte_igb_prefetch(&sw_ring[rx_id]);
1090 * Update RX descriptor with the physical address of the new
1091 * data buffer of the newly allocated mbuf.
1095 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1096 rxdp->read.pkt_addr = dma;
1097 rxdp->read.hdr_addr = 0;
1100 * Set data length & data buffer address of mbuf.
1102 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1103 rxm->data_len = data_len;
1104 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1107 * If this is the first buffer of the received packet,
1108 * set the pointer to the first mbuf of the packet and
1109 * initialize its context.
1110 * Otherwise, update the total length and the number of segments
1111 * of the current scattered packet, and update the pointer to
1112 * the last mbuf of the current packet.
1114 if (first_seg == NULL) {
1116 first_seg->pkt_len = data_len;
1117 first_seg->nb_segs = 1;
1119 first_seg->pkt_len += data_len;
1120 first_seg->nb_segs++;
1121 last_seg->next = rxm;
1125 * If this is not the last buffer of the received packet,
1126 * update the pointer to the last mbuf of the current scattered
1127 * packet and continue to parse the RX ring.
1129 if (! (staterr & E1000_RXD_STAT_EOP)) {
1135 * This is the last buffer of the received packet.
1136 * If the CRC is not stripped by the hardware:
1137 * - Subtract the CRC length from the total packet length.
1138 * - If the last buffer only contains the whole CRC or a part
1139 * of it, free the mbuf associated to the last buffer.
1140 * If part of the CRC is also contained in the previous
1141 * mbuf, subtract the length of that CRC part from the
1142 * data length of the previous mbuf.
1145 if (unlikely(rxq->crc_len > 0)) {
1146 first_seg->pkt_len -= ETHER_CRC_LEN;
1147 if (data_len <= ETHER_CRC_LEN) {
1148 rte_pktmbuf_free_seg(rxm);
1149 first_seg->nb_segs--;
1150 last_seg->data_len = (uint16_t)
1151 (last_seg->data_len -
1152 (ETHER_CRC_LEN - data_len));
1153 last_seg->next = NULL;
1156 (uint16_t) (data_len - ETHER_CRC_LEN);
1160 * Initialize the first mbuf of the returned packet:
1161 * - RX port identifier,
1162 * - hardware offload data, if any:
1163 * - RSS flag & hash,
1164 * - IP checksum flag,
1165 * - VLAN TCI, if any,
1168 first_seg->port = rxq->port_id;
1169 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1172 * The vlan_tci field is only valid when PKT_RX_VLAN is
1173 * set in the pkt_flags field and must be in CPU byte order.
1175 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1176 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1177 first_seg->vlan_tci =
1178 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1180 first_seg->vlan_tci =
1181 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1183 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1184 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1185 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1186 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1187 first_seg->ol_flags = pkt_flags;
1188 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1189 lower.lo_dword.hs_rss.pkt_info);
1191 /* Prefetch data of first segment, if configured to do so. */
1192 rte_packet_prefetch((char *)first_seg->buf_addr +
1193 first_seg->data_off);
1196 * Store the mbuf address into the next entry of the array
1197 * of returned packets.
1199 rx_pkts[nb_rx++] = first_seg;
1202 * Setup receipt context for a new packet.
1208 * Record index of the next RX descriptor to probe.
1210 rxq->rx_tail = rx_id;
1213 * Save receive context.
1215 rxq->pkt_first_seg = first_seg;
1216 rxq->pkt_last_seg = last_seg;
1219 * If the number of free RX descriptors is greater than the RX free
1220 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1222 * Update the RDT with the value of the last processed RX descriptor
1223 * minus 1, to guarantee that the RDT register is never equal to the
1224 * RDH register, which creates a "full" ring situation from the
1225 * hardware point of view...
1227 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1228 if (nb_hold > rxq->rx_free_thresh) {
1229 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1230 "nb_hold=%u nb_rx=%u",
1231 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1232 (unsigned) rx_id, (unsigned) nb_hold,
1234 rx_id = (uint16_t) ((rx_id == 0) ?
1235 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1236 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1239 rxq->nb_rx_hold = nb_hold;
1244 * Maximum number of Ring Descriptors.
1246 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1247 * descriptors should meet the following condition:
1248 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
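 * For example, with the 16-byte advanced descriptors used here, the ring
 * size must be a multiple of 8 descriptors, which is what the
 * IGB_RXD_ALIGN/IGB_TXD_ALIGN checks in the queue setup functions enforce.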
1252 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1256 if (txq->sw_ring != NULL) {
1257 for (i = 0; i < txq->nb_tx_desc; i++) {
1258 if (txq->sw_ring[i].mbuf != NULL) {
1259 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1260 txq->sw_ring[i].mbuf = NULL;
1267 igb_tx_queue_release(struct igb_tx_queue *txq)
1270 igb_tx_queue_release_mbufs(txq);
1271 rte_free(txq->sw_ring);
1277 eth_igb_tx_queue_release(void *txq)
1279 igb_tx_queue_release(txq);
1283 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1285 struct igb_tx_entry *sw_ring;
1286 volatile union e1000_adv_tx_desc *txr;
1287 uint16_t tx_first; /* First segment analyzed. */
1288 uint16_t tx_id; /* Current segment being processed. */
1289 uint16_t tx_last; /* Last segment in the current packet. */
1290 uint16_t tx_next; /* First segment of the next packet. */
1295 sw_ring = txq->sw_ring;
1299 * tx_tail is the last sent packet on the sw_ring. Go to the end
1300 * of that packet (the last segment in the packet chain) and
1301 * then the next segment will be the start of the oldest segment
1302 * in the sw_ring. This is the first packet that will be
1303 * attempted to be freed.
1306 /* Get last segment in most recently added packet. */
1307 tx_first = sw_ring[txq->tx_tail].last_id;
1309 /* Get the next segment, which is the oldest segment in ring. */
1310 tx_first = sw_ring[tx_first].next_id;
1312 /* Set the current index to the first. */
1316 * Loop through each packet. For each packet, verify that an
1317 * mbuf exists and that the last segment is free. If so, free
1321 tx_last = sw_ring[tx_id].last_id;
1323 if (sw_ring[tx_last].mbuf) {
1324 if (txr[tx_last].wb.status &
1325 E1000_TXD_STAT_DD) {
1327 * Increment the number of packets
1332 /* Get the start of the next packet. */
1333 tx_next = sw_ring[tx_last].next_id;
1336 * Loop through all segments in a
1340 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1341 sw_ring[tx_id].mbuf = NULL;
1342 sw_ring[tx_id].last_id = tx_id;
1344 /* Move to the next segment. */
1345 tx_id = sw_ring[tx_id].next_id;
1347 } while (tx_id != tx_next);
1349 if (unlikely(count == (int)free_cnt))
1353 * mbuf still in use, nothing left to
1359 * There are multiple reasons to be here:
1360 * 1) All the packets on the ring have been
1361 * freed - tx_id is equal to tx_first
1362 * and some packets have been freed.
1364 * 2) The interface has not sent a ring's worth of
1365 * packets yet, so the segment after tail is
1366 * still empty. Or a previous call to this
1367 * function freed some of the segments but
1368 * not all so there is a hole in the list.
1369 * Hopefully this is a rare case.
1370 * - Walk the list and find the next mbuf. If
1371 * there isn't one, then done.
1373 if (likely((tx_id == tx_first) && (count != 0)))
1377 * Walk the list and find the next mbuf, if any.
1380 /* Move to the next segment. */
1381 tx_id = sw_ring[tx_id].next_id;
1383 if (sw_ring[tx_id].mbuf)
1386 } while (tx_id != tx_first);
1389 * Determine why the previous loop bailed. If there
1390 * is no mbuf, we are done.
1392 if (sw_ring[tx_id].mbuf == NULL)
1403 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1405 return igb_tx_done_cleanup(txq, free_cnt);
1409 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1414 memset((void*)&txq->ctx_cache, 0,
1415 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1419 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1421 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1422 struct igb_tx_entry *txe = txq->sw_ring;
1424 struct e1000_hw *hw;
1426 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427 /* Zero out HW ring memory */
1428 for (i = 0; i < txq->nb_tx_desc; i++) {
1429 txq->tx_ring[i] = zeroed_desc;
1432 /* Initialize ring entries */
1433 prev = (uint16_t)(txq->nb_tx_desc - 1);
1434 for (i = 0; i < txq->nb_tx_desc; i++) {
1435 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1437 txd->wb.status = E1000_TXD_STAT_DD;
1440 txe[prev].next_id = i;
1444 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1445 /* 82575 specific, each tx queue will use 2 hw contexts */
1446 if (hw->mac.type == e1000_82575)
1447 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
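	/*
	 * With IGB_CTX_NUM == 2 this gives queue N the hardware context
	 * slots 2N and 2N + 1 (see the ctx_idx computation in
	 * igbe_set_xmit_ctx()); on other mac types ctx_start is left at
	 * zero from the zeroed queue structure.
	 */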
1449 igb_reset_tx_queue_stat(txq);
1453 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1455 uint64_t tx_offload_capa;
1458 tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1459 DEV_TX_OFFLOAD_IPV4_CKSUM |
1460 DEV_TX_OFFLOAD_UDP_CKSUM |
1461 DEV_TX_OFFLOAD_TCP_CKSUM |
1462 DEV_TX_OFFLOAD_SCTP_CKSUM |
1463 DEV_TX_OFFLOAD_TCP_TSO |
1464 DEV_TX_OFFLOAD_MULTI_SEGS;
1466 return tx_offload_capa;
1470 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1472 uint64_t tx_queue_offload_capa;
1474 tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1476 return tx_queue_offload_capa;
1480 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1483 unsigned int socket_id,
1484 const struct rte_eth_txconf *tx_conf)
1486 const struct rte_memzone *tz;
1487 struct igb_tx_queue *txq;
1488 struct e1000_hw *hw;
1492 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1494 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1497 * Validate number of transmit descriptors.
1498 * It must not exceed the hardware maximum, and must be a multiple
1501 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1502 (nb_desc > E1000_MAX_RING_DESC) ||
1503 (nb_desc < E1000_MIN_RING_DESC)) {
1508 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1511 if (tx_conf->tx_free_thresh != 0)
1512 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1513 "used for the 1G driver.");
1514 if (tx_conf->tx_rs_thresh != 0)
1515 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1516 "used for the 1G driver.");
1517 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1518 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1519 "consider setting the TX WTHRESH value to 4, 8, "
1522 /* Free memory prior to re-allocation if needed */
1523 if (dev->data->tx_queues[queue_idx] != NULL) {
1524 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1525 dev->data->tx_queues[queue_idx] = NULL;
1528 /* First allocate the tx queue data structure */
1529 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1530 RTE_CACHE_LINE_SIZE);
1535 * Allocate TX ring hardware descriptors. A memzone large enough to
1536 * handle the maximum ring size is allocated in order to allow for
1537 * resizing in later calls to the queue setup function.
1539 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1540 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1541 E1000_ALIGN, socket_id);
1543 igb_tx_queue_release(txq);
1547 txq->nb_tx_desc = nb_desc;
1548 txq->pthresh = tx_conf->tx_thresh.pthresh;
1549 txq->hthresh = tx_conf->tx_thresh.hthresh;
1550 txq->wthresh = tx_conf->tx_thresh.wthresh;
1551 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1553 txq->queue_id = queue_idx;
1554 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1555 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1556 txq->port_id = dev->data->port_id;
1558 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1559 txq->tx_ring_phys_addr = tz->iova;
1561 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1562 /* Allocate software ring */
1563 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1564 sizeof(struct igb_tx_entry) * nb_desc,
1565 RTE_CACHE_LINE_SIZE);
1566 if (txq->sw_ring == NULL) {
1567 igb_tx_queue_release(txq);
1570 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1571 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1573 igb_reset_tx_queue(txq, dev);
1574 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1575 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1576 dev->data->tx_queues[queue_idx] = txq;
1577 txq->offloads = offloads;
1583 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1587 if (rxq->sw_ring != NULL) {
1588 for (i = 0; i < rxq->nb_rx_desc; i++) {
1589 if (rxq->sw_ring[i].mbuf != NULL) {
1590 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1591 rxq->sw_ring[i].mbuf = NULL;
1598 igb_rx_queue_release(struct igb_rx_queue *rxq)
1601 igb_rx_queue_release_mbufs(rxq);
1602 rte_free(rxq->sw_ring);
1608 eth_igb_rx_queue_release(void *rxq)
1610 igb_rx_queue_release(rxq);
1614 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1616 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1619 /* Zero out HW ring memory */
1620 for (i = 0; i < rxq->nb_rx_desc; i++) {
1621 rxq->rx_ring[i] = zeroed_desc;
1625 rxq->pkt_first_seg = NULL;
1626 rxq->pkt_last_seg = NULL;
1630 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1632 uint64_t rx_offload_capa;
1635 rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1636 DEV_RX_OFFLOAD_VLAN_FILTER |
1637 DEV_RX_OFFLOAD_IPV4_CKSUM |
1638 DEV_RX_OFFLOAD_UDP_CKSUM |
1639 DEV_RX_OFFLOAD_TCP_CKSUM |
1640 DEV_RX_OFFLOAD_JUMBO_FRAME |
1641 DEV_RX_OFFLOAD_CRC_STRIP |
1642 DEV_RX_OFFLOAD_SCATTER;
1644 return rx_offload_capa;
1648 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1650 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1651 uint64_t rx_queue_offload_capa;
1653 switch (hw->mac.type) {
1654 case e1000_vfadapt_i350:
1656 * As only one Rx queue can be used, let the per-queue offloading
1657 * capability be the same as the per-port offloading capability
1658 * for convenience.
1660 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1663 rx_queue_offload_capa = 0;
1665 return rx_queue_offload_capa;
1669 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1672 unsigned int socket_id,
1673 const struct rte_eth_rxconf *rx_conf,
1674 struct rte_mempool *mp)
1676 const struct rte_memzone *rz;
1677 struct igb_rx_queue *rxq;
1678 struct e1000_hw *hw;
1682 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1684 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1687 * Validate number of receive descriptors.
1688 * It must not exceed the hardware maximum, and must be a multiple
1691 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1692 (nb_desc > E1000_MAX_RING_DESC) ||
1693 (nb_desc < E1000_MIN_RING_DESC)) {
1697 /* Free memory prior to re-allocation if needed */
1698 if (dev->data->rx_queues[queue_idx] != NULL) {
1699 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1700 dev->data->rx_queues[queue_idx] = NULL;
1703 /* First allocate the RX queue data structure. */
1704 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1705 RTE_CACHE_LINE_SIZE);
1708 rxq->offloads = offloads;
1710 rxq->nb_rx_desc = nb_desc;
1711 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1712 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1713 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1714 if (rxq->wthresh > 0 &&
1715 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1717 rxq->drop_en = rx_conf->rx_drop_en;
1718 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1719 rxq->queue_id = queue_idx;
1720 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1721 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1722 rxq->port_id = dev->data->port_id;
1723 rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
1724 DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
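	/*
	 * When the hardware does not strip the CRC, crc_len is set to
	 * ETHER_CRC_LEN (4) so that the receive functions can subtract it
	 * from the reported packet length.
	 */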
1727 * Allocate RX ring hardware descriptors. A memzone large enough to
1728 * handle the maximum ring size is allocated in order to allow for
1729 * resizing in later calls to the queue setup function.
1731 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1732 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1733 E1000_ALIGN, socket_id);
1735 igb_rx_queue_release(rxq);
1738 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1739 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1740 rxq->rx_ring_phys_addr = rz->iova;
1741 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1743 /* Allocate software ring. */
1744 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1745 sizeof(struct igb_rx_entry) * nb_desc,
1746 RTE_CACHE_LINE_SIZE);
1747 if (rxq->sw_ring == NULL) {
1748 igb_rx_queue_release(rxq);
1751 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1752 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1754 dev->data->rx_queues[queue_idx] = rxq;
1755 igb_reset_rx_queue(rxq);
1761 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1763 #define IGB_RXQ_SCAN_INTERVAL 4
1764 volatile union e1000_adv_rx_desc *rxdp;
1765 struct igb_rx_queue *rxq;
1768 rxq = dev->data->rx_queues[rx_queue_id];
1769 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1771 while ((desc < rxq->nb_rx_desc) &&
1772 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1773 desc += IGB_RXQ_SCAN_INTERVAL;
1774 rxdp += IGB_RXQ_SCAN_INTERVAL;
1775 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1776 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1777 desc - rxq->nb_rx_desc]);
1784 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1786 volatile union e1000_adv_rx_desc *rxdp;
1787 struct igb_rx_queue *rxq = rx_queue;
1790 if (unlikely(offset >= rxq->nb_rx_desc))
1792 desc = rxq->rx_tail + offset;
1793 if (desc >= rxq->nb_rx_desc)
1794 desc -= rxq->nb_rx_desc;
1796 rxdp = &rxq->rx_ring[desc];
1797 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1801 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1803 struct igb_rx_queue *rxq = rx_queue;
1804 volatile uint32_t *status;
1807 if (unlikely(offset >= rxq->nb_rx_desc))
1810 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1811 return RTE_ETH_RX_DESC_UNAVAIL;
1813 desc = rxq->rx_tail + offset;
1814 if (desc >= rxq->nb_rx_desc)
1815 desc -= rxq->nb_rx_desc;
1817 status = &rxq->rx_ring[desc].wb.upper.status_error;
1818 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1819 return RTE_ETH_RX_DESC_DONE;
1821 return RTE_ETH_RX_DESC_AVAIL;
1825 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1827 struct igb_tx_queue *txq = tx_queue;
1828 volatile uint32_t *status;
1831 if (unlikely(offset >= txq->nb_tx_desc))
1834 desc = txq->tx_tail + offset;
1835 if (desc >= txq->nb_tx_desc)
1836 desc -= txq->nb_tx_desc;
1838 status = &txq->tx_ring[desc].wb.status;
1839 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1840 return RTE_ETH_TX_DESC_DONE;
1842 return RTE_ETH_TX_DESC_FULL;
1846 igb_dev_clear_queues(struct rte_eth_dev *dev)
1849 struct igb_tx_queue *txq;
1850 struct igb_rx_queue *rxq;
1852 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1853 txq = dev->data->tx_queues[i];
1855 igb_tx_queue_release_mbufs(txq);
1856 igb_reset_tx_queue(txq, dev);
1860 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1861 rxq = dev->data->rx_queues[i];
1863 igb_rx_queue_release_mbufs(rxq);
1864 igb_reset_rx_queue(rxq);
1870 igb_dev_free_queues(struct rte_eth_dev *dev)
1874 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1875 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1876 dev->data->rx_queues[i] = NULL;
1878 dev->data->nb_rx_queues = 0;
1880 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1881 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1882 dev->data->tx_queues[i] = NULL;
1884 dev->data->nb_tx_queues = 0;
1888 * Receive Side Scaling (RSS).
1889 * See section 7.1.1.7 in the following document:
1890 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1893 * The source and destination IP addresses of the IP header and the source and
1894 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1895 * against a configurable random key to compute a 32-bit RSS hash result.
1896 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1897 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1898 * RSS output index which is used as the RX queue index where to store the
1900 * The following output is supplied in the RX write-back descriptor:
1901 * - 32-bit result of the Microsoft RSS hash function,
1902 * - 4-bit RSS type field.
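 *
 * For example, with the default RETA programmed by igb_rss_configure()
 * below, entry i maps to RX queue (i % nb_rx_queues), spreading the 128
 * hash buckets evenly across the configured RX queues.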
1906 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1907 * Used as the default key.
1909 static uint8_t rss_intel_key[40] = {
1910 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1911 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1912 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1913 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1914 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1918 igb_rss_disable(struct rte_eth_dev *dev)
1920 struct e1000_hw *hw;
1923 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1924 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1925 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1926 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1930 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1938 hash_key = rss_conf->rss_key;
1939 if (hash_key != NULL) {
1940 /* Fill in RSS hash key */
1941 for (i = 0; i < 10; i++) {
1942 rss_key = hash_key[(i * 4)];
1943 rss_key |= hash_key[(i * 4) + 1] << 8;
1944 rss_key |= hash_key[(i * 4) + 2] << 16;
1945 rss_key |= hash_key[(i * 4) + 3] << 24;
1946 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1950 /* Set configured hashing protocols in MRQC register */
1951 rss_hf = rss_conf->rss_hf;
1952 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1953 if (rss_hf & ETH_RSS_IPV4)
1954 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1955 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1956 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1957 if (rss_hf & ETH_RSS_IPV6)
1958 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1959 if (rss_hf & ETH_RSS_IPV6_EX)
1960 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1961 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1962 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1963 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1964 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1965 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1966 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1967 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1968 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1969 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1970 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1971 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1975 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1976 struct rte_eth_rss_conf *rss_conf)
1978 struct e1000_hw *hw;
1982 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1985 * Before changing anything, first check that the update RSS operation
1986 * does not attempt to disable RSS, if RSS was enabled at
1987 * initialization time, or does not attempt to enable RSS, if RSS was
1988 * disabled at initialization time.
1990 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1991 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1992 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1993 if (rss_hf != 0) /* Enable RSS */
1995 return 0; /* Nothing to do */
1998 if (rss_hf == 0) /* Disable RSS */
2000 igb_hw_rss_hash_set(hw, rss_conf);
2004 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2005 struct rte_eth_rss_conf *rss_conf)
2007 struct e1000_hw *hw;
2014 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2015 hash_key = rss_conf->rss_key;
2016 if (hash_key != NULL) {
2017 /* Return RSS hash key */
2018 for (i = 0; i < 10; i++) {
2019 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2020 hash_key[(i * 4)] = rss_key & 0x000000FF;
2021 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2022 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2023 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2027 /* Get RSS functions configured in MRQC register */
2028 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2029 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2030 rss_conf->rss_hf = 0;
2034 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2035 rss_hf |= ETH_RSS_IPV4;
2036 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2037 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2038 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2039 rss_hf |= ETH_RSS_IPV6;
2040 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2041 rss_hf |= ETH_RSS_IPV6_EX;
2042 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2043 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2044 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2045 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2046 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2047 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2048 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2049 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2050 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2051 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2052 rss_conf->rss_hf = rss_hf;
2057 igb_rss_configure(struct rte_eth_dev *dev)
2059 struct rte_eth_rss_conf rss_conf;
2060 struct e1000_hw *hw;
2064 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2066 /* Fill in redirection table. */
2067 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2068 for (i = 0; i < 128; i++) {
2075 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2076 i % dev->data->nb_rx_queues : 0);
2077 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2079 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
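		/*
		 * Each 32-bit RETA register packs four one-byte entries,
		 * hence the (i & 3) byte selection and the (i >> 2)
		 * register index above.
		 */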
2083 * Configure the RSS key and the RSS protocols used to compute
2084 * the RSS hash of input packets.
2086 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2087 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2088 igb_rss_disable(dev);
2091 if (rss_conf.rss_key == NULL)
2092 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2093 igb_hw_rss_hash_set(hw, &rss_conf);
2097 * Check whether the mac type supports VMDq or not.
2098 * Return 1 if it does, otherwise return 0.
2101 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2103 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2105 switch (hw->mac.type) {
2126 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2132 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2134 struct rte_eth_vmdq_rx_conf *cfg;
2135 struct e1000_hw *hw;
2136 uint32_t mrqc, vt_ctl, vmolr, rctl;
2139 PMD_INIT_FUNC_TRACE();
2141 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2142 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2144 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2145 if (igb_is_vmdq_supported(dev) == 0)
2148 igb_rss_disable(dev);
2150 /* RCTL: enable VLAN filter */
2151 rctl = E1000_READ_REG(hw, E1000_RCTL);
2152 rctl |= E1000_RCTL_VFE;
2153 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2155 /* MRQC: enable vmdq */
2156 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2157 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2158 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2160 /* VTCTL: pool selection according to VLAN tag */
2161 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2162 if (cfg->enable_default_pool)
2163 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2164 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2165 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
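/*
 * VMOLR: per-pool receive offload register. Translate the VMDq
 * rx_mode flags into the matching accept bits for each pool:
 * untagged (AUPE), hash-matched multicast (ROMPE), hash-matched
 * unicast (ROPE), broadcast (BAM) and multicast promiscuous (MPME).
 */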
2167 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2168 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2169 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2170 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2173 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2174 vmolr |= E1000_VMOLR_AUPE;
2175 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2176 vmolr |= E1000_VMOLR_ROMPE;
2177 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2178 vmolr |= E1000_VMOLR_ROPE;
2179 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2180 vmolr |= E1000_VMOLR_BAM;
2181 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2182 vmolr |= E1000_VMOLR_MPME;
2184 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2188 * VMOLR: set STRVLAN to 1 when IGMAC in VT_CTL is set to 1.
2189 * Both 82576 and 82580 support it.
2191 if (hw->mac.type != e1000_i350) {
2192 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2193 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2194 vmolr |= E1000_VMOLR_STRVLAN;
2195 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2199 /* VFTA - enable all vlan filters */
2200 for (i = 0; i < IGB_VFTA_SIZE; i++)
2201 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2203 /* VFRE: enable all 8 pools for RX; both 82576 and i350 support it */
2204 if (hw->mac.type != e1000_82580)
2205 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2208 * RAH/RAL - allow pools to read specific MAC addresses.
2209 * In this case, all pools should be able to read from MAC address 0.
2211 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2212 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2214 /* VLVF: set up filters for vlan tags as configured */
2215 for (i = 0; i < cfg->nb_pool_maps; i++) {
2216 /* set the VLAN ID in the VLVF register and set the valid bit */
2217 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2218 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2219 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) & \
2220 E1000_VLVF_POOLSEL_MASK)));
2223 E1000_WRITE_FLUSH(hw);
2229 /*********************************************************************
2231 * Enable receive unit.
2233 **********************************************************************/
2236 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2238 struct igb_rx_entry *rxe = rxq->sw_ring;
2242 /* Initialize software ring entries. */
2243 for (i = 0; i < rxq->nb_rx_desc; i++) {
2244 volatile union e1000_adv_rx_desc *rxd;
2245 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2247 if (mbuf == NULL) {
2248 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2249 "queue_id=%hu", rxq->queue_id);
2250 return -ENOMEM;
2251 }
2252 dma_addr =
2253 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2254 rxd = &rxq->rx_ring[i];
2255 rxd->read.hdr_addr = 0;
2256 rxd->read.pkt_addr = dma_addr;
2263 #define E1000_MRQC_DEF_Q_SHIFT (3)
2265 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2267 struct e1000_hw *hw =
2268 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2271 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2273 * SRIOV active scheme.
2274 * FIXME: add support for RSS together with VMDq & SRIOV.
2276 mrqc = E1000_MRQC_ENABLE_VMDQ;
2277 /* Def_Q field = 011b: Def_Q is ignored; the default pool comes from VT_CTL.DEF_PL */
2278 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2279 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2280 } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2282 * SRIOV inactive scheme
2284 switch (dev->data->dev_conf.rxmode.mq_mode) {
2286 igb_rss_configure(dev);
2288 case ETH_MQ_RX_VMDQ_ONLY:
2289 /* Configure general VMDq-only RX parameters */
2290 igb_vmdq_rx_hw_configure(dev);
2292 case ETH_MQ_RX_NONE:
2293 /* If mq_mode is none, disable RSS. */
2295 igb_rss_disable(dev);
2304 eth_igb_rx_init(struct rte_eth_dev *dev)
2306 struct rte_eth_rxmode *rxmode;
2307 struct e1000_hw *hw;
2308 struct igb_rx_queue *rxq;
2313 uint16_t rctl_bsize;
2317 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2321 * Make sure receives are disabled while setting
2322 * up the descriptor ring.
2324 rctl = E1000_READ_REG(hw, E1000_RCTL);
2325 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2327 rxmode = &dev->data->dev_conf.rxmode;
2330 * Configure support of jumbo frames, if any.
2332 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2333 rctl |= E1000_RCTL_LPE;
2336 * Set the maximum packet length here; it may be updated later
2337 * when dual VLAN is enabled or disabled.
2339 E1000_WRITE_REG(hw, E1000_RLPML,
2340 dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE);
2343 rctl &= ~E1000_RCTL_LPE;
2345 /* Configure and enable each RX queue. */
2347 dev->rx_pkt_burst = eth_igb_recv_pkts;
2348 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2352 rxq = dev->data->rx_queues[i];
2356 * i350 and i354 loopback VLAN packets have their VLAN tags byte swapped.
2358 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2359 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2360 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2362 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2365 /* Allocate buffers for descriptor rings and set up queue */
2366 ret = igb_alloc_rx_queue_mbufs(rxq);
2371 * Reset crc_len in case it was changed after queue setup by a call to configure().
2374 rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
2375 DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
2377 bus_addr = rxq->rx_ring_phys_addr;
2378 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2380 sizeof(union e1000_adv_rx_desc));
2381 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2382 (uint32_t)(bus_addr >> 32));
2383 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2385 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2388 * Configure RX buffer size.
2390 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2391 RTE_PKTMBUF_HEADROOM);
2392 if (buf_size >= 1024) {
2394 * Configure the BSIZEPACKET field of the SRRCTL
2395 * register of the queue.
2396 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2397 * If this field is equal to 0b, then RCTL.BSIZE
2398 * determines the RX packet buffer size.
2400 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2401 E1000_SRRCTL_BSIZEPKT_MASK);
2402 buf_size = (uint16_t) ((srrctl &
2403 E1000_SRRCTL_BSIZEPKT_MASK) <<
2404 E1000_SRRCTL_BSIZEPKT_SHIFT);
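/*
 * buf_size is re-derived from the value actually programmed into
 * SRRCTL, i.e. rounded down to the 1 KB granularity used by the
 * hardware, so the scattered-RX decision below is based on the
 * real per-descriptor buffer size.
 */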
2406 /* Add the dual VLAN tag length to support dual VLAN frames */
2407 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2408 2 * VLAN_TAG_SIZE) > buf_size) {
2409 if (!dev->data->scattered_rx)
2411 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2412 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2413 dev->data->scattered_rx = 1;
2417 * Use BSIZE field of the device RCTL register.
2419 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2420 rctl_bsize = buf_size;
2421 if (!dev->data->scattered_rx)
2422 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2423 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2424 dev->data->scattered_rx = 1;
2427 /* Configure whether packets are dropped when no RX descriptors are available */
2429 srrctl |= E1000_SRRCTL_DROP_EN;
2431 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2433 /* Enable this RX queue. */
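/*
 * RXDCTL: clear the low 20 bits, then pack the prefetch, host and
 * write-back thresholds into bits 0-4, 8-12 and 16-20;
 * QUEUE_ENABLE arms the queue.
 */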
2434 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2435 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2436 rxdctl &= 0xFFF00000;
2437 rxdctl |= (rxq->pthresh & 0x1F);
2438 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2439 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2440 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2443 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2444 if (!dev->data->scattered_rx)
2445 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2446 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2447 dev->data->scattered_rx = 1;
2451 * Setup BSIZE field of RCTL register, if needed.
2452 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2453 * register, since the code above configures the SRRCTL register of
2454 * the RX queue in such a case.
2455 * All configurable sizes are:
2456 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2457 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2458 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2459 * 2048: rctl |= E1000_RCTL_SZ_2048;
2460 * 1024: rctl |= E1000_RCTL_SZ_1024;
2461 * 512: rctl |= E1000_RCTL_SZ_512;
2462 * 256: rctl |= E1000_RCTL_SZ_256;
2464 if (rctl_bsize > 0) {
2465 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2466 rctl |= E1000_RCTL_SZ_512;
2467 else /* 256 <= buf_size < 512 - use 256 */
2468 rctl |= E1000_RCTL_SZ_256;
2472 * Configure RSS if the device is configured with multiple RX queues.
2474 igb_dev_mq_rx_configure(dev);
2476 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2477 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2480 * Setup the Checksum Register.
2481 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2483 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
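/*
 * PCSD (packet checksum disable) is set unconditionally: with RSS
 * enabled, the descriptor field that would carry the full-packet
 * checksum reports the RSS hash instead, which is why the two
 * features are mutually exclusive.
 */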
2484 rxcsum |= E1000_RXCSUM_PCSD;
2486 /* Enable both L3/L4 rx checksum offload */
2487 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2488 rxcsum |= E1000_RXCSUM_IPOFL;
2490 rxcsum &= ~E1000_RXCSUM_IPOFL;
2491 if (rxmode->offloads &
2492 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2493 rxcsum |= E1000_RXCSUM_TUOFL;
2495 rxcsum &= ~E1000_RXCSUM_TUOFL;
2496 if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2497 rxcsum |= E1000_RXCSUM_CRCOFL;
2499 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2501 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2503 /* Setup the Receive Control Register. */
2504 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
2505 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2507 /* set STRCRC bit in all queues */
2508 if (hw->mac.type == e1000_i350 ||
2509 hw->mac.type == e1000_i210 ||
2510 hw->mac.type == e1000_i211 ||
2511 hw->mac.type == e1000_i354) {
2512 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2513 rxq = dev->data->rx_queues[i];
2514 uint32_t dvmolr = E1000_READ_REG(hw,
2515 E1000_DVMOLR(rxq->reg_idx));
2516 dvmolr |= E1000_DVMOLR_STRCRC;
2517 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2521 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2523 /* clear STRCRC bit in all queues */
2524 if (hw->mac.type == e1000_i350 ||
2525 hw->mac.type == e1000_i210 ||
2526 hw->mac.type == e1000_i211 ||
2527 hw->mac.type == e1000_i354) {
2528 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2529 rxq = dev->data->rx_queues[i];
2530 uint32_t dvmolr = E1000_READ_REG(hw,
2531 E1000_DVMOLR(rxq->reg_idx));
2532 dvmolr &= ~E1000_DVMOLR_STRCRC;
2533 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2538 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2539 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2540 E1000_RCTL_RDMTS_HALF |
2541 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2543 /* Make sure VLAN Filters are off. */
2544 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2545 rctl &= ~E1000_RCTL_VFE;
2546 /* Don't store bad packets. */
2547 rctl &= ~E1000_RCTL_SBP;
2549 /* Enable Receives. */
2550 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2553 * Setup the HW Rx Head and Tail Descriptor Pointers.
2554 * This needs to be done after enable.
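/*
 * Head starts at 0 and the tail is written with the last descriptor
 * index, handing all but one descriptor to the hardware (head equal
 * to tail would indicate an empty ring).
 */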
2556 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2557 rxq = dev->data->rx_queues[i];
2558 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2559 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2565 /*********************************************************************
2567 * Enable transmit unit.
2569 **********************************************************************/
2571 eth_igb_tx_init(struct rte_eth_dev *dev)
2573 struct e1000_hw *hw;
2574 struct igb_tx_queue *txq;
2579 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2581 /* Setup the Base and Length of the Tx Descriptor Rings. */
2582 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2584 txq = dev->data->tx_queues[i];
2585 bus_addr = txq->tx_ring_phys_addr;
2587 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2589 sizeof(union e1000_adv_tx_desc));
2590 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2591 (uint32_t)(bus_addr >> 32));
2592 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2594 /* Setup the HW Tx Head and Tail descriptor pointers. */
2595 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2596 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2598 /* Setup Transmit threshold registers. */
2599 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2600 txdctl |= txq->pthresh & 0x1F;
2601 txdctl |= ((txq->hthresh & 0x1F) << 8);
2602 txdctl |= ((txq->wthresh & 0x1F) << 16);
2603 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2604 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2607 /* Program the Transmit Control Register. */
2608 tctl = E1000_READ_REG(hw, E1000_TCTL);
2609 tctl &= ~E1000_TCTL_CT;
2610 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2611 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2613 e1000_config_collision_dist(hw);
2615 /* This write will effectively turn on the transmit unit. */
2616 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2619 /*********************************************************************
2621 * Enable VF receive unit.
2623 **********************************************************************/
2625 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2627 struct e1000_hw *hw;
2628 struct igb_rx_queue *rxq;
2631 uint16_t rctl_bsize;
2635 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2638 e1000_rlpml_set_vf(hw,
2639 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE));
2642 /* Configure and enable each RX queue. */
2644 dev->rx_pkt_burst = eth_igb_recv_pkts;
2645 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2649 rxq = dev->data->rx_queues[i];
2653 * i350 VF loopback VLAN packets have their VLAN tags byte swapped.
2655 if (hw->mac.type == e1000_vfadapt_i350) {
2656 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2657 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2659 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2662 /* Allocate buffers for descriptor rings and set up queue */
2663 ret = igb_alloc_rx_queue_mbufs(rxq);
2667 bus_addr = rxq->rx_ring_phys_addr;
2668 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2670 sizeof(union e1000_adv_rx_desc));
2671 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2672 (uint32_t)(bus_addr >> 32));
2673 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2675 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2678 * Configure RX buffer size.
2680 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2681 RTE_PKTMBUF_HEADROOM);
2682 if (buf_size >= 1024) {
2684 * Configure the BSIZEPACKET field of the SRRCTL
2685 * register of the queue.
2686 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2687 * If this field is equal to 0b, then RCTL.BSIZE
2688 * determines the RX packet buffer size.
2690 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2691 E1000_SRRCTL_BSIZEPKT_MASK);
2692 buf_size = (uint16_t) ((srrctl &
2693 E1000_SRRCTL_BSIZEPKT_MASK) <<
2694 E1000_SRRCTL_BSIZEPKT_SHIFT);
2696 /* Add the dual VLAN tag length to support dual VLAN frames */
2697 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2698 2 * VLAN_TAG_SIZE) > buf_size) {
2699 if (!dev->data->scattered_rx)
2701 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2702 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2703 dev->data->scattered_rx = 1;
2707 * Use BSIZE field of the device RCTL register.
2709 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2710 rctl_bsize = buf_size;
2711 if (!dev->data->scattered_rx)
2712 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2713 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2714 dev->data->scattered_rx = 1;
2717 /* Configure whether packets are dropped when no RX descriptors are available */
2719 srrctl |= E1000_SRRCTL_DROP_EN;
2721 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2723 /* Enable this RX queue. */
2724 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2725 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2726 rxdctl &= 0xFFF00000;
2727 rxdctl |= (rxq->pthresh & 0x1F);
2728 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2729 if (hw->mac.type == e1000_vfadapt) {
2731 * Workaround for the 82576 VF erratum:
2732 * force WTHRESH to 1 to avoid descriptor write-back
2733 * sometimes not being triggered.
2736 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2739 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2740 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2743 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2744 if (!dev->data->scattered_rx)
2745 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2746 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2747 dev->data->scattered_rx = 1;
2751 * Setup the HW Rx Head and Tail Descriptor Pointers.
2752 * This needs to be done after enable.
2754 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2755 rxq = dev->data->rx_queues[i];
2756 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2757 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2763 /*********************************************************************
2765 * Enable VF transmit unit.
2767 **********************************************************************/
2769 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2771 struct e1000_hw *hw;
2772 struct igb_tx_queue *txq;
2776 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2778 /* Setup the Base and Length of the Tx Descriptor Rings. */
2779 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2782 txq = dev->data->tx_queues[i];
2783 bus_addr = txq->tx_ring_phys_addr;
2784 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2786 sizeof(union e1000_adv_tx_desc));
2787 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2788 (uint32_t)(bus_addr >> 32));
2789 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2791 /* Setup the HW Tx Head and Tail descriptor pointers. */
2792 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2793 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2795 /* Setup Transmit threshold registers. */
2796 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2797 txdctl |= txq->pthresh & 0x1F;
2798 txdctl |= ((txq->hthresh & 0x1F) << 8);
2799 if (hw->mac.type == e1000_82576) {
2801 * Workaround for the 82576 VF erratum:
2802 * force WTHRESH to 1 to avoid descriptor write-back
2803 * sometimes not being triggered.
2806 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2809 txdctl |= ((txq->wthresh & 0x1F) << 16);
2810 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2811 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2817 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2818 struct rte_eth_rxq_info *qinfo)
2820 struct igb_rx_queue *rxq;
2822 rxq = dev->data->rx_queues[queue_id];
2824 qinfo->mp = rxq->mb_pool;
2825 qinfo->scattered_rx = dev->data->scattered_rx;
2826 qinfo->nb_desc = rxq->nb_rx_desc;
2828 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2829 qinfo->conf.rx_drop_en = rxq->drop_en;
2830 qinfo->conf.offloads = rxq->offloads;
2834 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2835 struct rte_eth_txq_info *qinfo)
2837 struct igb_tx_queue *txq;
2839 txq = dev->data->tx_queues[queue_id];
2841 qinfo->nb_desc = txq->nb_tx_desc;
2843 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2844 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2845 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2846 qinfo->conf.offloads = txq->offloads;
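/*
 * Copy an rte_flow RSS action into the driver's private
 * igb_rte_flow_rss_conf so the key bytes and queue list stay valid
 * for the lifetime of the flow; the action is rejected if either
 * exceeds the space reserved in the structure.
 */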
2850 igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
2851 const struct rte_flow_action_rss *in)
2853 if (in->key_len > RTE_DIM(out->key) ||
2854 in->queue_num > RTE_DIM(out->queue))
2856 out->conf = (struct rte_flow_action_rss){
2860 .key_len = in->key_len,
2861 .queue_num = in->queue_num,
2862 .key = memcpy(out->key, in->key, in->key_len),
2863 .queue = memcpy(out->queue, in->queue,
2864 sizeof(*in->queue) * in->queue_num),
2870 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2871 const struct rte_flow_action_rss *with)
2873 return (comp->func == with->func &&
2874 comp->level == with->level &&
2875 comp->types == with->types &&
2876 comp->key_len == with->key_len &&
2877 comp->queue_num == with->queue_num &&
2878 !memcmp(comp->key, with->key, with->key_len) &&
2879 !memcmp(comp->queue, with->queue,
2880 sizeof(*with->queue) * with->queue_num));
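/*
 * Add or remove an rte_flow RSS filter. On removal, a matching saved
 * configuration disables RSS and clears the stored state. On add, the
 * redirection table is rebuilt from the action's queue list, the hash
 * key and types are programmed, and the configuration is saved.
 */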
2884 igb_config_rss_filter(struct rte_eth_dev *dev,
2885 struct igb_rte_flow_rss_conf *conf, bool add)
2889 struct rte_eth_rss_conf rss_conf = {
2890 .rss_key = conf->conf.key_len ?
2891 (void *)(uintptr_t)conf->conf.key : NULL,
2892 .rss_key_len = conf->conf.key_len,
2893 .rss_hf = conf->conf.types,
2895 struct e1000_filter_info *filter_info =
2896 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2897 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2902 if (igb_action_rss_same(&filter_info->rss_info.conf,
2904 igb_rss_disable(dev);
2905 memset(&filter_info->rss_info, 0,
2906 sizeof(struct igb_rte_flow_rss_conf));
2912 if (filter_info->rss_info.conf.queue_num)
2915 /* Fill in redirection table. */
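/*
 * Unlike igb_rss_configure(), the table is filled from the queue list
 * supplied in the flow action, cycling through it until all 128
 * entries are populated.
 */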
2916 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2917 for (i = 0, j = 0; i < 128; i++, j++) {
2924 if (j == conf->conf.queue_num)
2925 j = 0;
2926 q_idx = conf->conf.queue[j];
2927 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2929 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2932 /* Configure the RSS key and the RSS protocols used to compute
2933 * the RSS hash of input packets.
2935 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2936 igb_rss_disable(dev);
2939 if (rss_conf.rss_key == NULL)
2940 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2941 igb_hw_rss_hash_set(hw, &rss_conf);
2943 if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))