4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
48 #include <rte_debug.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
70 #include <rte_string_fns.h>
72 #include "e1000_logs.h"
73 #include "base/e1000_api.h"
74 #include "e1000_ethdev.h"
75 #include "base/e1000_osdep.h"
77 #define E1000_TXD_VLAN_SHIFT 16
79 #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
81 #define E1000_TX_OFFLOAD_MASK ( \
86 #define E1000_TX_OFFLOAD_NOTSUP_MASK \
87 (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
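/*
 * Note (illustrative, not part of the original sources): the NOTSUP mask is
 * the complement, within PKT_TX_OFFLOAD_MASK, of the TX offloads this PMD
 * supports. eth_em_prep_pkts() below rejects any mbuf whose ol_flags
 * intersect it, i.e. conceptually:
 *
 *   if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK)
 *       ... packet requests an offload the em hardware cannot perform ...
 */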
90 * Structure associated with each descriptor of the RX ring of a RX queue.
93 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
97 * Structure associated with each descriptor of the TX ring of a TX queue.
100 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
101 uint16_t next_id; /**< Index of next descriptor in ring. */
102 uint16_t last_id; /**< Index of last scattered descriptor. */
106 * Structure associated with each RX queue.
109 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
110 volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
111 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
112 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
113 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
114 struct em_rx_entry *sw_ring; /**< address of RX software ring. */
115 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
116 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
117 uint16_t nb_rx_desc; /**< number of RX descriptors. */
118 uint16_t rx_tail; /**< current value of RDT register. */
119 uint16_t nb_rx_hold; /**< number of held free RX desc. */
120 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
121 uint16_t queue_id; /**< RX queue index. */
122 uint8_t port_id; /**< Device port identifier. */
123 uint8_t pthresh; /**< Prefetch threshold register. */
124 uint8_t hthresh; /**< Host threshold register. */
125 uint8_t wthresh; /**< Write-back threshold register. */
126 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
130 * Hardware context number
133 EM_CTX_0 = 0, /**< CTX0 */
134 EM_CTX_NUM = 1, /**< CTX NUM */
137 /** Offload features */
138 union em_vlan_macip {
141 uint16_t l3_len:9; /**< L3 (IP) Header Length. */
142 uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
144 /**< VLAN Tag Control Identifier (CPU order). */
149 * Compare mask for vlan_macip_len.data,
150 * should be in sync with em_vlan_macip.f layout.
152 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
153 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
154 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
155 /** MAC+IP length. */
156 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
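/*
 * Illustrative note (added, not in the original sources): these masks select
 * bit-fields of em_vlan_macip.data. With the layout implied by the masks
 * (l3_len in the low 9 bits, l2_len in the next 7), l2_len = 14 and
 * l3_len = 20 give (14 << 9) | 20 in the lower 16 bits, so
 * (old.data ^ new.data) & TX_MACIP_LEN_CMP_MASK is non-zero whenever the
 * L2/L3 header lengths differ from the cached context.
 */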
159 * Structure to check if a new context needs to be built
162 uint64_t flags; /**< ol_flags related to context build. */
163 uint32_t cmp_mask; /**< compare mask */
164 union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
168 * Structure associated with each TX queue.
171 volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
172 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
173 struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
174 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
175 uint16_t nb_tx_desc; /**< number of TX descriptors. */
176 uint16_t tx_tail; /**< Current value of TDT register. */
177 /**< Start freeing TX buffers if there are fewer free descriptors than
179 uint16_t tx_free_thresh;
180 /**< Number of TX descriptors to use before RS bit is set. */
181 uint16_t tx_rs_thresh;
182 /** Number of TX descriptors used since RS bit was set. */
184 /** Index to last TX descriptor to have been cleaned. */
185 uint16_t last_desc_cleaned;
186 /** Total number of TX descriptors ready to be allocated. */
188 uint16_t queue_id; /**< TX queue index. */
189 uint8_t port_id; /**< Device port identifier. */
190 uint8_t pthresh; /**< Prefetch threshold register. */
191 uint8_t hthresh; /**< Host threshold register. */
192 uint8_t wthresh; /**< Write-back threshold register. */
193 struct em_ctx_info ctx_cache;
194 /**< Hardware context history.*/
198 #define RTE_PMD_USE_PREFETCH
201 #ifdef RTE_PMD_USE_PREFETCH
202 #define rte_em_prefetch(p) rte_prefetch0(p)
204 #define rte_em_prefetch(p) do {} while(0)
207 #ifdef RTE_PMD_PACKET_PREFETCH
208 #define rte_packet_prefetch(p) rte_prefetch1(p)
210 #define rte_packet_prefetch(p) do {} while(0)
213 #ifndef DEFAULT_TX_FREE_THRESH
214 #define DEFAULT_TX_FREE_THRESH 32
215 #endif /* DEFAULT_TX_FREE_THRESH */
217 #ifndef DEFAULT_TX_RS_THRESH
218 #define DEFAULT_TX_RS_THRESH 32
219 #endif /* DEFAULT_TX_RS_THRESH */
222 /*********************************************************************
226 **********************************************************************/
229 * Populates TX context descriptor.
232 em_set_xmit_ctx(struct em_tx_queue* txq,
233 volatile struct e1000_context_desc *ctx_txd,
235 union em_vlan_macip hdrlen)
237 uint32_t cmp_mask, cmd_len;
238 uint16_t ipcse, l2len;
239 struct e1000_context_desc ctx;
242 cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
244 l2len = hdrlen.f.l2_len;
245 ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
247 /* setup IPCS* fields */
248 ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
249 ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
250 offsetof(struct ipv4_hdr, hdr_checksum));
253 * When doing checksum or TCP segmentation with IPv6 headers,
254 * IPCSE field should be set to 0.
256 if (flags & PKT_TX_IP_CKSUM) {
257 ctx.lower_setup.ip_fields.ipcse =
258 (uint16_t)rte_cpu_to_le_16(ipcse - 1);
259 cmd_len |= E1000_TXD_CMD_IP;
260 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
262 ctx.lower_setup.ip_fields.ipcse = 0;
265 /* setup TUCS* fields */
266 ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
267 ctx.upper_setup.tcp_fields.tucse = 0;
269 switch (flags & PKT_TX_L4_MASK) {
270 case PKT_TX_UDP_CKSUM:
271 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
272 offsetof(struct udp_hdr, dgram_cksum));
273 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
275 case PKT_TX_TCP_CKSUM:
276 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
277 offsetof(struct tcp_hdr, cksum));
278 cmd_len |= E1000_TXD_CMD_TCP;
279 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
282 ctx.upper_setup.tcp_fields.tucso = 0;
285 ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
286 ctx.tcp_seg_setup.data = 0;
290 txq->ctx_cache.flags = flags;
291 txq->ctx_cache.cmp_mask = cmp_mask;
292 txq->ctx_cache.hdrlen = hdrlen;
296 * Check which hardware context can be used. Use the existing match
297 * or create a new context descriptor.
299 static inline uint32_t
300 what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
301 union em_vlan_macip hdrlen)
303 /* If the flags and header lengths match the cached context */
304 if (likely (txq->ctx_cache.flags == flags &&
305 ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
306 txq->ctx_cache.cmp_mask) == 0))
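/*
 * Note (added for clarity): the body elided above returns EM_CTX_0 when the
 * cached context can be reused and EM_CTX_NUM otherwise; the transmit path
 * below treats a return value of EM_CTX_NUM as "a new context descriptor
 * must be built" (see new_ctx = (ctx == EM_CTX_NUM)).
 */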
313 /* Reset transmit descriptors after they have been used */
315 em_xmit_cleanup(struct em_tx_queue *txq)
317 struct em_tx_entry *sw_ring = txq->sw_ring;
318 volatile struct e1000_data_desc *txr = txq->tx_ring;
319 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
320 uint16_t nb_tx_desc = txq->nb_tx_desc;
321 uint16_t desc_to_clean_to;
322 uint16_t nb_tx_to_clean;
324 /* Determine the last descriptor needing to be cleaned */
325 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
326 if (desc_to_clean_to >= nb_tx_desc)
327 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
329 /* Check to make sure the last descriptor to clean is done */
330 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
331 if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
333 PMD_TX_FREE_LOG(DEBUG,
334 "TX descriptor %4u is not done"
335 "(port=%d queue=%d)", desc_to_clean_to,
336 txq->port_id, txq->queue_id);
337 /* Failed to clean any descriptors, better luck next time */
341 /* Figure out how many descriptors will be cleaned */
342 if (last_desc_cleaned > desc_to_clean_to)
343 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
346 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
349 PMD_TX_FREE_LOG(DEBUG,
350 "Cleaning %4u TX descriptors: %4u to %4u "
351 "(port=%d queue=%d)", nb_tx_to_clean,
352 last_desc_cleaned, desc_to_clean_to, txq->port_id,
356 * The last descriptor to clean is done, so that means all the
357 * descriptors from the last descriptor that was cleaned
358 * up to the last descriptor with the RS bit set
359 * are done. Only reset the threshold descriptor.
361 txr[desc_to_clean_to].upper.fields.status = 0;
363 /* Update the txq to reflect the last descriptor that was cleaned */
364 txq->last_desc_cleaned = desc_to_clean_to;
365 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
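/*
 * Worked example (illustrative): with nb_tx_desc = 512, tx_rs_thresh = 32
 * and last_desc_cleaned = 500, desc_to_clean_to starts at 532 and wraps to
 * 20. Assuming sw_ring[20].last_id is 20 (single-segment packets) and that
 * descriptor's DD bit is set, nb_tx_to_clean is (512 - 500) + 20 = 32
 * descriptors returned to nb_tx_free.
 */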
371 static inline uint32_t
372 tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
374 static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
375 static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
378 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
379 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
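/*
 * Note (illustrative): the two lookup tables above turn flag tests into
 * branchless array indexing. (ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM
 * evaluates to 0 or 1, selecting either 0 or E1000_TXD_POPTS_TXSM << 8; the
 * IP-checksum test works the same way with E1000_TXD_POPTS_IXSM.
 */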
384 eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
387 struct em_tx_queue *txq;
388 struct em_tx_entry *sw_ring;
389 struct em_tx_entry *txe, *txn;
390 volatile struct e1000_data_desc *txr;
391 volatile struct e1000_data_desc *txd;
392 struct rte_mbuf *tx_pkt;
393 struct rte_mbuf *m_seg;
394 uint64_t buf_dma_addr;
396 uint32_t cmd_type_len;
406 union em_vlan_macip hdrlen;
409 sw_ring = txq->sw_ring;
411 tx_id = txq->tx_tail;
412 txe = &sw_ring[tx_id];
414 /* Determine if the descriptor ring needs to be cleaned. */
415 if (txq->nb_tx_free < txq->tx_free_thresh)
416 em_xmit_cleanup(txq);
419 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
423 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
426 * Determine how many (if any) context descriptors
427 * are needed for offload functionality.
429 ol_flags = tx_pkt->ol_flags;
431 /* If hardware offload required */
432 tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
434 hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
435 hdrlen.f.l2_len = tx_pkt->l2_len;
436 hdrlen.f.l3_len = tx_pkt->l3_len;
437 /* Decide whether a new context must be built or the existing one reused. */
438 ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
440 /* Only allocate a context descriptor if required */
441 new_ctx = (ctx == EM_CTX_NUM);
445 * Keep track of how many descriptors are used in this loop.
446 * This will always be the number of segments plus the number of
447 * Context descriptors required to transmit the packet.
449 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
452 * The number of descriptors that must be allocated for a
453 * packet is the number of segments of that packet, plus 1
454 * Context Descriptor for the hardware offload, if any.
455 * Determine the last TX descriptor to allocate in the TX ring
456 * for the packet, starting from the current position (tx_id)
459 tx_last = (uint16_t) (tx_id + nb_used - 1);
462 if (tx_last >= txq->nb_tx_desc)
463 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
465 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
466 " tx_first=%u tx_last=%u",
467 (unsigned) txq->port_id,
468 (unsigned) txq->queue_id,
469 (unsigned) tx_pkt->pkt_len,
474 * Make sure there are enough TX descriptors available to
475 * transmit the entire packet.
476 * nb_used should be less than or equal to txq->tx_rs_thresh
478 while (unlikely (nb_used > txq->nb_tx_free)) {
479 PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
480 "nb_used=%4u nb_free=%4u "
481 "(port=%d queue=%d)",
482 nb_used, txq->nb_tx_free,
483 txq->port_id, txq->queue_id);
485 if (em_xmit_cleanup(txq) != 0) {
486 /* Could not clean any descriptors */
494 * By now there are enough free TX descriptors to transmit
499 * Set common flags of all TX Data Descriptors.
501 * The following bits must be set in all Data Descriptors:
502 * - E1000_TXD_DTYP_DATA
503 * - E1000_TXD_DTYP_DEXT
505 * The following bits must be set in the first Data Descriptor
506 * and are ignored in the other ones:
507 * - E1000_TXD_POPTS_IXSM
508 * - E1000_TXD_POPTS_TXSM
510 * The following bits must be set in the last Data Descriptor
511 * and are ignored in the other ones:
512 * - E1000_TXD_CMD_VLE
513 * - E1000_TXD_CMD_IFCS
515 * The following bits must only be set in the last Data
517 * - E1000_TXD_CMD_EOP
519 * The following bits can be set in any Data Descriptor, but
520 * are only set in the last Data Descriptor:
523 cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
527 /* Set VLAN Tag offload fields. */
528 if (ol_flags & PKT_TX_VLAN_PKT) {
529 cmd_type_len |= E1000_TXD_CMD_VLE;
530 popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
535 * Setup the TX Context Descriptor if required
538 volatile struct e1000_context_desc *ctx_txd;
540 ctx_txd = (volatile struct e1000_context_desc *)
543 txn = &sw_ring[txe->next_id];
544 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
546 if (txe->mbuf != NULL) {
547 rte_pktmbuf_free_seg(txe->mbuf);
551 em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
554 txe->last_id = tx_last;
555 tx_id = txe->next_id;
560 * Set up the TX Data Descriptor.
561 * This path is taken whether a new context descriptor
562 * was built or an existing one is being reused.
564 popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
570 txn = &sw_ring[txe->next_id];
572 if (txe->mbuf != NULL)
573 rte_pktmbuf_free_seg(txe->mbuf);
577 * Set up Transmit Data Descriptor.
579 slen = m_seg->data_len;
580 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
582 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
583 txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
584 txd->upper.data = rte_cpu_to_le_32(popts_spec);
586 txe->last_id = tx_last;
587 tx_id = txe->next_id;
590 } while (m_seg != NULL);
593 * The last packet data descriptor needs End Of Packet (EOP)
595 cmd_type_len |= E1000_TXD_CMD_EOP;
596 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
597 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
599 /* Set RS bit only on threshold packets' last descriptor */
600 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
601 PMD_TX_FREE_LOG(DEBUG,
602 "Setting RS bit on TXD id=%4u "
603 "(port=%d queue=%d)",
604 tx_last, txq->port_id, txq->queue_id);
606 cmd_type_len |= E1000_TXD_CMD_RS;
608 /* Update txq RS bit counters */
611 txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
617 * Set the Transmit Descriptor Tail (TDT)
619 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
620 (unsigned) txq->port_id, (unsigned) txq->queue_id,
621 (unsigned) tx_id, (unsigned) nb_tx);
622 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
623 txq->tx_tail = tx_id;
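/*
 * Usage sketch (application side, not part of this file): this function is
 * expected to be installed as the device's tx_pkt_burst callback (done in
 * the ethdev init code, not shown), so applications reach it through the
 * generic API, e.g.:
 *
 *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * 'sent' may be less than nb_pkts if the ring runs out of free descriptors;
 * port_id, queue_id, pkts and nb_pkts are placeholders for caller values.
 */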
628 /*********************************************************************
632 **********************************************************************/
634 eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
640 for (i = 0; i < nb_pkts; i++) {
643 if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
644 rte_errno = ENOTSUP; /* rte_errno takes positive errno values */
648 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
649 ret = rte_validate_tx_offload(m);
655 ret = rte_net_intel_cksum_prepare(m);
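/*
 * Usage sketch (application side, assumed typical flow): when checksum
 * offloads are requested, callers are expected to run the prepare stage
 * before transmitting, e.g.:
 *
 *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *   uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * On failure, rte_errno reports why a given packet was rejected.
 */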
665 /*********************************************************************
669 **********************************************************************/
671 static inline uint64_t
672 rx_desc_status_to_pkt_flags(uint32_t rx_status)
676 /* Check if VLAN present */
677 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
678 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
683 static inline uint64_t
684 rx_desc_error_to_pkt_flags(uint32_t rx_error)
686 uint64_t pkt_flags = 0;
688 if (rx_error & E1000_RXD_ERR_IPE)
689 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
690 if (rx_error & E1000_RXD_ERR_TCPE)
691 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
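/*
 * Note (illustrative): the flags produced above end up in mbuf->ol_flags, so
 * an application can test the RX checksum verdict with, e.g.,
 * (m->ol_flags & PKT_RX_IP_CKSUM_BAD) after rte_eth_rx_burst() returns.
 */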
696 eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
699 volatile struct e1000_rx_desc *rx_ring;
700 volatile struct e1000_rx_desc *rxdp;
701 struct em_rx_queue *rxq;
702 struct em_rx_entry *sw_ring;
703 struct em_rx_entry *rxe;
704 struct rte_mbuf *rxm;
705 struct rte_mbuf *nmb;
706 struct e1000_rx_desc rxd;
718 rx_id = rxq->rx_tail;
719 rx_ring = rxq->rx_ring;
720 sw_ring = rxq->sw_ring;
721 while (nb_rx < nb_pkts) {
723 * The order of operations here is important as the DD status
724 * bit must not be read after any other descriptor fields.
725 * rx_ring and rxdp are pointing to volatile data so the order
726 * of accesses cannot be reordered by the compiler. If they were
727 * not volatile, they could be reordered which could lead to
728 * using invalid descriptor fields when read from rxd.
730 rxdp = &rx_ring[rx_id];
731 status = rxdp->status;
732 if (! (status & E1000_RXD_STAT_DD))
739 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
740 * likely to be invalid and to be dropped by the various
741 * validation checks performed by the network stack.
743 * Allocate a new mbuf to replenish the RX ring descriptor.
744 * If the allocation fails:
745 * - arrange for that RX descriptor to be the first one
746 * being parsed the next time the receive function is
747 * invoked [on the same queue].
749 * - Stop parsing the RX ring and return immediately.
751 * This policy does not drop the packet received in the RX
752 * descriptor for which the allocation of a new mbuf failed.
753 * Thus, it allows that packet to be later retrieved if
754 * mbufs have been freed in the meantime.
755 * As a side effect, holding RX descriptors instead of
756 * systematically giving them back to the NIC may lead to
757 * RX ring exhaustion situations.
758 * However, the NIC can gracefully prevent such situations
759 * to happen by sending specific "back-pressure" flow control
760 * frames to its peer(s).
762 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
763 "status=0x%x pkt_len=%u",
764 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
765 (unsigned) rx_id, (unsigned) status,
766 (unsigned) rte_le_to_cpu_16(rxd.length));
768 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
770 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
772 (unsigned) rxq->port_id,
773 (unsigned) rxq->queue_id);
774 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
779 rxe = &sw_ring[rx_id];
781 if (rx_id == rxq->nb_rx_desc)
784 /* Prefetch next mbuf while processing current one. */
785 rte_em_prefetch(sw_ring[rx_id].mbuf);
788 * When next RX descriptor is on a cache-line boundary,
789 * prefetch the next 4 RX descriptors and the next 8 pointers
792 if ((rx_id & 0x3) == 0) {
793 rte_em_prefetch(&rx_ring[rx_id]);
794 rte_em_prefetch(&sw_ring[rx_id]);
797 /* Rearm RXD: attach new mbuf and reset status to zero. */
802 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
803 rxdp->buffer_addr = dma_addr;
807 * Initialize the returned mbuf.
808 * 1) setup generic mbuf fields:
809 * - number of segments,
812 * - RX port identifier.
813 * 2) integrate hardware offload data, if any:
815 * - IP checksum flag,
816 * - VLAN TCI, if any,
819 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
821 rxm->data_off = RTE_PKTMBUF_HEADROOM;
822 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
825 rxm->pkt_len = pkt_len;
826 rxm->data_len = pkt_len;
827 rxm->port = rxq->port_id;
829 rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
830 rxm->ol_flags = rxm->ol_flags |
831 rx_desc_error_to_pkt_flags(rxd.errors);
833 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
834 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
837 * Store the mbuf address into the next entry of the array
838 * of returned packets.
840 rx_pkts[nb_rx++] = rxm;
842 rxq->rx_tail = rx_id;
845 * If the number of free RX descriptors is greater than the RX free
846 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
848 * Update the RDT with the value of the last processed RX descriptor
849 * minus 1, to guarantee that the RDT register is never equal to the
850 * RDH register, which creates a "full" ring situation from the
851 * hardware point of view...
853 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
854 if (nb_hold > rxq->rx_free_thresh) {
855 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
856 "nb_hold=%u nb_rx=%u",
857 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
858 (unsigned) rx_id, (unsigned) nb_hold,
860 rx_id = (uint16_t) ((rx_id == 0) ?
861 (rxq->nb_rx_desc - 1) : (rx_id - 1));
862 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
865 rxq->nb_rx_hold = nb_hold;
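/*
 * Usage sketch (application side): this routine is the default
 * dev->rx_pkt_burst callback (see eth_em_rx_init() below), reached via:
 *
 *   struct rte_mbuf *bufs[32];
 *   uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *
 * The burst size of 32 is only an example value.
 */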
870 eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
873 struct em_rx_queue *rxq;
874 volatile struct e1000_rx_desc *rx_ring;
875 volatile struct e1000_rx_desc *rxdp;
876 struct em_rx_entry *sw_ring;
877 struct em_rx_entry *rxe;
878 struct rte_mbuf *first_seg;
879 struct rte_mbuf *last_seg;
880 struct rte_mbuf *rxm;
881 struct rte_mbuf *nmb;
882 struct e1000_rx_desc rxd;
883 uint64_t dma; /* Physical address of mbuf data buffer */
894 rx_id = rxq->rx_tail;
895 rx_ring = rxq->rx_ring;
896 sw_ring = rxq->sw_ring;
899 * Retrieve RX context of current packet, if any.
901 first_seg = rxq->pkt_first_seg;
902 last_seg = rxq->pkt_last_seg;
904 while (nb_rx < nb_pkts) {
907 * The order of operations here is important as the DD status
908 * bit must not be read after any other descriptor fields.
909 * rx_ring and rxdp are pointing to volatile data so the order
910 * of accesses cannot be reordered by the compiler. If they were
911 * not volatile, they could be reordered which could lead to
912 * using invalid descriptor fields when read from rxd.
914 rxdp = &rx_ring[rx_id];
915 status = rxdp->status;
916 if (! (status & E1000_RXD_STAT_DD))
923 * Allocate a new mbuf to replenish the RX ring descriptor.
924 * If the allocation fails:
925 * - arrange for that RX descriptor to be the first one
926 * being parsed the next time the receive function is
927 * invoked [on the same queue].
929 * - Stop parsing the RX ring and return immediately.
931 * This policy does not drop the packet received in the RX
932 * descriptor for which the allocation of a new mbuf failed.
933 * Thus, it allows that packet to be later retrieved if
934 * mbuf have been freed in the mean time.
935 * As a side effect, holding RX descriptors instead of
936 * systematically giving them back to the NIC may lead to
937 * RX ring exhaustion situations.
938 * However, the NIC can gracefully prevent such situations
939 * to happen by sending specific "back-pressure" flow control
940 * frames to its peer(s).
942 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
943 "status=0x%x data_len=%u",
944 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
945 (unsigned) rx_id, (unsigned) status,
946 (unsigned) rte_le_to_cpu_16(rxd.length));
948 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
950 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
951 "queue_id=%u", (unsigned) rxq->port_id,
952 (unsigned) rxq->queue_id);
953 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
958 rxe = &sw_ring[rx_id];
960 if (rx_id == rxq->nb_rx_desc)
963 /* Prefetch next mbuf while processing current one. */
964 rte_em_prefetch(sw_ring[rx_id].mbuf);
967 * When next RX descriptor is on a cache-line boundary,
968 * prefetch the next 4 RX descriptors and the next 8 pointers
971 if ((rx_id & 0x3) == 0) {
972 rte_em_prefetch(&rx_ring[rx_id]);
973 rte_em_prefetch(&sw_ring[rx_id]);
977 * Update RX descriptor with the physical address of the new
978 * data buffer of the newly allocated mbuf.
982 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
983 rxdp->buffer_addr = dma;
987 * Set data length & data buffer address of mbuf.
989 data_len = rte_le_to_cpu_16(rxd.length);
990 rxm->data_len = data_len;
991 rxm->data_off = RTE_PKTMBUF_HEADROOM;
994 * If this is the first buffer of the received packet,
995 * set the pointer to the first mbuf of the packet and
996 * initialize its context.
997 * Otherwise, update the total length and the number of segments
998 * of the current scattered packet, and update the pointer to
999 * the last mbuf of the current packet.
1001 if (first_seg == NULL) {
1003 first_seg->pkt_len = data_len;
1004 first_seg->nb_segs = 1;
1006 first_seg->pkt_len += data_len;
1007 first_seg->nb_segs++;
1008 last_seg->next = rxm;
1012 * If this is not the last buffer of the received packet,
1013 * update the pointer to the last mbuf of the current scattered
1014 * packet and continue to parse the RX ring.
1016 if (! (status & E1000_RXD_STAT_EOP)) {
1022 * This is the last buffer of the received packet.
1023 * If the CRC is not stripped by the hardware:
1024 * - Subtract the CRC length from the total packet length.
1025 * - If the last buffer only contains the whole CRC or a part
1026 * of it, free the mbuf associated to the last buffer.
1027 * If part of the CRC is also contained in the previous
1028 * mbuf, subtract the length of that CRC part from the
1029 * data length of the previous mbuf.
1032 if (unlikely(rxq->crc_len > 0)) {
1033 first_seg->pkt_len -= ETHER_CRC_LEN;
1034 if (data_len <= ETHER_CRC_LEN) {
1035 rte_pktmbuf_free_seg(rxm);
1036 first_seg->nb_segs--;
1037 last_seg->data_len = (uint16_t)
1038 (last_seg->data_len -
1039 (ETHER_CRC_LEN - data_len));
1040 last_seg->next = NULL;
1043 (uint16_t) (data_len - ETHER_CRC_LEN);
1047 * Initialize the first mbuf of the returned packet:
1048 * - RX port identifier,
1049 * - hardware offload data, if any:
1050 * - IP checksum flag,
1053 first_seg->port = rxq->port_id;
1055 first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
1056 first_seg->ol_flags = first_seg->ol_flags |
1057 rx_desc_error_to_pkt_flags(rxd.errors);
1059 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1060 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
1062 /* Prefetch data of first segment, if configured to do so. */
1063 rte_packet_prefetch((char *)first_seg->buf_addr +
1064 first_seg->data_off);
1067 * Store the mbuf address into the next entry of the array
1068 * of returned packets.
1070 rx_pkts[nb_rx++] = first_seg;
1073 * Setup receipt context for a new packet.
1079 * Record index of the next RX descriptor to probe.
1081 rxq->rx_tail = rx_id;
1084 * Save receive context.
1086 rxq->pkt_first_seg = first_seg;
1087 rxq->pkt_last_seg = last_seg;
1090 * If the number of free RX descriptors is greater than the RX free
1091 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1093 * Update the RDT with the value of the last processed RX descriptor
1094 * minus 1, to guarantee that the RDT register is never equal to the
1095 * RDH register, which creates a "full" ring situation from the
1096 * hardware point of view...
1098 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1099 if (nb_hold > rxq->rx_free_thresh) {
1100 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1101 "nb_hold=%u nb_rx=%u",
1102 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1103 (unsigned) rx_id, (unsigned) nb_hold,
1105 rx_id = (uint16_t) ((rx_id == 0) ?
1106 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1107 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1110 rxq->nb_rx_hold = nb_hold;
1114 #define EM_MAX_BUF_SIZE 16384
1115 #define EM_RCTL_FLXBUF_STEP 1024
1118 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
1122 if (txq->sw_ring != NULL) {
1123 for (i = 0; i != txq->nb_tx_desc; i++) {
1124 if (txq->sw_ring[i].mbuf != NULL) {
1125 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1126 txq->sw_ring[i].mbuf = NULL;
1133 em_tx_queue_release(struct em_tx_queue *txq)
1136 em_tx_queue_release_mbufs(txq);
1137 rte_free(txq->sw_ring);
1143 eth_em_tx_queue_release(void *txq)
1145 em_tx_queue_release(txq);
1148 /* (Re)set dynamic em_tx_queue fields to defaults */
1150 em_reset_tx_queue(struct em_tx_queue *txq)
1152 uint16_t i, nb_desc, prev;
1153 static const struct e1000_data_desc txd_init = {
1154 .upper.fields = {.status = E1000_TXD_STAT_DD},
1157 nb_desc = txq->nb_tx_desc;
1159 /* Initialize ring entries */
1161 prev = (uint16_t) (nb_desc - 1);
1163 for (i = 0; i < nb_desc; i++) {
1164 txq->tx_ring[i] = txd_init;
1165 txq->sw_ring[i].mbuf = NULL;
1166 txq->sw_ring[i].last_id = i;
1167 txq->sw_ring[prev].next_id = i;
1172 * Always allow 1 descriptor to be un-allocated to avoid
1173 * a H/W race condition
1175 txq->nb_tx_free = (uint16_t)(nb_desc - 1);
1176 txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
1177 txq->nb_tx_used = 0;
1180 memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
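/*
 * Note (illustrative): sw_ring[prev].next_id = i in the loop above links each
 * entry to its successor; since prev starts at nb_desc - 1, the first
 * iteration makes the last entry point back at entry 0, closing the circle
 * (prev is presumably advanced to i on an elided line), so the transmit path
 * can follow next_id without ever checking for wrap-around.
 */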
1184 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
1187 unsigned int socket_id,
1188 const struct rte_eth_txconf *tx_conf)
1190 const struct rte_memzone *tz;
1191 struct em_tx_queue *txq;
1192 struct e1000_hw *hw;
1194 uint16_t tx_rs_thresh, tx_free_thresh;
1196 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1199 * Validate number of transmit descriptors.
1200 * It must not exceed the hardware maximum and must be a multiple of EM_TXD_ALIGN.
1203 if (nb_desc % EM_TXD_ALIGN != 0 ||
1204 (nb_desc > E1000_MAX_RING_DESC) ||
1205 (nb_desc < E1000_MIN_RING_DESC)) {
1209 tx_free_thresh = tx_conf->tx_free_thresh;
1210 if (tx_free_thresh == 0)
1211 tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
1212 DEFAULT_TX_FREE_THRESH);
1214 tx_rs_thresh = tx_conf->tx_rs_thresh;
1215 if (tx_rs_thresh == 0)
1216 tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
1217 DEFAULT_TX_RS_THRESH);
1219 if (tx_free_thresh >= (nb_desc - 3)) {
1220 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1221 "number of TX descriptors minus 3. "
1222 "(tx_free_thresh=%u port=%d queue=%d)",
1223 (unsigned int)tx_free_thresh,
1224 (int)dev->data->port_id, (int)queue_idx);
1227 if (tx_rs_thresh > tx_free_thresh) {
1228 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
1229 "tx_free_thresh. (tx_free_thresh=%u "
1230 "tx_rs_thresh=%u port=%d queue=%d)",
1231 (unsigned int)tx_free_thresh,
1232 (unsigned int)tx_rs_thresh,
1233 (int)dev->data->port_id,
1239 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
1240 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
1241 * by the NIC and all descriptors are written back after the NIC
1242 * accumulates WTHRESH descriptors.
1244 if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
1245 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1246 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
1247 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
1248 (int)dev->data->port_id, (int)queue_idx);
1252 /* Free memory prior to re-allocation if needed... */
1253 if (dev->data->tx_queues[queue_idx] != NULL) {
1254 em_tx_queue_release(dev->data->tx_queues[queue_idx]);
1255 dev->data->tx_queues[queue_idx] = NULL;
1259 * Allocate TX ring hardware descriptors. A memzone large enough to
1260 * handle the maximum ring size is allocated in order to allow for
1261 * resizing in later calls to the queue setup function.
1263 tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
1264 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
1265 RTE_CACHE_LINE_SIZE, socket_id);
1269 /* Allocate the tx queue data structure. */
1270 if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
1271 RTE_CACHE_LINE_SIZE)) == NULL)
1274 /* Allocate software ring */
1275 if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
1276 sizeof(txq->sw_ring[0]) * nb_desc,
1277 RTE_CACHE_LINE_SIZE)) == NULL) {
1278 em_tx_queue_release(txq);
1282 txq->nb_tx_desc = nb_desc;
1283 txq->tx_free_thresh = tx_free_thresh;
1284 txq->tx_rs_thresh = tx_rs_thresh;
1285 txq->pthresh = tx_conf->tx_thresh.pthresh;
1286 txq->hthresh = tx_conf->tx_thresh.hthresh;
1287 txq->wthresh = tx_conf->tx_thresh.wthresh;
1288 txq->queue_id = queue_idx;
1289 txq->port_id = dev->data->port_id;
1291 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1292 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1293 txq->tx_ring = (struct e1000_data_desc *) tz->addr;
1295 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1296 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1298 em_reset_tx_queue(txq);
1300 dev->data->tx_queues[queue_idx] = txq;
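/*
 * Usage sketch (application side, placeholder values): a TX queue on this
 * port is typically created before rte_eth_dev_start(), e.g.:
 *
 *   struct rte_eth_txconf txconf = { .tx_free_thresh = 0, .tx_rs_thresh = 0 };
 *   int rc = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *
 * Zero thresholds let the defaults above (DEFAULT_TX_FREE_THRESH,
 * DEFAULT_TX_RS_THRESH) apply.
 */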
1305 em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
1309 if (rxq->sw_ring != NULL) {
1310 for (i = 0; i != rxq->nb_rx_desc; i++) {
1311 if (rxq->sw_ring[i].mbuf != NULL) {
1312 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1313 rxq->sw_ring[i].mbuf = NULL;
1320 em_rx_queue_release(struct em_rx_queue *rxq)
1323 em_rx_queue_release_mbufs(rxq);
1324 rte_free(rxq->sw_ring);
1330 eth_em_rx_queue_release(void *rxq)
1332 em_rx_queue_release(rxq);
1335 /* Reset dynamic em_rx_queue fields back to defaults */
1337 em_reset_rx_queue(struct em_rx_queue *rxq)
1340 rxq->nb_rx_hold = 0;
1341 rxq->pkt_first_seg = NULL;
1342 rxq->pkt_last_seg = NULL;
1346 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
1349 unsigned int socket_id,
1350 const struct rte_eth_rxconf *rx_conf,
1351 struct rte_mempool *mp)
1353 const struct rte_memzone *rz;
1354 struct em_rx_queue *rxq;
1355 struct e1000_hw *hw;
1358 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1361 * Validate number of receive descriptors.
1362 * It must not exceed the hardware maximum and must be a multiple of EM_RXD_ALIGN.
1365 if (nb_desc % EM_RXD_ALIGN != 0 ||
1366 (nb_desc > E1000_MAX_RING_DESC) ||
1367 (nb_desc < E1000_MIN_RING_DESC)) {
1372 * EM devices don't support drop_en functionality
1374 if (rx_conf->rx_drop_en) {
1375 PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
1380 /* Free memory prior to re-allocation if needed. */
1381 if (dev->data->rx_queues[queue_idx] != NULL) {
1382 em_rx_queue_release(dev->data->rx_queues[queue_idx]);
1383 dev->data->rx_queues[queue_idx] = NULL;
1386 /* Allocate RX ring for max possible number of hardware descriptors. */
1387 rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
1388 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
1389 RTE_CACHE_LINE_SIZE, socket_id);
1393 /* Allocate the RX queue data structure. */
1394 if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
1395 RTE_CACHE_LINE_SIZE)) == NULL)
1398 /* Allocate software ring. */
1399 if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1400 sizeof (rxq->sw_ring[0]) * nb_desc,
1401 RTE_CACHE_LINE_SIZE)) == NULL) {
1402 em_rx_queue_release(rxq);
1407 rxq->nb_rx_desc = nb_desc;
1408 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1409 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1410 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1411 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1412 rxq->queue_id = queue_idx;
1413 rxq->port_id = dev->data->port_id;
1414 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
1417 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1418 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
1419 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1420 rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
1422 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1423 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1425 dev->data->rx_queues[queue_idx] = rxq;
1426 em_reset_rx_queue(rxq);
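/*
 * Usage sketch (application side, placeholder values): the matching RX setup
 * call supplies the mempool used to refill the ring, e.g.:
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
 *                                                    RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                                    rte_socket_id());
 *   int rc = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *
 * Passing NULL for rx_conf selects the driver defaults.
 */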
1432 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1434 #define EM_RXQ_SCAN_INTERVAL 4
1435 volatile struct e1000_rx_desc *rxdp;
1436 struct em_rx_queue *rxq;
1439 rxq = dev->data->rx_queues[rx_queue_id];
1440 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1442 while ((desc < rxq->nb_rx_desc) &&
1443 (rxdp->status & E1000_RXD_STAT_DD)) {
1444 desc += EM_RXQ_SCAN_INTERVAL;
1445 rxdp += EM_RXQ_SCAN_INTERVAL;
1446 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1447 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1448 desc - rxq->nb_rx_desc]);
1455 eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
1457 volatile struct e1000_rx_desc *rxdp;
1458 struct em_rx_queue *rxq = rx_queue;
1461 if (unlikely(offset >= rxq->nb_rx_desc))
1463 desc = rxq->rx_tail + offset;
1464 if (desc >= rxq->nb_rx_desc)
1465 desc -= rxq->nb_rx_desc;
1467 rxdp = &rxq->rx_ring[desc];
1468 return !!(rxdp->status & E1000_RXD_STAT_DD);
1472 em_dev_clear_queues(struct rte_eth_dev *dev)
1475 struct em_tx_queue *txq;
1476 struct em_rx_queue *rxq;
1478 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1479 txq = dev->data->tx_queues[i];
1481 em_tx_queue_release_mbufs(txq);
1482 em_reset_tx_queue(txq);
1486 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1487 rxq = dev->data->rx_queues[i];
1489 em_rx_queue_release_mbufs(rxq);
1490 em_reset_rx_queue(rxq);
1496 em_dev_free_queues(struct rte_eth_dev *dev)
1500 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1501 eth_em_rx_queue_release(dev->data->rx_queues[i]);
1502 dev->data->rx_queues[i] = NULL;
1504 dev->data->nb_rx_queues = 0;
1506 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1507 eth_em_tx_queue_release(dev->data->tx_queues[i]);
1508 dev->data->tx_queues[i] = NULL;
1510 dev->data->nb_tx_queues = 0;
1514 * Takes the RX buffer size as an input/output parameter.
1515 * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
1518 em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
1521 * For BSIZE & BSEX all configurable sizes are:
1522 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1523 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1524 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1525 * 2048: rctl |= E1000_RCTL_SZ_2048;
1526 * 1024: rctl |= E1000_RCTL_SZ_1024;
1527 * 512: rctl |= E1000_RCTL_SZ_512;
1528 * 256: rctl |= E1000_RCTL_SZ_256;
1530 static const struct {
1533 } bufsz_to_rctl[] = {
1534 {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
1535 {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
1536 {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
1537 {2048, E1000_RCTL_SZ_2048},
1538 {1024, E1000_RCTL_SZ_1024},
1539 {512, E1000_RCTL_SZ_512},
1540 {256, E1000_RCTL_SZ_256},
1544 uint32_t rctl_bsize;
1546 rctl_bsize = *bufsz;
1549 * Starting from 82571 it is possible to specify RX buffer size
1550 * by RCTL.FLXBUF. When this field is different from zero, the
1551 * RX buffer size = RCTL.FLXBUF * 1K
1552 * (e.g. it is possible to specify an RX buffer size of 1,2,...,15KB).
1553 * It works ok on real HW, but for some reason doesn't work
1554 * on VMware emulated 82574L.
1555 * So for now, always use BSIZE/BSEX to setup RX buffer size.
1556 * If you don't plan to use it on VMware emulated 82574L and
1557 * would like to specify RX buffer size in 1K granularity,
1558 * uncomment the following lines:
1559 * ***************************************************************
1560 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
1561 * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
1562 * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
1563 * *bufsz = rctl_bsize;
1564 * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
1565 * E1000_RCTL_FLXBUF_MASK);
1567 * ***************************************************************
1570 for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
1572 if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
1573 *bufsz = bufsz_to_rctl[i].bufsz;
1574 return bufsz_to_rctl[i].rctl;
1578 /* Should never happen. */
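/*
 * Worked example (illustrative): a mempool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE leaves 2048 bytes of data room after
 * RTE_PKTMBUF_HEADROOM, so eth_em_rx_init() below passes 2048 here, the loop
 * above matches the 2048 entry, and the caller ORs E1000_RCTL_SZ_2048 into
 * RCTL while *bufsz stays 2048.
 */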
1583 em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
1585 struct em_rx_entry *rxe = rxq->sw_ring;
1588 static const struct e1000_rx_desc rxd_init = {
1592 /* Initialize software ring entries */
1593 for (i = 0; i < rxq->nb_rx_desc; i++) {
1594 volatile struct e1000_rx_desc *rxd;
1595 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1598 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1599 "queue_id=%hu", rxq->queue_id);
1604 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1606 /* Clear HW ring memory */
1607 rxq->rx_ring[i] = rxd_init;
1609 rxd = &rxq->rx_ring[i];
1610 rxd->buffer_addr = dma_addr;
1617 /*********************************************************************
1619 * Enable receive unit.
1621 **********************************************************************/
1623 eth_em_rx_init(struct rte_eth_dev *dev)
1625 struct e1000_hw *hw;
1626 struct em_rx_queue *rxq;
1630 uint32_t rctl_bsize;
1634 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1637 * Make sure receives are disabled while setting
1638 * up the descriptor ring.
1640 rctl = E1000_READ_REG(hw, E1000_RCTL);
1641 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1643 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1645 /* Disable extended descriptor type. */
1646 rfctl &= ~E1000_RFCTL_EXTEN;
1647 /* Disable accelerated acknowledge */
1648 if (hw->mac.type == e1000_82574)
1649 rfctl |= E1000_RFCTL_ACK_DIS;
1651 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1654 * XXX TEMPORARY WORKAROUND: on some systems with 82573
1655 * long latencies are observed, like Lenovo X60. This
1656 * change eliminates the problem, but since having positive
1657 * values in RDTR is a known source of problems on other
1658 * platforms another solution is being sought.
1660 if (hw->mac.type == e1000_82573)
1661 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
1663 dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
1665 /* Determine RX bufsize. */
1666 rctl_bsize = EM_MAX_BUF_SIZE;
1667 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1670 rxq = dev->data->rx_queues[i];
1671 buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
1672 RTE_PKTMBUF_HEADROOM;
1673 rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
1676 rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
1678 /* Configure and enable each RX queue. */
1679 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1683 rxq = dev->data->rx_queues[i];
1685 /* Allocate buffers for descriptor rings and setup queue */
1686 ret = em_alloc_rx_queue_mbufs(rxq);
1691 * Reset crc_len in case it was changed after queue setup by a
1695 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1698 bus_addr = rxq->rx_ring_phys_addr;
1699 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1701 sizeof(*rxq->rx_ring));
1702 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1703 (uint32_t)(bus_addr >> 32));
1704 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1706 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1707 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1709 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1710 rxdctl &= 0xFE000000;
1711 rxdctl |= rxq->pthresh & 0x3F;
1712 rxdctl |= (rxq->hthresh & 0x3F) << 8;
1713 rxdctl |= (rxq->wthresh & 0x3F) << 16;
1714 rxdctl |= E1000_RXDCTL_GRAN;
1715 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1718 * Due to EM devices not having any sort of hardware
1719 * limit for packet length, jumbo frames of any size
1720 * can be accepted, thus we have to enable scattered
1721 * rx if jumbo frames are enabled (or if buffer size
1722 * is too small to accommodate non-jumbo packets)
1723 * to avoid splitting packets that don't fit into
1726 if (dev->data->dev_conf.rxmode.jumbo_frame ||
1727 rctl_bsize < ETHER_MAX_LEN) {
1728 if (!dev->data->scattered_rx)
1729 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1731 (eth_rx_burst_t)eth_em_recv_scattered_pkts;
1732 dev->data->scattered_rx = 1;
1736 if (dev->data->dev_conf.rxmode.enable_scatter) {
1737 if (!dev->data->scattered_rx)
1738 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1739 dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
1740 dev->data->scattered_rx = 1;
1744 * Setup the Checksum Register.
1745 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1747 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1749 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1750 rxcsum |= E1000_RXCSUM_IPOFL;
1752 rxcsum &= ~E1000_RXCSUM_IPOFL;
1753 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1755 /* No MRQ or RSS support for now */
1757 /* Set early receive threshold on appropriate hw */
1758 if ((hw->mac.type == e1000_ich9lan ||
1759 hw->mac.type == e1000_pch2lan ||
1760 hw->mac.type == e1000_ich10lan) &&
1761 dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1762 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1763 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
1764 E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
1767 if (hw->mac.type == e1000_pch2lan) {
1768 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1769 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
1771 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
1774 /* Setup the Receive Control Register. */
1775 if (dev->data->dev_conf.rxmode.hw_strip_crc)
1776 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1778 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1780 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1781 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1782 E1000_RCTL_RDMTS_HALF |
1783 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1785 /* Make sure VLAN Filters are off. */
1786 rctl &= ~E1000_RCTL_VFE;
1787 /* Don't store bad packets. */
1788 rctl &= ~E1000_RCTL_SBP;
1789 /* Legacy descriptor type. */
1790 rctl &= ~E1000_RCTL_DTYP_MASK;
1793 * Configure support of jumbo frames, if any.
1795 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1796 rctl |= E1000_RCTL_LPE;
1798 rctl &= ~E1000_RCTL_LPE;
1800 /* Enable Receives. */
1801 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
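/*
 * Usage sketch (application side): eth_em_rx_init()/eth_em_tx_init() are run
 * from the device start hook, so the usual sequence that reaches them is:
 *
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *   ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() per queue ...
 *   rte_eth_dev_start(port_id);
 *
 * port_conf and the queue counts are placeholders for the caller's values.
 */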
1806 /*********************************************************************
1808 * Enable transmit unit.
1810 **********************************************************************/
1812 eth_em_tx_init(struct rte_eth_dev *dev)
1814 struct e1000_hw *hw;
1815 struct em_tx_queue *txq;
1820 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822 /* Setup the Base and Length of the Tx Descriptor Rings. */
1823 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1826 txq = dev->data->tx_queues[i];
1827 bus_addr = txq->tx_ring_phys_addr;
1828 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1830 sizeof(*txq->tx_ring));
1831 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1832 (uint32_t)(bus_addr >> 32));
1833 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1835 /* Setup the HW Tx Head and Tail descriptor pointers. */
1836 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1837 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1839 /* Setup Transmit threshold registers. */
1840 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1842 * bit 22 is reserved; on some models it should always be 0,
1843 * on others always 1.
1845 txdctl &= E1000_TXDCTL_COUNT_DESC;
1846 txdctl |= txq->pthresh & 0x3F;
1847 txdctl |= (txq->hthresh & 0x3F) << 8;
1848 txdctl |= (txq->wthresh & 0x3F) << 16;
1849 txdctl |= E1000_TXDCTL_GRAN;
1850 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1853 /* Program the Transmit Control Register. */
1854 tctl = E1000_READ_REG(hw, E1000_TCTL);
1855 tctl &= ~E1000_TCTL_CT;
1856 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1857 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1859 /* This write will effectively turn on the transmit unit. */
1860 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1864 em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1865 struct rte_eth_rxq_info *qinfo)
1867 struct em_rx_queue *rxq;
1869 rxq = dev->data->rx_queues[queue_id];
1871 qinfo->mp = rxq->mb_pool;
1872 qinfo->scattered_rx = dev->data->scattered_rx;
1873 qinfo->nb_desc = rxq->nb_rx_desc;
1874 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1878 em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1879 struct rte_eth_txq_info *qinfo)
1881 struct em_tx_queue *txq;
1883 txq = dev->data->tx_queues[queue_id];
1885 qinfo->nb_desc = txq->nb_tx_desc;
1887 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1888 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1889 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1890 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1891 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;