1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
4 */
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
14 #include "ngbe_logs.h"
15 #include "base/ngbe.h"
16 #include "ngbe_ethdev.h"
17 #include "ngbe_rxtx.h"
19 /* Bit mask indicating which bits are required to build the Tx context */
20 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
21 RTE_MBUF_F_TX_OUTER_IPV6 |
22 RTE_MBUF_F_TX_OUTER_IPV4 |
26 RTE_MBUF_F_TX_L4_MASK |
27 RTE_MBUF_F_TX_TCP_SEG |
28 RTE_MBUF_F_TX_TUNNEL_MASK |
29 RTE_MBUF_F_TX_OUTER_IP_CKSUM);
30 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
31 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK)
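/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * ngbe_prep_pkts() below must reject any mbuf requesting an offload
 * outside NGBE_TX_OFFLOAD_MASK, which boils down to this check:
 */
static inline int
ngbe_tx_offload_supported(uint64_t ol_flags)
{
	return (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) == 0;
}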
34 * Prefetch a cache line into all cache levels.
36 #define rte_ngbe_prefetch(p) rte_prefetch0(p)
38 /*********************************************************************
40 * Tx functions
42 **********************************************************************/
45 * Check for descriptors with their DD bit set and free mbufs.
46 * Return the total number of buffers freed.
48 static __rte_always_inline int
49 ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
51 struct ngbe_tx_entry *txep;
54 struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
56 /* check DD bit on threshold descriptor */
57 status = txq->tx_ring[txq->tx_next_dd].dw3;
58 if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
59 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
60 ngbe_set32_masked(txq->tdc_reg_addr,
61 NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
66 * first buffer to free from S/W ring is at index
67 * tx_next_dd - (tx_free_thresh-1)
69 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
70 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
71 /* free buffers one at a time */
72 m = rte_pktmbuf_prefree_seg(txep->mbuf);
75 if (unlikely(m == NULL))
78 if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
79 (nb_free > 0 && m->pool != free[0]->pool)) {
80 rte_mempool_put_bulk(free[0]->pool,
81 (void **)free, nb_free);
89 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
91 /* buffers were freed, update counters */
92 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
93 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
94 if (txq->tx_next_dd >= txq->nb_tx_desc)
95 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
97 return txq->tx_free_thresh;
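/*
 * Worked example of the batching above (values are illustrative only):
 * with tx_free_thresh = 32 and tx_next_dd = 31, a set DD bit releases a
 * whole batch of mbufs at once:
 *
 *	txep = &sw_ring[31 - (32 - 1)];		// sw_ring[0]
 *	// free sw_ring[0..31], then advance the threshold descriptor:
 *	tx_next_dd = 31 + 32;			// 63
 */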
100 /* Populate 4 descriptors with data from 4 mbufs */
102 tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
104 uint64_t buf_dma_addr;
108 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
109 buf_dma_addr = rte_mbuf_data_iova(*pkts);
110 pkt_len = (*pkts)->data_len;
112 /* write data to descriptor */
113 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
114 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
115 NGBE_TXD_DATLEN(pkt_len));
116 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
118 rte_prefetch0(&(*pkts)->pool);
122 /* Populate 1 descriptor with data from 1 mbuf */
124 tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
126 uint64_t buf_dma_addr;
129 buf_dma_addr = rte_mbuf_data_iova(*pkts);
130 pkt_len = (*pkts)->data_len;
132 /* write data to descriptor */
133 txdp->qw0 = cpu_to_le64(buf_dma_addr);
134 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
135 NGBE_TXD_DATLEN(pkt_len));
136 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
138 rte_prefetch0(&(*pkts)->pool);
142 * Fill H/W descriptor ring with mbuf data.
143 * Copy mbuf pointers to the S/W ring.
146 ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
149 volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
150 struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
151 const int N_PER_LOOP = 4;
152 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
153 int mainpart, leftover;
157 * Process most of the packets in chunks of N pkts. Any
158 * leftover packets will get processed one at a time.
160 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
161 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
162 for (i = 0; i < mainpart; i += N_PER_LOOP) {
163 /* Copy N mbuf pointers to the S/W ring */
164 for (j = 0; j < N_PER_LOOP; ++j)
165 (txep + i + j)->mbuf = *(pkts + i + j);
166 tx4(txdp + i, pkts + i);
169 if (unlikely(leftover > 0)) {
170 for (i = 0; i < leftover; ++i) {
171 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
172 tx1(txdp + mainpart + i, pkts + mainpart + i);
177 static inline uint16_t
178 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
181 struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
185 * Begin scanning the H/W ring for done descriptors when the
186 * number of available descriptors drops below tx_free_thresh.
187 * For each done descriptor, free the associated buffer.
189 if (txq->nb_tx_free < txq->tx_free_thresh)
190 ngbe_tx_free_bufs(txq);
192 /* Only use descriptors that are available */
193 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
194 if (unlikely(nb_pkts == 0))
197 /* Use exactly nb_pkts descriptors */
198 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
201 * At this point, we know there are enough descriptors in the
202 * ring to transmit all the packets. This assumes that each
203 * mbuf contains a single segment, and that no new offloads
204 * are expected, which would require a new context descriptor.
208 * See if we're going to wrap-around. If so, handle the top
209 * of the descriptor ring first, then do the bottom. If not,
210 * the processing looks just like the "bottom" part anyway...
212 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
213 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
214 ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
218 /* Fill H/W descriptor ring with mbuf data */
219 ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
220 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
223 * Check for wrap-around. This would only happen if we used
224 * up to the last descriptor in the ring, no more, no less.
226 if (txq->tx_tail >= txq->nb_tx_desc)
229 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
230 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
231 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
233 /* update tail pointer */
235 ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
241 ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
246 /* Transmit directly if the whole burst fits in TX_MAX_BURST pkts */
247 if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
248 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
250 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
252 while (nb_pkts != 0) {
255 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
256 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
257 nb_tx = (uint16_t)(nb_tx + ret);
258 nb_pkts = (uint16_t)(nb_pkts - ret);
267 ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
268 volatile struct ngbe_tx_ctx_desc *ctx_txd,
269 uint64_t ol_flags, union ngbe_tx_offload tx_offload)
271 union ngbe_tx_offload tx_offload_mask;
272 uint32_t type_tucmd_mlhl;
273 uint32_t mss_l4len_idx;
275 uint32_t vlan_macip_lens;
276 uint32_t tunnel_seed;
278 ctx_idx = txq->ctx_curr;
279 tx_offload_mask.data[0] = 0;
280 tx_offload_mask.data[1] = 0;
282 /* Specify which HW CTX to upload. */
283 mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
284 type_tucmd_mlhl = NGBE_TXD_CTXT;
286 tx_offload_mask.ptid |= ~0;
287 type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
289 /* check if TCP segmentation is required for this packet */
290 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
291 tx_offload_mask.l2_len |= ~0;
292 tx_offload_mask.l3_len |= ~0;
293 tx_offload_mask.l4_len |= ~0;
294 tx_offload_mask.tso_segsz |= ~0;
295 mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
296 mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
297 } else { /* no TSO, check if hardware checksum is needed */
298 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
299 tx_offload_mask.l2_len |= ~0;
300 tx_offload_mask.l3_len |= ~0;
303 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
304 case RTE_MBUF_F_TX_UDP_CKSUM:
306 NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
307 tx_offload_mask.l2_len |= ~0;
308 tx_offload_mask.l3_len |= ~0;
310 case RTE_MBUF_F_TX_TCP_CKSUM:
312 NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
313 tx_offload_mask.l2_len |= ~0;
314 tx_offload_mask.l3_len |= ~0;
316 case RTE_MBUF_F_TX_SCTP_CKSUM:
318 NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
319 tx_offload_mask.l2_len |= ~0;
320 tx_offload_mask.l3_len |= ~0;
327 vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
329 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
330 tx_offload_mask.outer_tun_len |= ~0;
331 tx_offload_mask.outer_l2_len |= ~0;
332 tx_offload_mask.outer_l3_len |= ~0;
333 tx_offload_mask.l2_len |= ~0;
334 tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
335 tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
337 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
338 case RTE_MBUF_F_TX_TUNNEL_IPIP:
339 /* for non-UDP/GRE tunneling, set to 0b */
342 PMD_TX_LOG(ERR, "Tunnel type not supported");
345 vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
348 vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
351 if (ol_flags & RTE_MBUF_F_TX_VLAN) {
352 tx_offload_mask.vlan_tci |= ~0;
353 vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
356 txq->ctx_cache[ctx_idx].flags = ol_flags;
357 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
358 tx_offload_mask.data[0] & tx_offload.data[0];
359 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
360 tx_offload_mask.data[1] & tx_offload.data[1];
361 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
363 ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
364 ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
365 ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
366 ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
370 * Check which hardware context can be used. Use the existing match
371 * or create a new context descriptor.
373 static inline uint32_t
374 what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
375 union ngbe_tx_offload tx_offload)
377 /* If it matches the currently used context */
378 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
379 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
380 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
381 & tx_offload.data[0])) &&
382 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
383 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
384 & tx_offload.data[1]))))
385 return txq->ctx_curr;
387 /* Otherwise, check whether the next context slot matches */
388 txq->ctx_curr ^= 1;
389 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
390 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
391 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
392 & tx_offload.data[0])) &&
393 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
394 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
395 & tx_offload.data[1]))))
396 return txq->ctx_curr;
398 /* Mismatch: a new context descriptor is required */
399 return NGBE_CTX_NUM;
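/*
 * Usage sketch (mirroring the caller in ngbe_xmit_pkts() below):
 *
 *	uint32_t ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
 *
 *	if (ctx == NGBE_CTX_NUM) {
 *		// neither cached slot matched: a context descriptor
 *		// must be written before the data descriptors
 *	}
 */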
402 static inline uint32_t
403 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
407 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
409 tmp |= NGBE_TXD_L4CS;
411 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
413 tmp |= NGBE_TXD_IPCS;
415 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
417 tmp |= NGBE_TXD_EIPCS;
419 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
421 /* implies IPv4 cksum */
422 if (ol_flags & RTE_MBUF_F_TX_IPV4)
423 tmp |= NGBE_TXD_IPCS;
424 tmp |= NGBE_TXD_L4CS;
426 if (ol_flags & RTE_MBUF_F_TX_VLAN)
432 static inline uint32_t
433 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
435 uint32_t cmdtype = 0;
437 if (ol_flags & RTE_MBUF_F_TX_VLAN)
438 cmdtype |= NGBE_TXD_VLE;
439 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
440 cmdtype |= NGBE_TXD_TSE;
444 static inline uint8_t
445 tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
450 return ngbe_encode_ptype(ptype);
452 /* Only support flags in NGBE_TX_OFFLOAD_MASK */
453 tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
456 ptype = RTE_PTYPE_L2_ETHER;
457 if (oflags & RTE_MBUF_F_TX_VLAN)
458 ptype |= RTE_PTYPE_L2_ETHER_VLAN;
461 if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
462 ptype |= RTE_PTYPE_L3_IPV4;
463 else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
464 ptype |= RTE_PTYPE_L3_IPV6;
466 if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
467 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
468 else if (oflags & (RTE_MBUF_F_TX_IPV6))
469 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
472 switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
473 case RTE_MBUF_F_TX_TCP_CKSUM:
474 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
476 case RTE_MBUF_F_TX_UDP_CKSUM:
477 ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
479 case RTE_MBUF_F_TX_SCTP_CKSUM:
480 ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
484 if (oflags & RTE_MBUF_F_TX_TCP_SEG)
485 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
488 switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
489 case RTE_MBUF_F_TX_TUNNEL_IPIP:
490 case RTE_MBUF_F_TX_TUNNEL_IP:
491 ptype |= RTE_PTYPE_L2_ETHER |
497 return ngbe_encode_ptype(ptype);
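/*
 * Worked example (assuming no tunnel or VLAN flags): for
 * oflags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM, the helper
 * above builds
 *
 *	ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP
 *
 * before compressing it with ngbe_encode_ptype().
 */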
500 /* Reset transmit descriptors after they have been used */
502 ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
504 struct ngbe_tx_entry *sw_ring = txq->sw_ring;
505 volatile struct ngbe_tx_desc *txr = txq->tx_ring;
506 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
507 uint16_t nb_tx_desc = txq->nb_tx_desc;
508 uint16_t desc_to_clean_to;
509 uint16_t nb_tx_to_clean;
512 /* Determine the last descriptor needing to be cleaned */
513 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
514 if (desc_to_clean_to >= nb_tx_desc)
515 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
517 /* Check to make sure the last descriptor to clean is done */
518 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
519 status = txr[desc_to_clean_to].dw3;
520 if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
522 "Tx descriptor %4u is not done"
523 "(port=%d queue=%d)",
525 txq->port_id, txq->queue_id);
526 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
527 ngbe_set32_masked(txq->tdc_reg_addr,
528 NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
529 /* Failed to clean any descriptors, better luck next time */
533 /* Figure out how many descriptors will be cleaned */
534 if (last_desc_cleaned > desc_to_clean_to)
535 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
538 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
542 "Cleaning %4u Tx descriptors: %4u to %4u (port=%d queue=%d)",
543 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
544 txq->port_id, txq->queue_id);
547 * The last descriptor to clean is done, so that means all the
548 * descriptors from the last descriptor that was cleaned
549 * up to the last descriptor with the RS bit set
550 * are done. Only reset the threshold descriptor.
552 txr[desc_to_clean_to].dw3 = 0;
554 /* Update the txq to reflect the last descriptor that was cleaned */
555 txq->last_desc_cleaned = desc_to_clean_to;
556 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
563 ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
566 struct ngbe_tx_queue *txq;
567 struct ngbe_tx_entry *sw_ring;
568 struct ngbe_tx_entry *txe, *txn;
569 volatile struct ngbe_tx_desc *txr;
570 volatile struct ngbe_tx_desc *txd;
571 struct rte_mbuf *tx_pkt;
572 struct rte_mbuf *m_seg;
573 uint64_t buf_dma_addr;
574 uint32_t olinfo_status;
575 uint32_t cmd_type_len;
586 union ngbe_tx_offload tx_offload;
588 tx_offload.data[0] = 0;
589 tx_offload.data[1] = 0;
591 sw_ring = txq->sw_ring;
593 tx_id = txq->tx_tail;
594 txe = &sw_ring[tx_id];
596 /* Determine if the descriptor ring needs to be cleaned. */
597 if (txq->nb_tx_free < txq->tx_free_thresh)
598 ngbe_xmit_cleanup(txq);
600 rte_prefetch0(&txe->mbuf->pool);
603 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
606 pkt_len = tx_pkt->pkt_len;
609 * Determine how many (if any) context descriptors
610 * are needed for offload functionality.
612 ol_flags = tx_pkt->ol_flags;
614 /* If hardware offload required */
615 tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
617 tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
618 tx_pkt->packet_type);
619 tx_offload.l2_len = tx_pkt->l2_len;
620 tx_offload.l3_len = tx_pkt->l3_len;
621 tx_offload.l4_len = tx_pkt->l4_len;
622 tx_offload.vlan_tci = tx_pkt->vlan_tci;
623 tx_offload.tso_segsz = tx_pkt->tso_segsz;
624 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
625 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
626 tx_offload.outer_tun_len = 0;
628 /* Decide whether to build a new context or reuse an existing one */
629 ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
630 /* Only allocate context descriptor if required */
631 new_ctx = (ctx == NGBE_CTX_NUM);
636 * Keep track of how many descriptors are used in this loop.
637 * This will always be the number of segments plus the number of
638 * context descriptors required to transmit the packet.
640 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
643 * The number of descriptors that must be allocated for a
644 * packet is the number of segments of that packet, plus 1
645 * Context Descriptor for the hardware offload, if any.
646 * Determine the last Tx descriptor to allocate in the Tx ring
647 * for the packet, starting from the current position (tx_id)
650 tx_last = (uint16_t)(tx_id + nb_used - 1);
653 if (tx_last >= txq->nb_tx_desc)
654 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
656 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
657 " tx_first=%u tx_last=%u",
658 (uint16_t)txq->port_id,
659 (uint16_t)txq->queue_id,
665 * Make sure there are enough Tx descriptors available to
666 * transmit the entire packet.
667 * nb_used better be less than or equal to txq->tx_free_thresh
669 if (nb_used > txq->nb_tx_free) {
671 "Not enough free Tx descriptors "
672 "nb_used=%4u nb_free=%4u "
673 "(port=%d queue=%d)",
674 nb_used, txq->nb_tx_free,
675 txq->port_id, txq->queue_id);
677 if (ngbe_xmit_cleanup(txq) != 0) {
678 /* Could not clean any descriptors */
684 /* nb_used better be <= txq->tx_free_thresh */
685 if (unlikely(nb_used > txq->tx_free_thresh)) {
687 "The number of descriptors needed to "
688 "transmit the packet exceeds the "
689 "RS bit threshold. This will impact "
691 "nb_used=%4u nb_free=%4u "
692 "tx_free_thresh=%4u. "
693 "(port=%d queue=%d)",
694 nb_used, txq->nb_tx_free,
696 txq->port_id, txq->queue_id);
698 * Loop here until there are enough Tx
699 * descriptors or until the ring cannot be
702 while (nb_used > txq->nb_tx_free) {
703 if (ngbe_xmit_cleanup(txq) != 0) {
705 * Could not clean any
717 * By now there are enough free Tx descriptors to transmit
722 * Set common flags of all Tx Data Descriptors.
724 * The following bits must be set in the first Data Descriptor
725 * and are ignored in the other ones:
728 * The following bits must only be set in the last Data
732 cmd_type_len = NGBE_TXD_FCS;
736 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
737 /* when TSO is on, the paylen in the descriptor is
738 * not the packet len but the TCP payload len
740 pkt_len -= (tx_offload.l2_len +
741 tx_offload.l3_len + tx_offload.l4_len);
743 (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
744 ? tx_offload.outer_l2_len +
745 tx_offload.outer_l3_len : 0;
749 * Setup the Tx Context Descriptor if required
752 volatile struct ngbe_tx_ctx_desc *ctx_txd;
754 ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
757 txn = &sw_ring[txe->next_id];
758 rte_prefetch0(&txn->mbuf->pool);
760 if (txe->mbuf != NULL) {
761 rte_pktmbuf_free_seg(txe->mbuf);
765 ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
768 txe->last_id = tx_last;
769 tx_id = txe->next_id;
774 * Set up the Tx Data Descriptor.
775 * This path is taken whether a new context descriptor
776 * was just built or an existing one is being reused.
778 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
780 tx_desc_cksum_flags_to_olinfo(ol_flags);
781 olinfo_status |= NGBE_TXD_IDX(ctx);
784 olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
789 txn = &sw_ring[txe->next_id];
790 rte_prefetch0(&txn->mbuf->pool);
792 if (txe->mbuf != NULL)
793 rte_pktmbuf_free_seg(txe->mbuf);
797 * Set up Transmit Data Descriptor.
799 slen = m_seg->data_len;
800 buf_dma_addr = rte_mbuf_data_iova(m_seg);
801 txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
802 txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
803 txd->dw3 = rte_cpu_to_le_32(olinfo_status);
804 txe->last_id = tx_last;
805 tx_id = txe->next_id;
808 } while (m_seg != NULL);
811 * The last packet data descriptor needs End Of Packet (EOP)
813 cmd_type_len |= NGBE_TXD_EOP;
814 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
816 txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
824 * Set the Transmit Descriptor Tail (TDT)
826 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
827 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
828 (uint16_t)tx_id, (uint16_t)nb_tx);
829 ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
830 txq->tx_tail = tx_id;
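/*
 * Usage sketch (application side, not driver code): this path consumes
 * per-mbuf offload metadata, so a caller requesting e.g. IPv4 and TCP
 * checksum offload is expected to fill in the lengths and flags first:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		      RTE_MBUF_F_TX_TCP_CKSUM;
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, &m, 1);
 */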
835 /*********************************************************************
837 * Tx prep functions
839 **********************************************************************/
841 ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
846 struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
848 for (i = 0; i < nb_pkts; i++) {
850 ol_flags = m->ol_flags;
853 * Check if packet meets requirements for number of segments
855 * NOTE: for ngbe it's always (40 - WTHRESH) for both TSO and
859 if (m->nb_segs > NGBE_TX_MAX_SEG - txq->wthresh) {
864 if (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) {
865 rte_errno = ENOTSUP;
869 #ifdef RTE_ETHDEV_DEBUG_TX
870 ret = rte_validate_tx_offload(m);
876 ret = rte_net_intel_cksum_prepare(m);
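/*
 * Usage sketch (application side): tx_prepare is meant to run right
 * before tx_burst whenever checksum/TSO offloads are requested:
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */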
886 /*********************************************************************
888 * Rx functions
890 **********************************************************************/
891 static inline uint32_t
892 ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
894 uint16_t ptid = NGBE_RXD_PTID(pkt_info);
898 return ngbe_decode_ptype(ptid);
901 static inline uint64_t
902 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
907 * Check only whether a VLAN is present.
908 * Do not check whether the L3/L4 Rx checksum was verified by the NIC;
909 * that can be determined from the rte_eth_rxmode.offloads flags.
911 pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
912 vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
918 static inline uint64_t
919 rx_desc_error_to_pkt_flags(uint32_t rx_status)
921 uint64_t pkt_flags = 0;
923 /* checksum offload can't be disabled */
924 if (rx_status & NGBE_RXD_STAT_IPCS)
925 pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
926 ? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
928 if (rx_status & NGBE_RXD_STAT_L4CS)
929 pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
930 ? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
932 if (rx_status & NGBE_RXD_STAT_EIPCS &&
933 rx_status & NGBE_RXD_ERR_EIPCS)
934 pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
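/*
 * Usage sketch (application side): the flags built above land in
 * m->ol_flags and are typically consumed as:
 *
 *	if ((m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
 *	    RTE_MBUF_F_RX_IP_CKSUM_BAD)
 *		rte_pktmbuf_free(m);	// e.g. drop the bad packet
 */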
940 * LOOK_AHEAD defines how many desc statuses to check beyond the
941 * current descriptor.
942 * It must be a #define (compile-time constant) for optimal performance.
943 * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
944 * function only works with LOOK_AHEAD=8.
947 #if (LOOK_AHEAD != 8)
948 #error "PMD NGBE: LOOK_AHEAD must be 8\n"
951 ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
953 volatile struct ngbe_rx_desc *rxdp;
954 struct ngbe_rx_entry *rxep;
959 uint32_t s[LOOK_AHEAD];
960 uint32_t pkt_info[LOOK_AHEAD];
964 /* get references to current descriptor and S/W ring entry */
965 rxdp = &rxq->rx_ring[rxq->rx_tail];
966 rxep = &rxq->sw_ring[rxq->rx_tail];
968 status = rxdp->qw1.lo.status;
969 /* check to make sure there is at least 1 packet to receive */
970 if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
974 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
975 * reference packets that are ready to be received.
977 for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
978 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
979 /* Read desc statuses; the acquire fence below orders these loads */
980 for (j = 0; j < LOOK_AHEAD; j++)
981 s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
983 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
985 /* Compute how many status bits were set */
986 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
987 (s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
990 for (j = 0; j < nb_dd; j++)
991 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
995 /* Translate descriptor info to mbuf format */
996 for (j = 0; j < nb_dd; ++j) {
998 pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
1000 mb->data_len = pkt_len;
1001 mb->pkt_len = pkt_len;
1002 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1004 /* convert descriptor fields to rte mbuf flags */
1005 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1007 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1008 mb->ol_flags = pkt_flags;
1010 ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1014 /* Move mbuf pointers from the S/W ring to the stage */
1015 for (j = 0; j < LOOK_AHEAD; ++j)
1016 rxq->rx_stage[i + j] = rxep[j].mbuf;
1018 /* stop if this chunk of descriptors was not fully completed */
1019 if (nb_dd != LOOK_AHEAD)
1023 /* clear software ring entries so we can clean up correctly */
1024 for (i = 0; i < nb_rx; ++i)
1025 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1031 ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
1033 volatile struct ngbe_rx_desc *rxdp;
1034 struct ngbe_rx_entry *rxep;
1035 struct rte_mbuf *mb;
1040 /* allocate buffers in bulk directly into the S/W ring */
1041 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1042 rxep = &rxq->sw_ring[alloc_idx];
1043 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1044 rxq->rx_free_thresh);
1045 if (unlikely(diag != 0))
1048 rxdp = &rxq->rx_ring[alloc_idx];
1049 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1050 /* populate the static rte mbuf fields */
1053 mb->port = rxq->port_id;
1055 rte_mbuf_refcnt_set(mb, 1);
1056 mb->data_off = RTE_PKTMBUF_HEADROOM;
1058 /* populate the descriptors */
1059 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1060 NGBE_RXD_HDRADDR(&rxdp[i], 0);
1061 NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1064 /* update state of internal queue structure */
1065 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1066 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1067 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1073 static inline uint16_t
1074 ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1077 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1080 /* how many packets are ready to return? */
1081 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1083 /* copy mbuf pointers to the application's packet list */
1084 for (i = 0; i < nb_pkts; ++i)
1085 rx_pkts[i] = stage[i];
1087 /* update internal queue state */
1088 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1089 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1094 static inline uint16_t
1095 ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1098 struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
1099 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1102 /* Any previously recv'd pkts will be returned from the Rx stage */
1103 if (rxq->rx_nb_avail)
1104 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1106 /* Scan the H/W ring for packets to receive */
1107 nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
1109 /* update internal queue state */
1110 rxq->rx_next_avail = 0;
1111 rxq->rx_nb_avail = nb_rx;
1112 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1114 /* if required, allocate new buffers to replenish descriptors */
1115 if (rxq->rx_tail > rxq->rx_free_trigger) {
1116 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1118 if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
1121 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1122 "queue_id=%u", (uint16_t)rxq->port_id,
1123 (uint16_t)rxq->queue_id);
1125 dev->data->rx_mbuf_alloc_failed +=
1126 rxq->rx_free_thresh;
1129 * Need to rewind any previous receives if we cannot
1130 * allocate new buffers to replenish the old ones.
1132 rxq->rx_nb_avail = 0;
1133 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1134 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1135 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1140 /* update tail pointer */
1142 ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1145 if (rxq->rx_tail >= rxq->nb_rx_desc)
1148 /* received any packets this loop? */
1149 if (rxq->rx_nb_avail)
1150 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1155 /* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
1157 ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1162 if (unlikely(nb_pkts == 0))
1165 if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
1166 return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1168 /* request is relatively large, chunk it up */
1173 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
1174 ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1175 nb_rx = (uint16_t)(nb_rx + ret);
1176 nb_pkts = (uint16_t)(nb_pkts - ret);
1185 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1188 struct ngbe_rx_queue *rxq;
1189 volatile struct ngbe_rx_desc *rx_ring;
1190 volatile struct ngbe_rx_desc *rxdp;
1191 struct ngbe_rx_entry *sw_ring;
1192 struct ngbe_rx_entry *rxe;
1193 struct rte_mbuf *rxm;
1194 struct rte_mbuf *nmb;
1195 struct ngbe_rx_desc rxd;
1208 rx_id = rxq->rx_tail;
1209 rx_ring = rxq->rx_ring;
1210 sw_ring = rxq->sw_ring;
1211 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1212 while (nb_rx < nb_pkts) {
1214 * The order of operations here is important as the DD status
1215 * bit must not be read after any other descriptor fields.
1216 * rx_ring and rxdp point to volatile data, so the accesses
1217 * cannot be reordered by the compiler. If they were not
1218 * volatile, they could be reordered, which could lead to
1219 * using invalid descriptor fields when read from rxd.
1221 rxdp = &rx_ring[rx_id];
1222 staterr = rxdp->qw1.lo.status;
1223 if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
1230 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
1231 * is likely to be invalid and to be dropped by the various
1232 * validation checks performed by the network stack.
1234 * Allocate a new mbuf to replenish the RX ring descriptor.
1235 * If the allocation fails:
1236 * - arrange for that Rx descriptor to be the first one
1237 * being parsed the next time the receive function is
1238 * invoked [on the same queue].
1240 * - Stop parsing the Rx ring and return immediately.
1242 * This policy does not drop the packet received in the Rx
1243 * descriptor for which the allocation of a new mbuf failed.
1244 * Thus, it allows that packet to be retrieved later if
1245 * mbufs have been freed in the meantime.
1246 * As a side effect, holding Rx descriptors instead of
1247 * systematically giving them back to the NIC may lead to
1248 * Rx ring exhaustion situations.
1249 * However, the NIC can gracefully prevent such situations
1250 * from happening by sending specific "back-pressure" flow
1251 * control frames to its peer(s).
1254 "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
1255 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1256 (uint16_t)rx_id, (uint32_t)staterr,
1257 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1259 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1262 "Rx mbuf alloc failed port_id=%u queue_id=%u",
1263 (uint16_t)rxq->port_id,
1264 (uint16_t)rxq->queue_id);
1265 dev->data->rx_mbuf_alloc_failed++;
1270 rxe = &sw_ring[rx_id];
1272 if (rx_id == rxq->nb_rx_desc)
1275 /* Prefetch next mbuf while processing current one. */
1276 rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
1279 * When next Rx descriptor is on a cache-line boundary,
1280 * prefetch the next 4 Rx descriptors and the next 8 pointers
1283 if ((rx_id & 0x3) == 0) {
1284 rte_ngbe_prefetch(&rx_ring[rx_id]);
1285 rte_ngbe_prefetch(&sw_ring[rx_id]);
1290 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1291 NGBE_RXD_HDRADDR(rxdp, 0);
1292 NGBE_RXD_PKTADDR(rxdp, dma_addr);
1295 * Initialize the returned mbuf.
1296 * 1) setup generic mbuf fields:
1297 * - number of segments,
1300 * - Rx port identifier.
1301 * 2) integrate hardware offload data, if any:
1302 * - IP checksum flag,
1303 * - VLAN TCI, if any,
1306 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1308 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1309 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1312 rxm->pkt_len = pkt_len;
1313 rxm->data_len = pkt_len;
1314 rxm->port = rxq->port_id;
1316 pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1317 /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
1318 rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1320 pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1322 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1323 rxm->ol_flags = pkt_flags;
1324 rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1328 * Store the mbuf address into the next entry of the array
1329 * of returned packets.
1331 rx_pkts[nb_rx++] = rxm;
1333 rxq->rx_tail = rx_id;
1336 * If the number of free Rx descriptors is greater than the Rx free
1337 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1339 * Update the RDT with the value of the last processed Rx descriptor
1340 * minus 1, to guarantee that the RDT register is never equal to the
1341 * RDH register, which creates a "full" ring situation from the
1342 * hardware point of view...
1344 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1345 if (nb_hold > rxq->rx_free_thresh) {
1347 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
1348 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1349 (uint16_t)rx_id, (uint16_t)nb_hold,
1351 rx_id = (uint16_t)((rx_id == 0) ?
1352 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1353 ngbe_set32(rxq->rdt_reg_addr, rx_id);
1356 rxq->nb_rx_hold = nb_hold;
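/*
 * Worked example of the RDT rule above (illustrative values): with
 * nb_rx_desc = 512 and the last processed descriptor at index 0,
 *
 *	rx_id = (0 == 0) ? (512 - 1) : (0 - 1);	// writes 511
 *
 * keeping RDT one slot behind RDH so the ring never looks "full" to HW.
 */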
1361 * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1363 * Fill the following info in the HEAD buffer of the Rx cluster:
1364 * - RX port identifier
1365 * - hardware offload data, if any:
1366 * - IP checksum flag
1367 * - VLAN TCI, if any
1369 * @head HEAD of the packet cluster
1370 * @desc HW descriptor to get data from
1371 * @rxq Pointer to the Rx queue
1374 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
1375 struct ngbe_rx_queue *rxq, uint32_t staterr)
1380 head->port = rxq->port_id;
1382 /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
1383 * set in the pkt_flags field.
1385 head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1386 pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1387 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1388 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1389 head->ol_flags = pkt_flags;
1390 head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1395 * ngbe_recv_pkts_sc - receive handler for scatter case.
1397 * @rx_queue Rx queue handle
1398 * @rx_pkts table of received packets
1399 * @nb_pkts size of rx_pkts table
1400 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1402 * Returns the number of received packets/clusters (according to the "bulk
1403 * receive" interface).
1405 static inline uint16_t
1406 ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1409 struct ngbe_rx_queue *rxq = rx_queue;
1410 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1411 volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
1412 struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
1413 struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1414 uint16_t rx_id = rxq->rx_tail;
1416 uint16_t nb_hold = rxq->nb_rx_hold;
1417 uint16_t prev_id = rxq->rx_tail;
1419 while (nb_rx < nb_pkts) {
1421 struct ngbe_rx_entry *rxe;
1422 struct ngbe_scattered_rx_entry *sc_entry;
1423 struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
1424 struct ngbe_rx_entry *next_rxe = NULL;
1425 struct rte_mbuf *first_seg;
1426 struct rte_mbuf *rxm;
1427 struct rte_mbuf *nmb = NULL;
1428 struct ngbe_rx_desc rxd;
1431 volatile struct ngbe_rx_desc *rxdp;
1435 rxdp = &rx_ring[rx_id];
1436 staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1438 if (!(staterr & NGBE_RXD_STAT_DD))
1443 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1444 "staterr=0x%x data_len=%u",
1445 rxq->port_id, rxq->queue_id, rx_id, staterr,
1446 rte_le_to_cpu_16(rxd.qw1.hi.len));
1449 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1451 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed "
1452 "port_id=%u queue_id=%u",
1453 rxq->port_id, rxq->queue_id);
1455 dev->data->rx_mbuf_alloc_failed++;
1458 } else if (nb_hold > rxq->rx_free_thresh) {
1459 uint16_t next_rdt = rxq->rx_free_trigger;
1461 if (!ngbe_rx_alloc_bufs(rxq, false)) {
1463 ngbe_set32_relaxed(rxq->rdt_reg_addr,
1465 nb_hold -= rxq->rx_free_thresh;
1467 PMD_RX_LOG(DEBUG, "Rx bulk alloc failed "
1468 "port_id=%u queue_id=%u",
1469 rxq->port_id, rxq->queue_id);
1471 dev->data->rx_mbuf_alloc_failed++;
1477 rxe = &sw_ring[rx_id];
1478 eop = staterr & NGBE_RXD_STAT_EOP;
1480 next_id = rx_id + 1;
1481 if (next_id == rxq->nb_rx_desc)
1484 /* Prefetch next mbuf while processing current one. */
1485 rte_ngbe_prefetch(sw_ring[next_id].mbuf);
1488 * When next Rx descriptor is on a cache-line boundary,
1489 * prefetch the next 4 RX descriptors and the next 4 pointers
1492 if ((next_id & 0x3) == 0) {
1493 rte_ngbe_prefetch(&rx_ring[next_id]);
1494 rte_ngbe_prefetch(&sw_ring[next_id]);
1501 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1503 * Update the Rx descriptor with the physical address of the
1504 * data buffer of the newly allocated mbuf.
1508 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1509 NGBE_RXD_HDRADDR(rxdp, 0);
1510 NGBE_RXD_PKTADDR(rxdp, dma);
1516 * Set data length & data buffer address of mbuf.
1518 data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1519 rxm->data_len = data_len;
1525 next_sc_entry = &sw_sc_ring[nextp_id];
1526 next_rxe = &sw_ring[nextp_id];
1527 rte_ngbe_prefetch(next_rxe);
1530 sc_entry = &sw_sc_ring[rx_id];
1531 first_seg = sc_entry->fbuf;
1532 sc_entry->fbuf = NULL;
1535 * If this is the first buffer of the received packet,
1536 * set the pointer to the first mbuf of the packet and
1537 * initialize its context.
1538 * Otherwise, update the total length and the number of segments
1539 * of the current scattered packet, and update the pointer to
1540 * the last mbuf of the current packet.
1542 if (first_seg == NULL) {
1544 first_seg->pkt_len = data_len;
1545 first_seg->nb_segs = 1;
1547 first_seg->pkt_len += data_len;
1548 first_seg->nb_segs++;
1555 * If this is not the last buffer of the received packet, update
1556 * the pointer to the first mbuf at the NEXTP entry in the
1557 * sw_sc_ring and continue to parse the Rx ring.
1559 if (!eop && next_rxe) {
1560 rxm->next = next_rxe->mbuf;
1561 next_sc_entry->fbuf = first_seg;
1565 /* Initialize the first mbuf of the returned packet */
1566 ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1568 /* Deal with the case when HW CRC stripping is disabled. */
1569 first_seg->pkt_len -= rxq->crc_len;
1570 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1571 struct rte_mbuf *lp;
1573 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1576 first_seg->nb_segs--;
1577 lp->data_len -= rxq->crc_len - rxm->data_len;
1579 rte_pktmbuf_free_seg(rxm);
1581 rxm->data_len -= rxq->crc_len;
1584 /* Prefetch data of first segment, if configured to do so. */
1585 rte_packet_prefetch((char *)first_seg->buf_addr +
1586 first_seg->data_off);
1589 * Store the mbuf address into the next entry of the array
1590 * of returned packets.
1592 rx_pkts[nb_rx++] = first_seg;
1596 * Record index of the next Rx descriptor to probe.
1598 rxq->rx_tail = rx_id;
1601 * If the number of free Rx descriptors is greater than the Rx free
1602 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1604 * Update the RDT with the value of the last processed Rx descriptor
1605 * minus 1, to guarantee that the RDT register is never equal to the
1606 * RDH register, which creates a "full" ring situation from the
1607 * hardware point of view...
1609 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1610 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1611 "nb_hold=%u nb_rx=%u",
1612 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1615 ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1619 rxq->nb_rx_hold = nb_hold;
1624 ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1627 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
1631 ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1634 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
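/*
 * Usage sketch (application side): whichever Rx handler is selected, it
 * is reached through the generic burst API:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 */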
1637 /*********************************************************************
1639 * Queue management functions
1641 **********************************************************************/
1644 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
1648 if (txq->sw_ring != NULL) {
1649 for (i = 0; i < txq->nb_tx_desc; i++) {
1650 if (txq->sw_ring[i].mbuf != NULL) {
1651 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1652 txq->sw_ring[i].mbuf = NULL;
1659 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
1662 rte_free(txq->sw_ring);
1666 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
1669 if (txq->ops != NULL) {
1670 txq->ops->release_mbufs(txq);
1671 txq->ops->free_swring(txq);
1678 ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1680 ngbe_tx_queue_release(dev->data->tx_queues[qid]);
1683 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
1685 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
1687 static const struct ngbe_tx_desc zeroed_desc = {0};
1688 struct ngbe_tx_entry *txe = txq->sw_ring;
1691 /* Zero out HW ring memory */
1692 for (i = 0; i < txq->nb_tx_desc; i++)
1693 txq->tx_ring[i] = zeroed_desc;
1695 /* Initialize SW ring entries */
1696 prev = (uint16_t)(txq->nb_tx_desc - 1);
1697 for (i = 0; i < txq->nb_tx_desc; i++) {
1698 /* the ring can also be modified by hardware */
1699 volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
1701 txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
1704 txe[prev].next_id = i;
1708 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
1712 * Always allow one descriptor to remain unallocated to avoid
1713 * a H/W race condition.
1715 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1716 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1718 memset((void *)&txq->ctx_cache, 0,
1719 NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
1722 static const struct ngbe_txq_ops def_txq_ops = {
1723 .release_mbufs = ngbe_tx_queue_release_mbufs,
1724 .free_swring = ngbe_tx_free_swring,
1725 .reset = ngbe_reset_tx_queue,
1728 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1729 * the queue parameters. Used in tx_queue_setup by primary process and then
1730 * in dev_init by secondary process when attaching to an existing ethdev.
1733 ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
1735 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1736 if (txq->offloads == 0 &&
1737 txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
1738 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1739 dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
1740 dev->tx_pkt_prepare = NULL;
1742 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1744 " - offloads = 0x%" PRIx64,
1747 " - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
1748 (unsigned long)txq->tx_free_thresh,
1749 (unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
1750 dev->tx_pkt_burst = ngbe_xmit_pkts;
1751 dev->tx_pkt_prepare = ngbe_prep_pkts;
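/*
 * The selection rule above as a predicate (sketch):
 *
 *	use_simple = (txq->offloads == 0 &&
 *		      txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST);
 */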
1755 static const struct {
1756 eth_tx_burst_t pkt_burst;
1758 } ngbe_tx_burst_infos[] = {
1759 { ngbe_xmit_pkts_simple, "Scalar Simple"},
1760 { ngbe_xmit_pkts, "Scalar"},
1764 ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1765 struct rte_eth_burst_mode *mode)
1767 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
1771 for (i = 0; i < RTE_DIM(ngbe_tx_burst_infos); ++i) {
1772 if (pkt_burst == ngbe_tx_burst_infos[i].pkt_burst) {
1773 snprintf(mode->info, sizeof(mode->info), "%s",
1774 ngbe_tx_burst_infos[i].info);
1784 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1786 uint64_t tx_offload_capa;
1787 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1790 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1791 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1792 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1793 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1794 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1795 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1796 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1797 RTE_ETH_TX_OFFLOAD_UDP_TSO |
1798 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
1799 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
1800 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1801 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1804 tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
1806 return tx_offload_capa;
1810 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1813 unsigned int socket_id,
1814 const struct rte_eth_txconf *tx_conf)
1816 const struct rte_memzone *tz;
1817 struct ngbe_tx_queue *txq;
1819 uint16_t tx_free_thresh;
1822 PMD_INIT_FUNC_TRACE();
1823 hw = ngbe_dev_hw(dev);
1825 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1828 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
1829 * descriptors are used or if the number of descriptors required
1830 * to transmit a packet is greater than the number of free Tx
1832 * One descriptor in the Tx ring is used as a sentinel to avoid a
1833 * H/W race condition, hence the maximum threshold constraints.
1834 * When set to zero use default values.
1836 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1837 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1838 if (tx_free_thresh >= (nb_desc - 3)) {
1840 "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
1841 (unsigned int)tx_free_thresh,
1842 (int)dev->data->port_id, (int)queue_idx);
1846 if (nb_desc % tx_free_thresh != 0) {
1848 "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
1849 (unsigned int)tx_free_thresh,
1850 (int)dev->data->port_id, (int)queue_idx);
1854 /* Free memory prior to re-allocation if needed... */
1855 if (dev->data->tx_queues[queue_idx] != NULL) {
1856 ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
1857 dev->data->tx_queues[queue_idx] = NULL;
1860 /* First allocate the Tx queue data structure */
1861 txq = rte_zmalloc_socket("ethdev Tx queue",
1862 sizeof(struct ngbe_tx_queue),
1863 RTE_CACHE_LINE_SIZE, socket_id);
1868 * Allocate Tx ring hardware descriptors. A memzone large enough to
1869 * handle the maximum ring size is allocated in order to allow for
1870 * resizing in later calls to the queue setup function.
1872 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1873 sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
1874 NGBE_ALIGN, socket_id);
1876 ngbe_tx_queue_release(txq);
1880 txq->nb_tx_desc = nb_desc;
1881 txq->tx_free_thresh = tx_free_thresh;
1882 txq->pthresh = tx_conf->tx_thresh.pthresh;
1883 txq->hthresh = tx_conf->tx_thresh.hthresh;
1884 txq->wthresh = tx_conf->tx_thresh.wthresh;
1885 txq->queue_id = queue_idx;
1886 txq->reg_idx = queue_idx;
1887 txq->port_id = dev->data->port_id;
1888 txq->offloads = offloads;
1889 txq->ops = &def_txq_ops;
1890 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1892 txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
1893 txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
1895 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
1896 txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
1898 /* Allocate software ring */
1899 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
1900 sizeof(struct ngbe_tx_entry) * nb_desc,
1901 RTE_CACHE_LINE_SIZE, socket_id);
1902 if (txq->sw_ring == NULL) {
1903 ngbe_tx_queue_release(txq);
1907 "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1908 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1910 /* set up scalar Tx function as appropriate */
1911 ngbe_set_tx_function(dev, txq);
1913 txq->ops->reset(txq);
1915 dev->data->tx_queues[queue_idx] = txq;
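/*
 * Usage sketch (application side, hypothetical values): nb_desc and
 * tx_free_thresh must satisfy the checks above; NULL txconf selects
 * the defaults:
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *				     rte_socket_id(), NULL);
 */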
1921 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
1923 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
1924 * in the sw_sc_ring is not set to NULL but rather points to the next
1925 * mbuf of this RSC aggregation (that has not been completed yet and still
1926 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
1927 * just free the first "nb_segs" segments of the cluster explicitly by calling
1928 * rte_pktmbuf_free_seg().
1930 * @m scattered cluster head
1933 ngbe_free_sc_cluster(struct rte_mbuf *m)
1935 uint16_t i, nb_segs = m->nb_segs;
1936 struct rte_mbuf *next_seg;
1938 for (i = 0; i < nb_segs; i++) {
1940 rte_pktmbuf_free_seg(m);
1946 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
1950 if (rxq->sw_ring != NULL) {
1951 for (i = 0; i < rxq->nb_rx_desc; i++) {
1952 if (rxq->sw_ring[i].mbuf != NULL) {
1953 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1954 rxq->sw_ring[i].mbuf = NULL;
1957 for (i = 0; i < rxq->rx_nb_avail; ++i) {
1958 struct rte_mbuf *mb;
1960 mb = rxq->rx_stage[rxq->rx_next_avail + i];
1961 rte_pktmbuf_free_seg(mb);
1963 rxq->rx_nb_avail = 0;
1966 if (rxq->sw_sc_ring != NULL)
1967 for (i = 0; i < rxq->nb_rx_desc; i++)
1968 if (rxq->sw_sc_ring[i].fbuf != NULL) {
1969 ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
1970 rxq->sw_sc_ring[i].fbuf = NULL;
1975 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
1978 ngbe_rx_queue_release_mbufs(rxq);
1979 rte_free(rxq->sw_ring);
1980 rte_free(rxq->sw_sc_ring);
1986 ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1988 ngbe_rx_queue_release(dev->data->rx_queues[qid]);
1992 * Check if Rx Burst Bulk Alloc function can be used.
1994 * 0: the preconditions are satisfied and the bulk allocation function
1996 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
1997 * function must be used.
2000 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
2005 * Make sure the following pre-conditions are satisfied:
2006 * rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
2007 * rxq->rx_free_thresh < rxq->nb_rx_desc
2008 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2009 * Scattered packets are not supported. This should be checked
2010 * outside of this function.
2012 if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
2014 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
2015 rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
2017 } else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
2019 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
2020 rxq->rx_free_thresh, rxq->nb_rx_desc);
2022 } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
2024 "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
2025 rxq->nb_rx_desc, rxq->rx_free_thresh);
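/*
 * Worked example (assuming the default RTE_PMD_NGBE_RX_MAX_BURST of 32):
 * nb_rx_desc = 512 with rx_free_thresh = 64 passes all three checks
 * (64 >= 32, 64 < 512, 512 % 64 == 0), so bulk allocation stays enabled.
 */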
2032 /* Reset dynamic ngbe_rx_queue fields back to defaults */
2034 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
2036 static const struct ngbe_rx_desc zeroed_desc = {
2037 {{0}, {0} }, {{0}, {0} } };
2039 uint16_t len = rxq->nb_rx_desc;
2042 * By default, the Rx queue setup function allocates enough memory for
2043 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
2044 * extra memory at the end of the descriptor ring to be zero'd out.
2046 if (adapter->rx_bulk_alloc_allowed)
2047 /* zero out extra memory */
2048 len += RTE_PMD_NGBE_RX_MAX_BURST;
2051 * Zero out HW ring memory. Zero out extra memory at the end of
2052 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2053 * reads extra memory as zeros.
2055 for (i = 0; i < len; i++)
2056 rxq->rx_ring[i] = zeroed_desc;
2059 * initialize extra software ring entries. Space for these extra
2060 * entries is always allocated
2062 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2063 for (i = rxq->nb_rx_desc; i < len; ++i)
2064 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2066 rxq->rx_nb_avail = 0;
2067 rxq->rx_next_avail = 0;
2068 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2070 rxq->nb_rx_hold = 0;
2071 rxq->pkt_first_seg = NULL;
2072 rxq->pkt_last_seg = NULL;
2076 ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
2078 return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2082 ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2085 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2087 offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
2088 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2089 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2090 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
2091 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
2092 RTE_ETH_RX_OFFLOAD_SCATTER;
2095 offloads |= (RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
2096 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
2102 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2105 unsigned int socket_id,
2106 const struct rte_eth_rxconf *rx_conf,
2107 struct rte_mempool *mp)
2109 const struct rte_memzone *rz;
2110 struct ngbe_rx_queue *rxq;
2113 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2116 PMD_INIT_FUNC_TRACE();
2117 hw = ngbe_dev_hw(dev);
2119 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2121 /* Free memory prior to re-allocation if needed... */
2122 if (dev->data->rx_queues[queue_idx] != NULL) {
2123 ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2124 dev->data->rx_queues[queue_idx] = NULL;
2127 /* First allocate the Rx queue data structure */
2128 rxq = rte_zmalloc_socket("ethdev RX queue",
2129 sizeof(struct ngbe_rx_queue),
2130 RTE_CACHE_LINE_SIZE, socket_id);
2134 rxq->nb_rx_desc = nb_desc;
2135 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2136 rxq->queue_id = queue_idx;
2137 rxq->reg_idx = queue_idx;
2138 rxq->port_id = dev->data->port_id;
2139 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2140 rxq->crc_len = RTE_ETHER_CRC_LEN;
2143 rxq->drop_en = rx_conf->rx_drop_en;
2144 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2145 rxq->offloads = offloads;
2148 * Allocate Rx ring hardware descriptors. A memzone large enough to
2149 * handle the maximum ring size is allocated in order to allow for
2150 * resizing in later calls to the queue setup function.
2152 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2153 RX_RING_SZ, NGBE_ALIGN, socket_id);
2155 ngbe_rx_queue_release(rxq);
2160 * Zero init all the descriptors in the ring.
2162 memset(rz->addr, 0, RX_RING_SZ);
2164 rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
2165 rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
2167 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2168 rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
2171 * Certain constraints must be met in order to use the bulk buffer
2172 * allocation Rx burst function. If any of the Rx queues doesn't meet
2173 * them, the feature should be disabled for the whole port.
2175 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2177 "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
2178 rxq->queue_id, rxq->port_id);
2179 adapter->rx_bulk_alloc_allowed = false;
2183 * Allocate software ring. Allow for space at the end of the
2184 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2185 * function does not access an invalid memory region.
2188 if (adapter->rx_bulk_alloc_allowed)
2189 len += RTE_PMD_NGBE_RX_MAX_BURST;
2191 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2192 sizeof(struct ngbe_rx_entry) * len,
2193 RTE_CACHE_LINE_SIZE, socket_id);
2194 if (rxq->sw_ring == NULL) {
2195 ngbe_rx_queue_release(rxq);
2200 * Always allocate even if it's not going to be needed in order to
2201 * simplify the code.
2203 * This ring is used in Scattered Rx cases and Scattered Rx may
2204 * be requested in ngbe_dev_rx_init(), which is called later from
2208 rte_zmalloc_socket("rxq->sw_sc_ring",
2209 sizeof(struct ngbe_scattered_rx_entry) * len,
2210 RTE_CACHE_LINE_SIZE, socket_id);
2211 if (rxq->sw_sc_ring == NULL) {
2212 ngbe_rx_queue_release(rxq);
2217 "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2218 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2219 rxq->rx_ring_phys_addr);
2221 dev->data->rx_queues[queue_idx] = rxq;
2223 ngbe_reset_rx_queue(adapter, rxq);
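/*
 * Usage sketch (application side, hypothetical values): mb_pool is a
 * previously created mbuf mempool; a NULL rx_conf selects the defaults:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *				     rte_socket_id(), NULL, mb_pool);
 */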

void
ngbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct ngbe_tx_queue *txq = dev->data->tx_queues[i];

		if (txq != NULL) {
			txq->ops->release_mbufs(txq);
			txq->ops->reset(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq != NULL) {
			ngbe_rx_queue_release_mbufs(rxq);
			ngbe_reset_rx_queue(adapter, rxq);
		}
	}
}

void
ngbe_dev_free_queues(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ngbe_dev_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ngbe_dev_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

void ngbe_configure_port(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int i = 0;
	uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
			0x9100, 0x9200,
			0x0000, 0x0000, 0x0000, 0x0000};

	PMD_INIT_FUNC_TRACE();

	/* default outer VLAN TPID */
	wr32(hw, NGBE_EXTAG,
		NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
		NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));

	/* default inner VLAN TPID */
	wr32m(hw, NGBE_VLANCTL,
		NGBE_VLANCTL_TPID_MASK,
		NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
	wr32m(hw, NGBE_DMATXCTRL,
		NGBE_DMATXCTRL_TPID_MASK,
		NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));

	/* default VLAN TPID filters */
	for (i = 0; i < 8; i++) {
		wr32m(hw, NGBE_TAGTPID(i / 2),
			(i % 2 ? NGBE_TAGTPID_MSB_MASK
			       : NGBE_TAGTPID_LSB_MASK),
			(i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
			       : NGBE_TAGTPID_LSB(tpids[i])));
	}
}
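
/*
 * Note on the TPID filter loop above: each NGBE_TAGTPID register packs two
 * TPIDs, so entries i and i + 1 share register i / 2, even indices in the
 * LSB half and odd indices in the MSB half (e.g. tpids[2] and tpids[3] both
 * land in NGBE_TAGTPID(1)).
 */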

static int
ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
{
	struct ngbe_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned int i;

	/* Initialize software ring entries */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		/* the ring can also be modified by hardware */
		volatile struct ngbe_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
				     (unsigned int)rxq->queue_id,
				     (unsigned int)rxq->port_id);
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = &rxq->rx_ring[i];
		NGBE_RXD_HDRADDR(rxd, 0);
		NGBE_RXD_PKTADDR(rxd, dma_addr);
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

void
ngbe_set_rx_function(struct rte_eth_dev *dev)
{
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	if (dev->data->scattered_rx) {
		/*
		 * Set the scattered callback: there are bulk and
		 * single allocation versions.
		 */
		if (adapter->rx_bulk_alloc_allowed) {
			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
					    "allocation callback (port=%d).",
				     dev->data->port_id);
			dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
		} else {
			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
					    "single allocation) "
					    "Scattered Rx callback "
					    "(port=%d).",
				     dev->data->port_id);
			dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
		}
	/*
	 * Below we set "simple" callbacks according to port/queues parameters.
	 * If parameters allow it, we are going to choose between the following
	 * options:
	 *    - Bulk Allocation
	 *    - Single buffer allocation (the simplest one)
	 */
	} else if (adapter->rx_bulk_alloc_allowed) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
				    "satisfied. Rx Burst Bulk Alloc function "
				    "will be used on port=%d.",
			     dev->data->port_id);

		dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
	} else {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
				    "satisfied, or Scattered Rx is requested "
				    "(port=%d).",
			     dev->data->port_id);

		dev->rx_pkt_burst = ngbe_recv_pkts;
	}
}
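
/*
 * Selection summary: scattered + bulk alloc -> ngbe_recv_pkts_sc_bulk_alloc,
 * scattered only -> ngbe_recv_pkts_sc_single_alloc, bulk alloc only ->
 * ngbe_recv_pkts_bulk_alloc, otherwise ngbe_recv_pkts. The table below maps
 * each of these callbacks to the name reported by ngbe_rx_burst_mode_get().
 */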
static const struct {
	eth_rx_burst_t pkt_burst;
	const char *info;
} ngbe_rx_burst_infos[] = {
	{ ngbe_recv_pkts_sc_single_alloc,	"Scalar Scattered"},
	{ ngbe_recv_pkts_sc_bulk_alloc,		"Scalar Scattered Bulk Alloc"},
	{ ngbe_recv_pkts_bulk_alloc,		"Scalar Bulk Alloc"},
	{ ngbe_recv_pkts,			"Scalar"},
};

int
ngbe_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		       struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret = -EINVAL;
	unsigned int i;

	for (i = 0; i < RTE_DIM(ngbe_rx_burst_infos); ++i) {
		if (pkt_burst == ngbe_rx_burst_infos[i].pkt_burst) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 ngbe_rx_burst_infos[i].info);
			ret = 0;
			break;
		}
	}

	return ret;
}
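
/*
 * Usage sketch (illustrative): an application can query the chosen mode
 * through the ethdev API, e.g.:
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */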

/*
 * Initializes Receive Unit.
 */
int
ngbe_dev_rx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_rx_queue *rxq;
	uint64_t bus_addr;
	uint32_t fctrl;
	uint32_t hlreg0;
	uint32_t srrctl;
	uint32_t rdrxctl;
	uint32_t rxcsum;
	uint16_t buf_size;
	uint16_t i;
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/*
	 * Make sure receives are disabled while setting
	 * up the Rx context (registers, descriptor rings, etc.).
	 */
	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);

	/* Enable receipt of broadcast frames */
	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= NGBE_PSRCTL_BCA;
	wr32(hw, NGBE_PSRCTL, fctrl);

	/*
	 * Configure CRC stripping, if any.
	 */
	hlreg0 = rd32(hw, NGBE_SECRXCTL);
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
	else
		hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
	wr32(hw, NGBE_SECRXCTL, hlreg0);

	/*
	 * Configure jumbo frame support, if any.
	 */
	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
		NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));

	/*
	 * If loopback mode is configured, set LPBK bit.
	 */
	hlreg0 = rd32(hw, NGBE_PSRCTL);
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		hlreg0 |= NGBE_PSRCTL_LBENA;
	else
		hlreg0 &= ~NGBE_PSRCTL_LBENA;

	wr32(hw, NGBE_PSRCTL, hlreg0);

	/*
	 * Assume no header split and no VLAN strip support
	 * on any Rx queue first.
	 */
	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	/* Setup Rx queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
			rxq->crc_len = RTE_ETHER_CRC_LEN;
		else
			rxq->crc_len = 0;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;
		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
			(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
			(uint32_t)(bus_addr >> 32));
		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);

		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= NGBE_RXCFG_DROP;

		/*
		 * Configure the Rx buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
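
		/*
		 * Worked example (illustrative): a mempool created with
		 * RTE_MBUF_DEFAULT_BUF_SIZE and the default 128-byte headroom
		 * leaves a 2048-byte data room, which ROUND_DOWN keeps at
		 * 2048, i.e. a 2 KB PKTLEN.
		 */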

		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);

		/*
		 * Account for two VLAN tags (QinQ) on top of the MTU when
		 * deciding whether scattered Rx is needed.
		 */
		if (dev->data->mtu + NGBE_ETH_OVERHEAD +
				2 * NGBE_VLAN_TAG_SIZE > buf_size)
			dev->data->scattered_rx = 1;
		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		dev->data->scattered_rx = 1;

	/*
	 * Setup the Checksum Register.
	 * Enable IP/L4 checksum computation by hardware if requested to do so.
	 */
	rxcsum = rd32(hw, NGBE_PSRCTL);
	rxcsum |= NGBE_PSRCTL_PCSD;
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		rxcsum |= NGBE_PSRCTL_L4CSUM;
	else
		rxcsum &= ~NGBE_PSRCTL_L4CSUM;

	wr32(hw, NGBE_PSRCTL, rxcsum);

	if (hw->is_pf) {
		rdrxctl = rd32(hw, NGBE_SECRXCTL);
		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
			rdrxctl &= ~NGBE_SECRXCTL_CRCSTRIP;
		else
			rdrxctl |= NGBE_SECRXCTL_CRCSTRIP;
		wr32(hw, NGBE_SECRXCTL, rdrxctl);
	}

	ngbe_set_rx_function(dev);

	return 0;
}

/*
 * Initializes Transmit Unit.
 */
void
ngbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, NGBE_TXBAL(txq->reg_idx),
			(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_TXBAH(txq->reg_idx),
			(uint32_t)(bus_addr >> 32));
		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and Tx Tail descriptor pointers */
		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
	}
}

/*
 * Set up link loopback mode Tx->Rx.
 */
static inline void
ngbe_setup_loopback_link(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_LB, NGBE_MACRXCFG_LB);

	msec_delay(50);
}

/*
 * Start Transmit and Receive Units.
 */
int
ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	struct ngbe_rx_queue *rxq;
	uint32_t dmatxctl;
	uint32_t rxctrl;
	uint16_t i;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
			NGBE_TXCFG_HTHRESH_MASK |
			NGBE_TXCFG_WTHRESH_MASK,
			NGBE_TXCFG_HTHRESH(txq->hthresh) |
			NGBE_TXCFG_WTHRESH(txq->wthresh));
	}

	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
	dmatxctl |= NGBE_DMATXCTRL_ENA;
	wr32(hw, NGBE_DMATXCTRL, dmatxctl);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start == 0) {
			ret = ngbe_dev_tx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start == 0) {
			ret = ngbe_dev_rx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable Receive engine */
	rxctrl = rd32(hw, NGBE_PBRXCTL);
	rxctrl |= NGBE_PBRXCTL_ENA;
	hw->mac.enable_rx_dma(hw, rxctrl);

	/* If loopback mode is enabled, set up the link accordingly */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		ngbe_setup_loopback_link(hw);

	return 0;
}
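
/*
 * Save the Rx queue's base-address and config registers so they can be
 * written back, with the enable bit cleared, after the queue is disabled.
 */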
static void
ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
}
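
/*
 * Restore the saved Rx queue registers, keeping the queue disabled until it
 * is explicitly re-enabled.
 */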
static void
ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
}
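
/* Tx counterpart of ngbe_dev_save_rx_queue(). */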
static void
ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
}
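
/* Tx counterpart of ngbe_dev_store_rx_queue(): restore with ENA cleared. */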
static void
ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
}

/*
 * Start Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	/* Allocate buffers for descriptor rings */
	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
		return -1;
	}

	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	rxdctl |= NGBE_RXCFG_ENA;
	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);

	/* Wait until Rx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
	rte_wmb();
	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);

	/* Wait until Rx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);

	ngbe_rx_queue_release_mbufs(rxq);
	ngbe_reset_rx_queue(adapter, rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);

	/* Wait until Tx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
			     tx_queue_id);

	rte_wmb();
	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	/* Wait until Tx queue is empty */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_us(RTE_NGBE_WAIT_100_US);
		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
	} while (--poll_ms && (txtdh != txtdt));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
			     tx_queue_id);

	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);

	/* Wait until Tx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			     tx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_tx_queue(hw, txq->reg_idx);

	if (txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->reset(txq);
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}