1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
14 #include "ngbe_logs.h"
15 #include "base/ngbe.h"
16 #include "ngbe_ethdev.h"
17 #include "ngbe_rxtx.h"
19 /* Bit mask to indicate which bits are required for building the Tx context */
20 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
21 RTE_MBUF_F_TX_OUTER_IPV6 |
22 RTE_MBUF_F_TX_OUTER_IPV4 |
25 RTE_MBUF_F_TX_L4_MASK |
26 RTE_MBUF_F_TX_TCP_SEG |
27 RTE_MBUF_F_TX_TUNNEL_MASK |
28 RTE_MBUF_F_TX_OUTER_IP_CKSUM);
29 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
30 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK)
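/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how
 * the NOTSUP mask above is meant to be used. A packet requesting any Tx
 * offload flag outside NGBE_TX_OFFLOAD_MASK cannot be handled by this
 * hardware; ngbe_prep_pkts() below performs the same test per mbuf.
 */
static inline int
ngbe_example_tx_offload_supported(const struct rte_mbuf *m)
{
	return (m->ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) == 0;
}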
33 * Prefetch a cache line into all cache levels.
35 #define rte_ngbe_prefetch(p) rte_prefetch0(p)
37 /*********************************************************************
41 **********************************************************************/
44 * Check for descriptors with their DD bit set and free mbufs.
45 * Return the total number of buffers freed.
47 static __rte_always_inline int
48 ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
50 struct ngbe_tx_entry *txep;
53 struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
55 /* check DD bit on threshold descriptor */
56 status = txq->tx_ring[txq->tx_next_dd].dw3;
57 if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
58 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
59 ngbe_set32_masked(txq->tdc_reg_addr,
60 NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
65 * first buffer to free from S/W ring is at index
66 * tx_next_dd - (tx_free_thresh-1)
68 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
69 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
70 /* free buffers one at a time */
71 m = rte_pktmbuf_prefree_seg(txep->mbuf);
74 if (unlikely(m == NULL))
77 if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
78 (nb_free > 0 && m->pool != free[0]->pool)) {
79 rte_mempool_put_bulk(free[0]->pool,
80 (void **)free, nb_free);
88 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
90 /* buffers were freed, update counters */
91 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
92 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
93 if (txq->tx_next_dd >= txq->nb_tx_desc)
94 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
96 return txq->tx_free_thresh;
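/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * index bookkeeping done at the end of ngbe_tx_free_bufs(). After a batch
 * of tx_free_thresh buffers is returned to the mempool, the "done"
 * checkpoint advances by one threshold and wraps so it always sits on the
 * last descriptor of a threshold-sized window.
 */
static inline uint16_t
ngbe_example_next_dd(uint16_t tx_next_dd, uint16_t tx_free_thresh,
		     uint16_t nb_tx_desc)
{
	uint16_t next = (uint16_t)(tx_next_dd + tx_free_thresh);

	if (next >= nb_tx_desc)
		next = (uint16_t)(tx_free_thresh - 1);
	return next;
}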
99 /* Populate 4 descriptors with data from 4 mbufs */
101 tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
103 uint64_t buf_dma_addr;
107 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
108 buf_dma_addr = rte_mbuf_data_iova(*pkts);
109 pkt_len = (*pkts)->data_len;
111 /* write data to descriptor */
112 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
113 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
114 NGBE_TXD_DATLEN(pkt_len));
115 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
117 rte_prefetch0(&(*pkts)->pool);
121 /* Populate 1 descriptor with data from 1 mbuf */
123 tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
125 uint64_t buf_dma_addr;
128 buf_dma_addr = rte_mbuf_data_iova(*pkts);
129 pkt_len = (*pkts)->data_len;
131 /* write data to descriptor */
132 txdp->qw0 = cpu_to_le64(buf_dma_addr);
133 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
134 NGBE_TXD_DATLEN(pkt_len));
135 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
137 rte_prefetch0(&(*pkts)->pool);
141 * Fill H/W descriptor ring with mbuf data.
142 * Copy mbuf pointers to the S/W ring.
145 ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
148 volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
149 struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
150 const int N_PER_LOOP = 4;
151 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
152 int mainpart, leftover;
156 * Process most of the packets in chunks of N pkts. Any
157 * leftover packets will get processed one at a time.
159 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
160 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
161 for (i = 0; i < mainpart; i += N_PER_LOOP) {
162 /* Copy N mbuf pointers to the S/W ring */
163 for (j = 0; j < N_PER_LOOP; ++j)
164 (txep + i + j)->mbuf = *(pkts + i + j);
165 tx4(txdp + i, pkts + i);
168 if (unlikely(leftover > 0)) {
169 for (i = 0; i < leftover; ++i) {
170 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
171 tx1(txdp + mainpart + i, pkts + mainpart + i);
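/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * mainpart/leftover split used by ngbe_tx_fill_hw_ring() above, shown as
 * plain arithmetic. For example, nb_pkts = 13 gives mainpart = 12 (three
 * tx4() calls) and leftover = 1 (one tx1() call).
 */
static inline void
ngbe_example_burst_split(uint16_t nb_pkts, uint16_t *mainpart,
			 uint16_t *leftover)
{
	*mainpart = (uint16_t)(nb_pkts & ~3u);	/* largest multiple of 4 */
	*leftover = (uint16_t)(nb_pkts & 3u);	/* remaining 0..3 packets */
}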
176 static inline uint16_t
177 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
180 struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
184 * Begin scanning the H/W ring for done descriptors when the
185 * number of available descriptors drops below tx_free_thresh.
186 * For each done descriptor, free the associated buffer.
188 if (txq->nb_tx_free < txq->tx_free_thresh)
189 ngbe_tx_free_bufs(txq);
191 /* Only use descriptors that are available */
192 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
193 if (unlikely(nb_pkts == 0))
196 /* Use exactly nb_pkts descriptors */
197 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
200 * At this point, we know there are enough descriptors in the
201 * ring to transmit all the packets. This assumes that each
202 * mbuf contains a single segment, and that no new offloads
203 * are expected, which would require a new context descriptor.
207 * See if we're going to wrap-around. If so, handle the top
208 * of the descriptor ring first, then do the bottom. If not,
209 * the processing looks just like the "bottom" part anyway...
211 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
212 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
213 ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
217 /* Fill H/W descriptor ring with mbuf data */
218 ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
219 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
222 * Check for wrap-around. This would only happen if we used
223 * up to the last descriptor in the ring, no more, no less.
225 if (txq->tx_tail >= txq->nb_tx_desc)
228 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
229 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
230 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
232 /* update tail pointer */
234 ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
240 ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
245 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
246 if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
247 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
249 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
251 while (nb_pkts != 0) {
254 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
255 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
256 nb_tx = (uint16_t)(nb_tx + ret);
257 nb_pkts = (uint16_t)(nb_pkts - ret);
266 ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
267 volatile struct ngbe_tx_ctx_desc *ctx_txd,
268 uint64_t ol_flags, union ngbe_tx_offload tx_offload)
270 union ngbe_tx_offload tx_offload_mask;
271 uint32_t type_tucmd_mlhl;
272 uint32_t mss_l4len_idx;
274 uint32_t vlan_macip_lens;
275 uint32_t tunnel_seed;
277 ctx_idx = txq->ctx_curr;
278 tx_offload_mask.data[0] = 0;
279 tx_offload_mask.data[1] = 0;
281 /* Specify which HW CTX to upload. */
282 mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
283 type_tucmd_mlhl = NGBE_TXD_CTXT;
285 tx_offload_mask.ptid |= ~0;
286 type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
288 /* check if TCP segmentation is required for this packet */
289 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
290 tx_offload_mask.l2_len |= ~0;
291 tx_offload_mask.l3_len |= ~0;
292 tx_offload_mask.l4_len |= ~0;
293 tx_offload_mask.tso_segsz |= ~0;
294 mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
295 mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
296 } else { /* no TSO, check if hardware checksum is needed */
297 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
298 tx_offload_mask.l2_len |= ~0;
299 tx_offload_mask.l3_len |= ~0;
302 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
303 case RTE_MBUF_F_TX_UDP_CKSUM:
305 NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
306 tx_offload_mask.l2_len |= ~0;
307 tx_offload_mask.l3_len |= ~0;
309 case RTE_MBUF_F_TX_TCP_CKSUM:
311 NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
312 tx_offload_mask.l2_len |= ~0;
313 tx_offload_mask.l3_len |= ~0;
315 case RTE_MBUF_F_TX_SCTP_CKSUM:
317 NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
318 tx_offload_mask.l2_len |= ~0;
319 tx_offload_mask.l3_len |= ~0;
326 vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
328 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
329 tx_offload_mask.outer_tun_len |= ~0;
330 tx_offload_mask.outer_l2_len |= ~0;
331 tx_offload_mask.outer_l3_len |= ~0;
332 tx_offload_mask.l2_len |= ~0;
333 tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
334 tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
336 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
337 case RTE_MBUF_F_TX_TUNNEL_IPIP:
338 /* for non UDP / GRE tunneling, set to 0b */
341 PMD_TX_LOG(ERR, "Tunnel type not supported");
344 vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
347 vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
350 txq->ctx_cache[ctx_idx].flags = ol_flags;
351 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
352 tx_offload_mask.data[0] & tx_offload.data[0];
353 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
354 tx_offload_mask.data[1] & tx_offload.data[1];
355 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
357 ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
358 ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
359 ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
360 ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
364 * Check which hardware context can be used. Use the existing match
365 * or create a new context descriptor.
367 static inline uint32_t
368 what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
369 union ngbe_tx_offload tx_offload)
371 /* Check whether it matches the currently used context */
372 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
373 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
374 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
375 & tx_offload.data[0])) &&
376 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
377 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
378 & tx_offload.data[1]))))
379 return txq->ctx_curr;
381 /* Otherwise, check whether it matches the next context */
383 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
384 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
385 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
386 & tx_offload.data[0])) &&
387 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
388 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
389 & tx_offload.data[1]))))
390 return txq->ctx_curr;
392 /* Mismatch, use the previous context */
396 static inline uint32_t
397 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
401 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
403 tmp |= NGBE_TXD_L4CS;
405 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
407 tmp |= NGBE_TXD_IPCS;
409 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
411 tmp |= NGBE_TXD_EIPCS;
413 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
415 /* implies IPv4 cksum */
416 if (ol_flags & RTE_MBUF_F_TX_IPV4)
417 tmp |= NGBE_TXD_IPCS;
418 tmp |= NGBE_TXD_L4CS;
424 static inline uint32_t
425 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
427 uint32_t cmdtype = 0;
429 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
430 cmdtype |= NGBE_TXD_TSE;
434 static inline uint8_t
435 tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
440 return ngbe_encode_ptype(ptype);
442 /* Only support flags in NGBE_TX_OFFLOAD_MASK */
443 tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
446 ptype = RTE_PTYPE_L2_ETHER;
449 if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
450 ptype |= RTE_PTYPE_L3_IPV4;
451 else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
452 ptype |= RTE_PTYPE_L3_IPV6;
454 if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
455 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
456 else if (oflags & (RTE_MBUF_F_TX_IPV6))
457 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
460 switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
461 case RTE_MBUF_F_TX_TCP_CKSUM:
462 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
464 case RTE_MBUF_F_TX_UDP_CKSUM:
465 ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
467 case RTE_MBUF_F_TX_SCTP_CKSUM:
468 ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
472 if (oflags & RTE_MBUF_F_TX_TCP_SEG)
473 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
476 switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
477 case RTE_MBUF_F_TX_TUNNEL_IPIP:
478 case RTE_MBUF_F_TX_TUNNEL_IP:
479 ptype |= RTE_PTYPE_L2_ETHER |
485 return ngbe_encode_ptype(ptype);
488 /* Reset transmit descriptors after they have been used */
490 ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
492 struct ngbe_tx_entry *sw_ring = txq->sw_ring;
493 volatile struct ngbe_tx_desc *txr = txq->tx_ring;
494 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
495 uint16_t nb_tx_desc = txq->nb_tx_desc;
496 uint16_t desc_to_clean_to;
497 uint16_t nb_tx_to_clean;
500 /* Determine the last descriptor needing to be cleaned */
501 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
502 if (desc_to_clean_to >= nb_tx_desc)
503 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
505 /* Check to make sure the last descriptor to clean is done */
506 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
507 status = txr[desc_to_clean_to].dw3;
508 if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
510 "Tx descriptor %4u is not done "
511 "(port=%d queue=%d)",
513 txq->port_id, txq->queue_id);
514 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
515 ngbe_set32_masked(txq->tdc_reg_addr,
516 NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
517 /* Failed to clean any descriptors, better luck next time */
521 /* Figure out how many descriptors will be cleaned */
522 if (last_desc_cleaned > desc_to_clean_to)
523 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
526 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
530 "Cleaning %4u Tx descriptors: %4u to %4u (port=%d queue=%d)",
531 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
532 txq->port_id, txq->queue_id);
535 * The last descriptor to clean is done, so that means all the
536 * descriptors from the last descriptor that was cleaned
537 * up to the last descriptor with the RS bit set
538 * are done. Only reset the threshold descriptor.
540 txr[desc_to_clean_to].dw3 = 0;
542 /* Update the txq to reflect the last descriptor that was cleaned */
543 txq->last_desc_cleaned = desc_to_clean_to;
544 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
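/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * modular "how many descriptors were cleaned" computation used above. On a
 * ring of ring_size entries, the distance from last_desc_cleaned to
 * desc_to_clean_to must account for a possible wrap-around.
 */
static inline uint16_t
ngbe_example_ring_dist(uint16_t from, uint16_t to, uint16_t ring_size)
{
	return (uint16_t)((to >= from) ? (to - from) : (ring_size - from + to));
}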
551 ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
554 struct ngbe_tx_queue *txq;
555 struct ngbe_tx_entry *sw_ring;
556 struct ngbe_tx_entry *txe, *txn;
557 volatile struct ngbe_tx_desc *txr;
558 volatile struct ngbe_tx_desc *txd;
559 struct rte_mbuf *tx_pkt;
560 struct rte_mbuf *m_seg;
561 uint64_t buf_dma_addr;
562 uint32_t olinfo_status;
563 uint32_t cmd_type_len;
574 union ngbe_tx_offload tx_offload;
576 tx_offload.data[0] = 0;
577 tx_offload.data[1] = 0;
579 sw_ring = txq->sw_ring;
581 tx_id = txq->tx_tail;
582 txe = &sw_ring[tx_id];
584 /* Determine if the descriptor ring needs to be cleaned. */
585 if (txq->nb_tx_free < txq->tx_free_thresh)
586 ngbe_xmit_cleanup(txq);
588 rte_prefetch0(&txe->mbuf->pool);
591 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
594 pkt_len = tx_pkt->pkt_len;
597 * Determine how many (if any) context descriptors
598 * are needed for offload functionality.
600 ol_flags = tx_pkt->ol_flags;
602 /* If hardware offload required */
603 tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
605 tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
606 tx_pkt->packet_type);
607 tx_offload.l2_len = tx_pkt->l2_len;
608 tx_offload.l3_len = tx_pkt->l3_len;
609 tx_offload.l4_len = tx_pkt->l4_len;
610 tx_offload.tso_segsz = tx_pkt->tso_segsz;
611 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
612 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
613 tx_offload.outer_tun_len = 0;
615 /* Decide whether a new context must be built or an existing one can be reused */
616 ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
617 /* Only allocate context descriptor if required */
618 new_ctx = (ctx == NGBE_CTX_NUM);
623 * Keep track of how many descriptors are used this loop
624 * This will always be the number of segments + the number of
625 * Context descriptors required to transmit the packet
627 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
630 * The number of descriptors that must be allocated for a
631 * packet is the number of segments of that packet, plus 1
632 * Context Descriptor for the hardware offload, if any.
633 * Determine the last Tx descriptor to allocate in the Tx ring
634 * for the packet, starting from the current position (tx_id)
637 tx_last = (uint16_t)(tx_id + nb_used - 1);
640 if (tx_last >= txq->nb_tx_desc)
641 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
643 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
644 " tx_first=%u tx_last=%u",
645 (uint16_t)txq->port_id,
646 (uint16_t)txq->queue_id,
652 * Make sure there are enough Tx descriptors available to
653 * transmit the entire packet.
654 * nb_used better be less than or equal to txq->tx_free_thresh
656 if (nb_used > txq->nb_tx_free) {
658 "Not enough free Tx descriptors "
659 "nb_used=%4u nb_free=%4u "
660 "(port=%d queue=%d)",
661 nb_used, txq->nb_tx_free,
662 txq->port_id, txq->queue_id);
664 if (ngbe_xmit_cleanup(txq) != 0) {
665 /* Could not clean any descriptors */
671 /* nb_used better be <= txq->tx_free_thresh */
672 if (unlikely(nb_used > txq->tx_free_thresh)) {
674 "The number of descriptors needed to "
675 "transmit the packet exceeds the "
676 "RS bit threshold. This will impact "
678 "nb_used=%4u nb_free=%4u "
679 "tx_free_thresh=%4u. "
680 "(port=%d queue=%d)",
681 nb_used, txq->nb_tx_free,
683 txq->port_id, txq->queue_id);
685 * Loop here until there are enough Tx
686 * descriptors or until the ring cannot be
689 while (nb_used > txq->nb_tx_free) {
690 if (ngbe_xmit_cleanup(txq) != 0) {
692 * Could not clean any
704 * By now there are enough free Tx descriptors to transmit
709 * Set common flags of all Tx Data Descriptors.
711 * The following bits must be set in the first Data Descriptor
712 * and are ignored in the other ones:
715 * The following bits must only be set in the last Data
719 cmd_type_len = NGBE_TXD_FCS;
723 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
724 /* when TSO is on, the paylen in the descriptor is
725 * not the packet length but the TCP payload length
727 pkt_len -= (tx_offload.l2_len +
728 tx_offload.l3_len + tx_offload.l4_len);
730 (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
731 ? tx_offload.outer_l2_len +
732 tx_offload.outer_l3_len : 0;
736 * Setup the Tx Context Descriptor if required
739 volatile struct ngbe_tx_ctx_desc *ctx_txd;
741 ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
744 txn = &sw_ring[txe->next_id];
745 rte_prefetch0(&txn->mbuf->pool);
747 if (txe->mbuf != NULL) {
748 rte_pktmbuf_free_seg(txe->mbuf);
752 ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
755 txe->last_id = tx_last;
756 tx_id = txe->next_id;
761 * Set up the Tx Data Descriptor.
762 * This path is taken whether a new context
763 * descriptor was built or an existing one is reused.
765 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
767 tx_desc_cksum_flags_to_olinfo(ol_flags);
768 olinfo_status |= NGBE_TXD_IDX(ctx);
771 olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
776 txn = &sw_ring[txe->next_id];
777 rte_prefetch0(&txn->mbuf->pool);
779 if (txe->mbuf != NULL)
780 rte_pktmbuf_free_seg(txe->mbuf);
784 * Set up Transmit Data Descriptor.
786 slen = m_seg->data_len;
787 buf_dma_addr = rte_mbuf_data_iova(m_seg);
788 txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
789 txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
790 txd->dw3 = rte_cpu_to_le_32(olinfo_status);
791 txe->last_id = tx_last;
792 tx_id = txe->next_id;
795 } while (m_seg != NULL);
798 * The last packet data descriptor needs End Of Packet (EOP)
800 cmd_type_len |= NGBE_TXD_EOP;
801 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
803 txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
811 * Set the Transmit Descriptor Tail (TDT)
813 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
814 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
815 (uint16_t)tx_id, (uint16_t)nb_tx);
816 ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
817 txq->tx_tail = tx_id;
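/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * TSO payload-length adjustment described in ngbe_xmit_pkts() above. For a
 * TSO packet the descriptor carries the TCP payload length, i.e. the total
 * packet length minus the L2/L3/L4 headers, and minus the outer headers as
 * well when a tunnel offload is requested. For example, a 1514-byte frame
 * with 14 + 20 + 20 bytes of headers yields a paylen of 1460.
 */
static inline uint32_t
ngbe_example_tso_paylen(const struct rte_mbuf *m)
{
	uint32_t paylen = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);

	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
		paylen -= m->outer_l2_len + m->outer_l3_len;
	return paylen;
}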
822 /*********************************************************************
826 **********************************************************************/
828 ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
833 struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
835 for (i = 0; i < nb_pkts; i++) {
837 ol_flags = m->ol_flags;
840 * Check if packet meets requirements for number of segments
842 * NOTE: for ngbe it's always (40 - WTHRESH) for both TSO and
846 if (m->nb_segs > NGBE_TX_MAX_SEG - txq->wthresh) {
851 if (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) {
852 rte_errno = ENOTSUP;
856 #ifdef RTE_ETHDEV_DEBUG_TX
857 ret = rte_validate_tx_offload(m);
863 ret = rte_net_intel_cksum_prepare(m);
873 /*********************************************************************
877 **********************************************************************/
878 static inline uint32_t
879 ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
881 uint16_t ptid = NGBE_RXD_PTID(pkt_info);
885 return ngbe_decode_ptype(ptid);
888 static inline uint64_t
889 rx_desc_error_to_pkt_flags(uint32_t rx_status)
891 uint64_t pkt_flags = 0;
893 /* checksum offload can't be disabled */
894 if (rx_status & NGBE_RXD_STAT_IPCS)
895 pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
896 ? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
898 if (rx_status & NGBE_RXD_STAT_L4CS)
899 pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
900 ? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
902 if (rx_status & NGBE_RXD_STAT_EIPCS &&
903 rx_status & NGBE_RXD_ERR_EIPCS)
904 pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
910 * LOOK_AHEAD defines how many desc statuses to check beyond the
911 * current descriptor.
912 * It must be a pound define for optimal performance.
913 * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
914 * function only works with LOOK_AHEAD=8.
917 #if (LOOK_AHEAD != 8)
918 #error "PMD NGBE: LOOK_AHEAD must be 8\n"
921 ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
923 volatile struct ngbe_rx_desc *rxdp;
924 struct ngbe_rx_entry *rxep;
929 uint32_t s[LOOK_AHEAD];
930 uint32_t pkt_info[LOOK_AHEAD];
934 /* get references to current descriptor and S/W ring entry */
935 rxdp = &rxq->rx_ring[rxq->rx_tail];
936 rxep = &rxq->sw_ring[rxq->rx_tail];
938 status = rxdp->qw1.lo.status;
939 /* check to make sure there is at least 1 packet to receive */
940 if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
944 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
945 * reference packets that are ready to be received.
947 for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
948 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
949 /* Read descriptor statuses; the acquire fence below orders them before later field reads */
950 for (j = 0; j < LOOK_AHEAD; j++)
951 s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
953 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
955 /* Compute how many status bits were set */
956 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
957 (s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
960 for (j = 0; j < nb_dd; j++)
961 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
965 /* Translate descriptor info to mbuf format */
966 for (j = 0; j < nb_dd; ++j) {
968 pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
970 mb->data_len = pkt_len;
971 mb->pkt_len = pkt_len;
973 /* convert descriptor fields to rte mbuf flags */
974 pkt_flags = rx_desc_error_to_pkt_flags(s[j]);
975 mb->ol_flags = pkt_flags;
977 ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
981 /* Move mbuf pointers from the S/W ring to the stage */
982 for (j = 0; j < LOOK_AHEAD; ++j)
983 rxq->rx_stage[i + j] = rxep[j].mbuf;
985 /* stop if all requested packets could not be received */
986 if (nb_dd != LOOK_AHEAD)
990 /* clear software ring entries so we can cleanup correctly */
991 for (i = 0; i < nb_rx; ++i)
992 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
998 ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
1000 volatile struct ngbe_rx_desc *rxdp;
1001 struct ngbe_rx_entry *rxep;
1002 struct rte_mbuf *mb;
1007 /* allocate buffers in bulk directly into the S/W ring */
1008 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1009 rxep = &rxq->sw_ring[alloc_idx];
1010 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1011 rxq->rx_free_thresh);
1012 if (unlikely(diag != 0))
1015 rxdp = &rxq->rx_ring[alloc_idx];
1016 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1017 /* populate the static rte mbuf fields */
1020 mb->port = rxq->port_id;
1022 rte_mbuf_refcnt_set(mb, 1);
1023 mb->data_off = RTE_PKTMBUF_HEADROOM;
1025 /* populate the descriptors */
1026 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1027 NGBE_RXD_HDRADDR(&rxdp[i], 0);
1028 NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1031 /* update state of internal queue structure */
1032 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1033 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1034 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1040 static inline uint16_t
1041 ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1044 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1047 /* how many packets are ready to return? */
1048 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1050 /* copy mbuf pointers to the application's packet list */
1051 for (i = 0; i < nb_pkts; ++i)
1052 rx_pkts[i] = stage[i];
1054 /* update internal queue state */
1055 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1056 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1061 static inline uint16_t
1062 ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1065 struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
1066 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1069 /* Any previously recv'd pkts will be returned from the Rx stage */
1070 if (rxq->rx_nb_avail)
1071 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1073 /* Scan the H/W ring for packets to receive */
1074 nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
1076 /* update internal queue state */
1077 rxq->rx_next_avail = 0;
1078 rxq->rx_nb_avail = nb_rx;
1079 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1081 /* if required, allocate new buffers to replenish descriptors */
1082 if (rxq->rx_tail > rxq->rx_free_trigger) {
1083 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1085 if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
1088 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1089 "queue_id=%u", (uint16_t)rxq->port_id,
1090 (uint16_t)rxq->queue_id);
1092 dev->data->rx_mbuf_alloc_failed +=
1093 rxq->rx_free_thresh;
1096 * Need to rewind any previous receives if we cannot
1097 * allocate new buffers to replenish the old ones.
1099 rxq->rx_nb_avail = 0;
1100 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1101 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1102 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1107 /* update tail pointer */
1109 ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1112 if (rxq->rx_tail >= rxq->nb_rx_desc)
1115 /* received any packets this loop? */
1116 if (rxq->rx_nb_avail)
1117 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1122 /* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
1124 ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1129 if (unlikely(nb_pkts == 0))
1132 if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
1133 return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1135 /* request is relatively large, chunk it up */
1140 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
1141 ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1142 nb_rx = (uint16_t)(nb_rx + ret);
1143 nb_pkts = (uint16_t)(nb_pkts - ret);
1152 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1155 struct ngbe_rx_queue *rxq;
1156 volatile struct ngbe_rx_desc *rx_ring;
1157 volatile struct ngbe_rx_desc *rxdp;
1158 struct ngbe_rx_entry *sw_ring;
1159 struct ngbe_rx_entry *rxe;
1160 struct rte_mbuf *rxm;
1161 struct rte_mbuf *nmb;
1162 struct ngbe_rx_desc rxd;
1175 rx_id = rxq->rx_tail;
1176 rx_ring = rxq->rx_ring;
1177 sw_ring = rxq->sw_ring;
1178 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1179 while (nb_rx < nb_pkts) {
1181 * The order of operations here is important as the DD status
1182 * bit must not be read after any other descriptor fields.
1183 * rx_ring and rxdp are pointing to volatile data so the order
1184 * of accesses cannot be reordered by the compiler. If they were
1185 * not volatile, they could be reordered which could lead to
1186 * using invalid descriptor fields when read from rxd.
1188 rxdp = &rx_ring[rx_id];
1189 staterr = rxdp->qw1.lo.status;
1190 if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
1197 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
1198 * is likely to be invalid and to be dropped by the various
1199 * validation checks performed by the network stack.
1201 * Allocate a new mbuf to replenish the RX ring descriptor.
1202 * If the allocation fails:
1203 * - arrange for that Rx descriptor to be the first one
1204 * being parsed the next time the receive function is
1205 * invoked [on the same queue].
1207 * - Stop parsing the Rx ring and return immediately.
1209 * This policy does not drop the packet received in the Rx
1210 * descriptor for which the allocation of a new mbuf failed.
1211 * Thus, it allows that packet to be later retrieved if
1212 * mbufs have been freed in the meantime.
1213 * As a side effect, holding Rx descriptors instead of
1214 * systematically giving them back to the NIC may lead to
1215 * Rx ring exhaustion situations.
1216 * However, the NIC can gracefully prevent such situations
1217 * from happening by sending specific "back-pressure" flow control
1218 * frames to its peer(s).
1221 "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
1222 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1223 (uint16_t)rx_id, (uint32_t)staterr,
1224 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1226 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1229 "Rx mbuf alloc failed port_id=%u queue_id=%u",
1230 (uint16_t)rxq->port_id,
1231 (uint16_t)rxq->queue_id);
1232 dev->data->rx_mbuf_alloc_failed++;
1237 rxe = &sw_ring[rx_id];
1239 if (rx_id == rxq->nb_rx_desc)
1242 /* Prefetch next mbuf while processing current one. */
1243 rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
1246 * When next Rx descriptor is on a cache-line boundary,
1247 * prefetch the next 4 Rx descriptors and the next 8 pointers
1250 if ((rx_id & 0x3) == 0) {
1251 rte_ngbe_prefetch(&rx_ring[rx_id]);
1252 rte_ngbe_prefetch(&sw_ring[rx_id]);
1257 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1258 NGBE_RXD_HDRADDR(rxdp, 0);
1259 NGBE_RXD_PKTADDR(rxdp, dma_addr);
1262 * Initialize the returned mbuf.
1263 * 1) setup generic mbuf fields:
1264 * - number of segments,
1267 * - Rx port identifier.
1268 * 2) integrate hardware offload data, if any:
1269 * - IP checksum flag,
1272 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1274 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1275 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1278 rxm->pkt_len = pkt_len;
1279 rxm->data_len = pkt_len;
1280 rxm->port = rxq->port_id;
1282 pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1283 pkt_flags = rx_desc_error_to_pkt_flags(staterr);
1284 rxm->ol_flags = pkt_flags;
1285 rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1289 * Store the mbuf address into the next entry of the array
1290 * of returned packets.
1292 rx_pkts[nb_rx++] = rxm;
1294 rxq->rx_tail = rx_id;
1297 * If the number of free Rx descriptors is greater than the Rx free
1298 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1300 * Update the RDT with the value of the last processed Rx descriptor
1301 * minus 1, to guarantee that the RDT register is never equal to the
1302 * RDH register, which creates a "full" ring situation from the
1303 * hardware point of view...
1305 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1306 if (nb_hold > rxq->rx_free_thresh) {
1308 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
1309 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1310 (uint16_t)rx_id, (uint16_t)nb_hold,
1312 rx_id = (uint16_t)((rx_id == 0) ?
1313 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1314 ngbe_set32(rxq->rdt_reg_addr, rx_id);
1317 rxq->nb_rx_hold = nb_hold;
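/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * RDT write-back rule described above. The tail register is programmed
 * with the index of the last processed descriptor minus one (with wrap),
 * so that RDT can never catch up with RDH and make the ring look full to
 * the hardware.
 */
static inline uint16_t
ngbe_example_rdt_value(uint16_t rx_id, uint16_t nb_rx_desc)
{
	return (uint16_t)((rx_id == 0) ? (nb_rx_desc - 1) : (rx_id - 1));
}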
1322 * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1324 * Fill the following info in the HEAD buffer of the Rx cluster:
1325 * - RX port identifier
1326 * - hardware offload data, if any:
1327 * - IP checksum flag
1329 * @head HEAD of the packet cluster
1330 * @desc HW descriptor to get data from
1331 * @rxq Pointer to the Rx queue
1334 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
1335 struct ngbe_rx_queue *rxq, uint32_t staterr)
1340 head->port = rxq->port_id;
1342 pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1343 pkt_flags = rx_desc_error_to_pkt_flags(staterr);
1344 head->ol_flags = pkt_flags;
1345 head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1350 * ngbe_recv_pkts_sc - receive handler for scatter case.
1352 * @rx_queue Rx queue handle
1353 * @rx_pkts table of received packets
1354 * @nb_pkts size of rx_pkts table
1355 * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1357 * Returns the number of received packets/clusters (according to the "bulk
1358 * receive" interface).
1360 static inline uint16_t
1361 ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1364 struct ngbe_rx_queue *rxq = rx_queue;
1365 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1366 volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
1367 struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
1368 struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1369 uint16_t rx_id = rxq->rx_tail;
1371 uint16_t nb_hold = rxq->nb_rx_hold;
1372 uint16_t prev_id = rxq->rx_tail;
1374 while (nb_rx < nb_pkts) {
1376 struct ngbe_rx_entry *rxe;
1377 struct ngbe_scattered_rx_entry *sc_entry;
1378 struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
1379 struct ngbe_rx_entry *next_rxe = NULL;
1380 struct rte_mbuf *first_seg;
1381 struct rte_mbuf *rxm;
1382 struct rte_mbuf *nmb = NULL;
1383 struct ngbe_rx_desc rxd;
1386 volatile struct ngbe_rx_desc *rxdp;
1390 rxdp = &rx_ring[rx_id];
1391 staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1393 if (!(staterr & NGBE_RXD_STAT_DD))
1398 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1399 "staterr=0x%x data_len=%u",
1400 rxq->port_id, rxq->queue_id, rx_id, staterr,
1401 rte_le_to_cpu_16(rxd.qw1.hi.len));
1404 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1406 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed "
1407 "port_id=%u queue_id=%u",
1408 rxq->port_id, rxq->queue_id);
1410 dev->data->rx_mbuf_alloc_failed++;
1413 } else if (nb_hold > rxq->rx_free_thresh) {
1414 uint16_t next_rdt = rxq->rx_free_trigger;
1416 if (!ngbe_rx_alloc_bufs(rxq, false)) {
1418 ngbe_set32_relaxed(rxq->rdt_reg_addr,
1420 nb_hold -= rxq->rx_free_thresh;
1422 PMD_RX_LOG(DEBUG, "Rx bulk alloc failed "
1423 "port_id=%u queue_id=%u",
1424 rxq->port_id, rxq->queue_id);
1426 dev->data->rx_mbuf_alloc_failed++;
1432 rxe = &sw_ring[rx_id];
1433 eop = staterr & NGBE_RXD_STAT_EOP;
1435 next_id = rx_id + 1;
1436 if (next_id == rxq->nb_rx_desc)
1439 /* Prefetch next mbuf while processing current one. */
1440 rte_ngbe_prefetch(sw_ring[next_id].mbuf);
1443 * When next Rx descriptor is on a cache-line boundary,
1444 * prefetch the next 4 RX descriptors and the next 4 pointers
1447 if ((next_id & 0x3) == 0) {
1448 rte_ngbe_prefetch(&rx_ring[next_id]);
1449 rte_ngbe_prefetch(&sw_ring[next_id]);
1456 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1458 * Update Rx descriptor with the physical address of the
1459 * new data buffer of the newly allocated mbuf.
1463 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1464 NGBE_RXD_HDRADDR(rxdp, 0);
1465 NGBE_RXD_PKTADDR(rxdp, dma);
1471 * Set data length & data buffer address of mbuf.
1473 data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1474 rxm->data_len = data_len;
1480 next_sc_entry = &sw_sc_ring[nextp_id];
1481 next_rxe = &sw_ring[nextp_id];
1482 rte_ngbe_prefetch(next_rxe);
1485 sc_entry = &sw_sc_ring[rx_id];
1486 first_seg = sc_entry->fbuf;
1487 sc_entry->fbuf = NULL;
1490 * If this is the first buffer of the received packet,
1491 * set the pointer to the first mbuf of the packet and
1492 * initialize its context.
1493 * Otherwise, update the total length and the number of segments
1494 * of the current scattered packet, and update the pointer to
1495 * the last mbuf of the current packet.
1497 if (first_seg == NULL) {
1499 first_seg->pkt_len = data_len;
1500 first_seg->nb_segs = 1;
1502 first_seg->pkt_len += data_len;
1503 first_seg->nb_segs++;
1510 * If this is not the last buffer of the received packet, update
1511 * the pointer to the first mbuf at the NEXTP entry in the
1512 * sw_sc_ring and continue to parse the Rx ring.
1514 if (!eop && next_rxe) {
1515 rxm->next = next_rxe->mbuf;
1516 next_sc_entry->fbuf = first_seg;
1520 /* Initialize the first mbuf of the returned packet */
1521 ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1523 /* Deal with the case when HW CRC stripping is disabled. */
1524 first_seg->pkt_len -= rxq->crc_len;
1525 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1526 struct rte_mbuf *lp;
1528 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1531 first_seg->nb_segs--;
1532 lp->data_len -= rxq->crc_len - rxm->data_len;
1534 rte_pktmbuf_free_seg(rxm);
1536 rxm->data_len -= rxq->crc_len;
1539 /* Prefetch data of first segment, if configured to do so. */
1540 rte_packet_prefetch((char *)first_seg->buf_addr +
1541 first_seg->data_off);
1544 * Store the mbuf address into the next entry of the array
1545 * of returned packets.
1547 rx_pkts[nb_rx++] = first_seg;
1551 * Record index of the next Rx descriptor to probe.
1553 rxq->rx_tail = rx_id;
1556 * If the number of free Rx descriptors is greater than the Rx free
1557 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1559 * Update the RDT with the value of the last processed Rx descriptor
1560 * minus 1, to guarantee that the RDT register is never equal to the
1561 * RDH register, which creates a "full" ring situation from the
1562 * hardware point of view...
1564 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1565 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1566 "nb_hold=%u nb_rx=%u",
1567 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1570 ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1574 rxq->nb_rx_hold = nb_hold;
1579 ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1582 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
1586 ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1589 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
1592 /*********************************************************************
1594 * Queue management functions
1596 **********************************************************************/
1599 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
1603 if (txq->sw_ring != NULL) {
1604 for (i = 0; i < txq->nb_tx_desc; i++) {
1605 if (txq->sw_ring[i].mbuf != NULL) {
1606 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1607 txq->sw_ring[i].mbuf = NULL;
1614 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
1617 rte_free(txq->sw_ring);
1621 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
1624 if (txq->ops != NULL) {
1625 txq->ops->release_mbufs(txq);
1626 txq->ops->free_swring(txq);
1633 ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1635 ngbe_tx_queue_release(dev->data->tx_queues[qid]);
1638 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
1640 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
1642 static const struct ngbe_tx_desc zeroed_desc = {0};
1643 struct ngbe_tx_entry *txe = txq->sw_ring;
1646 /* Zero out HW ring memory */
1647 for (i = 0; i < txq->nb_tx_desc; i++)
1648 txq->tx_ring[i] = zeroed_desc;
1650 /* Initialize SW ring entries */
1651 prev = (uint16_t)(txq->nb_tx_desc - 1);
1652 for (i = 0; i < txq->nb_tx_desc; i++) {
1653 /* the ring can also be modified by hardware */
1654 volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
1656 txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
1659 txe[prev].next_id = i;
1663 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
1667 * Always allow 1 descriptor to be un-allocated to avoid
1668 * a H/W race condition
1670 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1671 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1673 memset((void *)&txq->ctx_cache, 0,
1674 NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
1677 static const struct ngbe_txq_ops def_txq_ops = {
1678 .release_mbufs = ngbe_tx_queue_release_mbufs,
1679 .free_swring = ngbe_tx_free_swring,
1680 .reset = ngbe_reset_tx_queue,
1683 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1684 * the queue parameters. Used in tx_queue_setup by primary process and then
1685 * in dev_init by secondary process when attaching to an existing ethdev.
1688 ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
1690 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1691 if (txq->offloads == 0 &&
1692 txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
1693 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1694 dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
1695 dev->tx_pkt_prepare = NULL;
1697 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1699 " - offloads = 0x%" PRIx64,
1702 " - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
1703 (unsigned long)txq->tx_free_thresh,
1704 (unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
1705 dev->tx_pkt_burst = ngbe_xmit_pkts;
1706 dev->tx_pkt_prepare = ngbe_prep_pkts;
1710 static const struct {
1711 eth_tx_burst_t pkt_burst;
1713 } ngbe_tx_burst_infos[] = {
1714 { ngbe_xmit_pkts_simple, "Scalar Simple"},
1715 { ngbe_xmit_pkts, "Scalar"},
1719 ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1720 struct rte_eth_burst_mode *mode)
1722 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
1726 for (i = 0; i < RTE_DIM(ngbe_tx_burst_infos); ++i) {
1727 if (pkt_burst == ngbe_tx_burst_infos[i].pkt_burst) {
1728 snprintf(mode->info, sizeof(mode->info), "%s",
1729 ngbe_tx_burst_infos[i].info);
1739 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1741 uint64_t tx_offload_capa;
1746 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1747 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1748 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1749 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1750 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1751 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1752 RTE_ETH_TX_OFFLOAD_UDP_TSO |
1753 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
1754 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
1755 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1756 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1758 return tx_offload_capa;
1762 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1765 unsigned int socket_id,
1766 const struct rte_eth_txconf *tx_conf)
1768 const struct rte_memzone *tz;
1769 struct ngbe_tx_queue *txq;
1771 uint16_t tx_free_thresh;
1774 PMD_INIT_FUNC_TRACE();
1775 hw = ngbe_dev_hw(dev);
1777 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1780 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
1781 * descriptors are used or if the number of descriptors required
1782 * to transmit a packet is greater than the number of free Tx
1784 * One descriptor in the Tx ring is used as a sentinel to avoid a
1785 * H/W race condition, hence the maximum threshold constraints.
1786 * When set to zero use default values.
1788 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1789 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1790 if (tx_free_thresh >= (nb_desc - 3)) {
1792 "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
1793 (unsigned int)tx_free_thresh,
1794 (int)dev->data->port_id, (int)queue_idx);
1798 if (nb_desc % tx_free_thresh != 0) {
1800 "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
1801 (unsigned int)tx_free_thresh,
1802 (int)dev->data->port_id, (int)queue_idx);
1806 /* Free memory prior to re-allocation if needed... */
1807 if (dev->data->tx_queues[queue_idx] != NULL) {
1808 ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
1809 dev->data->tx_queues[queue_idx] = NULL;
1812 /* First allocate the Tx queue data structure */
1813 txq = rte_zmalloc_socket("ethdev Tx queue",
1814 sizeof(struct ngbe_tx_queue),
1815 RTE_CACHE_LINE_SIZE, socket_id);
1820 * Allocate Tx ring hardware descriptors. A memzone large enough to
1821 * handle the maximum ring size is allocated in order to allow for
1822 * resizing in later calls to the queue setup function.
1824 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1825 sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
1826 NGBE_ALIGN, socket_id);
1828 ngbe_tx_queue_release(txq);
1832 txq->nb_tx_desc = nb_desc;
1833 txq->tx_free_thresh = tx_free_thresh;
1834 txq->pthresh = tx_conf->tx_thresh.pthresh;
1835 txq->hthresh = tx_conf->tx_thresh.hthresh;
1836 txq->wthresh = tx_conf->tx_thresh.wthresh;
1837 txq->queue_id = queue_idx;
1838 txq->reg_idx = queue_idx;
1839 txq->port_id = dev->data->port_id;
1840 txq->offloads = offloads;
1841 txq->ops = &def_txq_ops;
1842 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1844 txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
1845 txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
1847 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
1848 txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
1850 /* Allocate software ring */
1851 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
1852 sizeof(struct ngbe_tx_entry) * nb_desc,
1853 RTE_CACHE_LINE_SIZE, socket_id);
1854 if (txq->sw_ring == NULL) {
1855 ngbe_tx_queue_release(txq);
1859 "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1860 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1862 /* set up scalar Tx function as appropriate */
1863 ngbe_set_tx_function(dev, txq);
1865 txq->ops->reset(txq);
1867 dev->data->tx_queues[queue_idx] = txq;
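/*
 * Illustrative usage sketch (hypothetical, not part of the driver): a call
 * to rte_eth_tx_queue_setup() that satisfies the constraints checked above.
 * The values 512 and 32 are assumptions for the example only: tx_free_thresh
 * must be smaller than nb_desc - 3 and a divisor of nb_desc, and leaving
 * offloads at 0 lets ngbe_set_tx_function() pick the simple Tx path when
 * tx_free_thresh is also at least RTE_PMD_NGBE_TX_MAX_BURST.
 */
static inline int
ngbe_example_setup_txq(uint16_t port_id, uint16_t queue_id,
		       unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_free_thresh = 32,	/* 32 < 512 - 3 and 512 % 32 == 0 */
		.offloads = 0,		/* no offloads: simple Tx path */
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      socket_id, &txconf);
}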
1873 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
1875 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
1876 * in the sw_sc_ring is not set to NULL but rather points to the next
1877 * mbuf of this RSC aggregation (that has not been completed yet and still
1878 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
1879 * just free the first "nb_segs" segments of the cluster explicitly by calling
1880 * rte_pktmbuf_free_seg().
1882 * @m scattered cluster head
1885 ngbe_free_sc_cluster(struct rte_mbuf *m)
1887 uint16_t i, nb_segs = m->nb_segs;
1888 struct rte_mbuf *next_seg;
1890 for (i = 0; i < nb_segs; i++) {
1892 rte_pktmbuf_free_seg(m);
1898 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
1902 if (rxq->sw_ring != NULL) {
1903 for (i = 0; i < rxq->nb_rx_desc; i++) {
1904 if (rxq->sw_ring[i].mbuf != NULL) {
1905 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1906 rxq->sw_ring[i].mbuf = NULL;
1909 for (i = 0; i < rxq->rx_nb_avail; ++i) {
1910 struct rte_mbuf *mb;
1912 mb = rxq->rx_stage[rxq->rx_next_avail + i];
1913 rte_pktmbuf_free_seg(mb);
1915 rxq->rx_nb_avail = 0;
1918 if (rxq->sw_sc_ring != NULL)
1919 for (i = 0; i < rxq->nb_rx_desc; i++)
1920 if (rxq->sw_sc_ring[i].fbuf != NULL) {
1921 ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
1922 rxq->sw_sc_ring[i].fbuf = NULL;
1927 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
1930 ngbe_rx_queue_release_mbufs(rxq);
1931 rte_free(rxq->sw_ring);
1932 rte_free(rxq->sw_sc_ring);
1938 ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1940 ngbe_rx_queue_release(dev->data->rx_queues[qid]);
1944 * Check if Rx Burst Bulk Alloc function can be used.
1946 * 0: the preconditions are satisfied and the bulk allocation function
1948 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
1949 * function must be used.
1952 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
1957 * Make sure the following pre-conditions are satisfied:
1958 * rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
1959 * rxq->rx_free_thresh < rxq->nb_rx_desc
1960 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
1961 * Scattered packets are not supported. This should be checked
1962 * outside of this function.
1964 if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
1966 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
1967 rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
1969 } else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
1971 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
1972 rxq->rx_free_thresh, rxq->nb_rx_desc);
1974 } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
1976 "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
1977 rxq->nb_rx_desc, rxq->rx_free_thresh);
1984 /* Reset dynamic ngbe_rx_queue fields back to defaults */
1986 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
1988 static const struct ngbe_rx_desc zeroed_desc = {
1989 {{0}, {0} }, {{0}, {0} } };
1991 uint16_t len = rxq->nb_rx_desc;
1994 * By default, the Rx queue setup function allocates enough memory for
1995 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
1996 * extra memory at the end of the descriptor ring to be zero'd out.
1998 if (adapter->rx_bulk_alloc_allowed)
1999 /* zero out extra memory */
2000 len += RTE_PMD_NGBE_RX_MAX_BURST;
2003 * Zero out HW ring memory. Zero out extra memory at the end of
2004 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2005 * reads extra memory as zeros.
2007 for (i = 0; i < len; i++)
2008 rxq->rx_ring[i] = zeroed_desc;
2011 * initialize extra software ring entries. Space for these extra
2012 * entries is always allocated
2014 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2015 for (i = rxq->nb_rx_desc; i < len; ++i)
2016 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2018 rxq->rx_nb_avail = 0;
2019 rxq->rx_next_avail = 0;
2020 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2022 rxq->nb_rx_hold = 0;
2023 rxq->pkt_first_seg = NULL;
2024 rxq->pkt_last_seg = NULL;
2028 ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
2032 offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
2033 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2034 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2035 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
2036 RTE_ETH_RX_OFFLOAD_SCATTER;
2042 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2045 unsigned int socket_id,
2046 const struct rte_eth_rxconf *rx_conf,
2047 struct rte_mempool *mp)
2049 const struct rte_memzone *rz;
2050 struct ngbe_rx_queue *rxq;
2053 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2055 PMD_INIT_FUNC_TRACE();
2056 hw = ngbe_dev_hw(dev);
2058 /* Free memory prior to re-allocation if needed... */
2059 if (dev->data->rx_queues[queue_idx] != NULL) {
2060 ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2061 dev->data->rx_queues[queue_idx] = NULL;
2064 /* First allocate the Rx queue data structure */
2065 rxq = rte_zmalloc_socket("ethdev RX queue",
2066 sizeof(struct ngbe_rx_queue),
2067 RTE_CACHE_LINE_SIZE, socket_id);
2071 rxq->nb_rx_desc = nb_desc;
2072 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2073 rxq->queue_id = queue_idx;
2074 rxq->reg_idx = queue_idx;
2075 rxq->port_id = dev->data->port_id;
2076 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2077 rxq->crc_len = RTE_ETHER_CRC_LEN;
2080 rxq->drop_en = rx_conf->rx_drop_en;
2081 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2084 * Allocate Rx ring hardware descriptors. A memzone large enough to
2085 * handle the maximum ring size is allocated in order to allow for
2086 * resizing in later calls to the queue setup function.
2088 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2089 RX_RING_SZ, NGBE_ALIGN, socket_id);
2091 ngbe_rx_queue_release(rxq);
2096 * Zero init all the descriptors in the ring.
2098 memset(rz->addr, 0, RX_RING_SZ);
2100 rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
2101 rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
2103 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2104 rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
2107 * Certain constraints must be met in order to use the bulk buffer
2108 * allocation Rx burst function. If any of the Rx queues does not meet them,
2109 * the feature should be disabled for the whole port.
2111 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2113 "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
2114 rxq->queue_id, rxq->port_id);
2115 adapter->rx_bulk_alloc_allowed = false;
2119 * Allocate software ring. Allow for space at the end of the
2120 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2121 * function does not access an invalid memory region.
2124 if (adapter->rx_bulk_alloc_allowed)
2125 len += RTE_PMD_NGBE_RX_MAX_BURST;
2127 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2128 sizeof(struct ngbe_rx_entry) * len,
2129 RTE_CACHE_LINE_SIZE, socket_id);
2130 if (rxq->sw_ring == NULL) {
2131 ngbe_rx_queue_release(rxq);
2136 * Always allocate even if it's not going to be needed in order to
2137 * simplify the code.
2139 * This ring is used in Scattered Rx cases and Scattered Rx may
2140 * be requested in ngbe_dev_rx_init(), which is called later from
2144 rte_zmalloc_socket("rxq->sw_sc_ring",
2145 sizeof(struct ngbe_scattered_rx_entry) * len,
2146 RTE_CACHE_LINE_SIZE, socket_id);
2147 if (rxq->sw_sc_ring == NULL) {
2148 ngbe_rx_queue_release(rxq);
2153 "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2154 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2155 rxq->rx_ring_phys_addr);
2157 dev->data->rx_queues[queue_idx] = rxq;
2159 ngbe_reset_rx_queue(adapter, rxq);
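/*
 * Illustrative usage sketch (hypothetical, not part of the driver): a call
 * to rte_eth_rx_queue_setup() whose parameters keep the bulk-alloc Rx path
 * enabled. The values 512 and 32 are assumptions for the example only; per
 * check_rx_burst_bulk_alloc_preconditions(), rx_free_thresh must be at
 * least RTE_PMD_NGBE_RX_MAX_BURST, smaller than nb_desc and a divisor of it.
 */
static inline int
ngbe_example_setup_rxq(uint16_t port_id, uint16_t queue_id,
		       unsigned int socket_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = 32,	/* 512 % 32 == 0 */
	};

	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      socket_id, &rxconf, mb_pool);
}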
2165 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
2168 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2170 PMD_INIT_FUNC_TRACE();
2172 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2173 struct ngbe_tx_queue *txq = dev->data->tx_queues[i];
2176 txq->ops->release_mbufs(txq);
2177 txq->ops->reset(txq);
2181 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2182 struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
2185 ngbe_rx_queue_release_mbufs(rxq);
2186 ngbe_reset_rx_queue(adapter, rxq);
2192 ngbe_dev_free_queues(struct rte_eth_dev *dev)
2196 PMD_INIT_FUNC_TRACE();
2198 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2199 ngbe_dev_rx_queue_release(dev, i);
2200 dev->data->rx_queues[i] = NULL;
2202 dev->data->nb_rx_queues = 0;
2204 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2205 ngbe_dev_tx_queue_release(dev, i);
2206 dev->data->tx_queues[i] = NULL;
2208 dev->data->nb_tx_queues = 0;
2212 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
2214 struct ngbe_rx_entry *rxe = rxq->sw_ring;
2218 /* Initialize software ring entries */
2219 for (i = 0; i < rxq->nb_rx_desc; i++) {
2220 /* the ring can also be modified by hardware */
2221 volatile struct ngbe_rx_desc *rxd;
2222 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2225 PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
2226 (unsigned int)rxq->queue_id,
2227 (unsigned int)rxq->port_id);
2231 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2232 mbuf->port = rxq->port_id;
2235 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2236 rxd = &rxq->rx_ring[i];
2237 NGBE_RXD_HDRADDR(rxd, 0);
2238 NGBE_RXD_PKTADDR(rxd, dma_addr);
void
ngbe_set_rx_function(struct rte_eth_dev *dev)
{
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	if (dev->data->scattered_rx) {
		/*
		 * Set the scattered callback: there are bulk and
		 * single allocation versions.
		 */
		if (adapter->rx_bulk_alloc_allowed) {
			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
					    "allocation callback (port=%d).",
				     dev->data->port_id);
			dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
		} else {
			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
					    "single allocation) "
					    "Scattered Rx callback "
					    "(port=%d).",
				     dev->data->port_id);
			dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
		}
	/*
	 * Below we set "simple" callbacks according to port/queues parameters.
	 * If parameters allow we are going to choose between the following
	 * callbacks:
	 *    - Bulk Allocation
	 *    - Single buffer allocation (the simplest one)
	 */
	} else if (adapter->rx_bulk_alloc_allowed) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
				    "satisfied. Rx Burst Bulk Alloc function "
				    "will be used on port=%d.",
			     dev->data->port_id);
		dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
	} else {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
				    "satisfied, or Scattered Rx is requested "
				    "(port=%d).",
			     dev->data->port_id);
		dev->rx_pkt_burst = ngbe_recv_pkts;
	}
}
static const struct {
	eth_rx_burst_t pkt_burst;
	const char *info;
} ngbe_rx_burst_infos[] = {
	{ ngbe_recv_pkts_sc_single_alloc,    "Scalar Scattered"},
	{ ngbe_recv_pkts_sc_bulk_alloc,      "Scalar Scattered Bulk Alloc"},
	{ ngbe_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc"},
	{ ngbe_recv_pkts,                    "Scalar"},
};

int
ngbe_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		       struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret = -EINVAL;
	unsigned int i;

	for (i = 0; i < RTE_DIM(ngbe_rx_burst_infos); ++i) {
		if (pkt_burst == ngbe_rx_burst_infos[i].pkt_burst) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 ngbe_rx_burst_infos[i].info);
			ret = 0;
			break;
		}
	}

	return ret;
}
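
/*
 * Illustrative sketch only (not part of the upstream driver): an application
 * can retrieve the human-readable name of the Rx callback chosen above
 * through the generic ethdev API. The function name and the port_id/queue_id
 * values are placeholders.
 */
static __rte_unused void
ngbe_example_show_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	/* Fills mode.info with the matching entry of ngbe_rx_burst_infos */
	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		PMD_INIT_LOG(DEBUG, "port %u queue %u Rx burst mode: %s",
			     port_id, queue_id, mode.info);
}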
/*
 * Initializes Receive Unit.
 */
int
ngbe_dev_rx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_rx_queue *rxq;
	uint64_t bus_addr;
	uint32_t fctrl;
	uint32_t hlreg0;
	uint32_t srrctl;
	uint32_t rdrxctl;
	uint32_t rxcsum;
	uint16_t buf_size;
	uint16_t i;
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/*
	 * Make sure receives are disabled while setting
	 * up the Rx context (registers, descriptor rings, etc.).
	 */
	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);

	/* Enable receipt of broadcasted frames */
	fctrl = rd32(hw, NGBE_PSRCTL);
	fctrl |= NGBE_PSRCTL_BCA;
	wr32(hw, NGBE_PSRCTL, fctrl);

	/*
	 * Configure CRC stripping, if any.
	 */
	hlreg0 = rd32(hw, NGBE_SECRXCTL);
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
	else
		hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
	wr32(hw, NGBE_SECRXCTL, hlreg0);

	/*
	 * Configure jumbo frame support, if any.
	 */
	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
		NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));

	/* Setup Rx queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
			rxq->crc_len = RTE_ETHER_CRC_LEN;
		else
			rxq->crc_len = 0;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;
		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
			(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
			(uint32_t)(bus_addr >> 32));
		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);

		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= NGBE_RXCFG_DROP;

		/*
		 * Configure the Rx buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);

		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
	}

	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		dev->data->scattered_rx = 1;

	/*
	 * Setup the Checksum Register.
	 * Enable IP/L4 checksum computation by hardware if requested to do so.
	 */
	rxcsum = rd32(hw, NGBE_PSRCTL);
	rxcsum |= NGBE_PSRCTL_PCSD;
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		rxcsum |= NGBE_PSRCTL_L4CSUM;
	else
		rxcsum &= ~NGBE_PSRCTL_L4CSUM;

	wr32(hw, NGBE_PSRCTL, rxcsum);

	rdrxctl = rd32(hw, NGBE_SECRXCTL);
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rdrxctl &= ~NGBE_SECRXCTL_CRCSTRIP;
	else
		rdrxctl |= NGBE_SECRXCTL_CRCSTRIP;
	wr32(hw, NGBE_SECRXCTL, rdrxctl);

	ngbe_set_rx_function(dev);

	return 0;
}
/*
 * Initializes Transmit Unit.
 */
void
ngbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, NGBE_TXBAL(txq->reg_idx),
			(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, NGBE_TXBAH(txq->reg_idx),
			(uint32_t)(bus_addr >> 32));
		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
	}
}
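
/*
 * Note that Tx initialisation above only programs the ring base/size and the
 * head/tail pointers; the per-queue enable bits are set later, from
 * ngbe_dev_rxtx_start() / ngbe_dev_tx_queue_start().
 */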
/*
 * Start Transmit and Receive Units.
 */
int
ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw;
	struct ngbe_tx_queue *txq;
	struct ngbe_rx_queue *rxq;
	uint32_t dmatxctl;
	uint32_t rxctrl;
	uint16_t i;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
		      NGBE_TXCFG_HTHRESH_MASK |
		      NGBE_TXCFG_WTHRESH_MASK,
		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
		      NGBE_TXCFG_WTHRESH(txq->wthresh));
	}

	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
	dmatxctl |= NGBE_DMATXCTRL_ENA;
	wr32(hw, NGBE_DMATXCTRL, dmatxctl);

	/* Start only the queues that are not marked for deferred start */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start == 0) {
			ret = ngbe_dev_tx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start == 0) {
			ret = ngbe_dev_rx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable Receive engine */
	rxctrl = rd32(hw, NGBE_PBRXCTL);
	rxctrl |= NGBE_PBRXCTL_ENA;
	hw->mac.enable_rx_dma(hw, rxctrl);

	return 0;
}
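
/*
 * The helpers below save the per-queue ring registers (base address low/high
 * and CFG) before a queue is disabled and write them back afterwards with the
 * enable bit cleared, so the ring configuration survives a stop/start cycle.
 * Each queue is given a block of 8 u32 slots in hw->q_rx_regs/q_tx_regs;
 * only the first three are used here.
 */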
void
ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];

	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
}

void
ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];

	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
}

void
ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];

	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
}

void
ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];

	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
}
/*
 * Start Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	/* Allocate buffers for descriptor rings */
	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
		return -1;
	}
	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	rxdctl |= NGBE_RXCFG_ENA;
	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);

	/* Wait until Rx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
	rte_wmb();
	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Stop Receive Units for specified queue.
 */
int
ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);

	/* Wait until Rx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);

	ngbe_rx_queue_release_mbufs(rxq);
	ngbe_reset_rx_queue(adapter, rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
/*
 * Start Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);

	/* Wait until Tx Enable ready */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
			     tx_queue_id);

	rte_wmb();
	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Stop Transmit Units for specified queue.
 */
int
ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	/* Wait until Tx queue is empty */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_us(RTE_NGBE_WAIT_100_US);
		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
	} while (--poll_ms && (txtdh != txtdt));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
			     tx_queue_id);

	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);

	/* Wait until Tx Enable bit clear */
	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
	if (poll_ms == 0)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			     tx_queue_id);

	rte_delay_us(RTE_NGBE_WAIT_100_US);
	ngbe_dev_store_tx_queue(hw, txq->reg_idx);

	if (txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->reset(txq);
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
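
/*
 * Illustrative sketch only (not part of the upstream driver): the queue
 * start/stop handlers above are reached through the generic ethdev API, for
 * example when an application restarts a single Rx/Tx queue pair at runtime.
 * The function name and the port_id/queue_id values are placeholders.
 */
static __rte_unused int
ngbe_example_restart_queue_pair(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* rte_eth_dev_*_queue_stop()/start() end up in the handlers above */
	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_tx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}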