1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2014-2021 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
9 * vim:shiftwidth=8:noexpandtab
11 * @file dpdk/pmd/nfp_rxtx.c
13 * Netronome vNIC DPDK Poll-Mode Driver: Rx/Tx functions
16 #include <ethdev_driver.h>
17 #include <ethdev_pci.h>
19 #include "nfp_common.h"
25 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
26 static inline void nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq);
27 static inline void nfp_net_set_hash(struct nfp_net_rxq *rxq,
28 struct nfp_net_rx_desc *rxd,
29 struct rte_mbuf *mbuf);
30 static inline void nfp_net_rx_cksum(struct nfp_net_rxq *rxq,
31 struct nfp_net_rx_desc *rxd,
33 static void nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq);
34 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
35 static void nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq);
36 static inline uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq);
37 static inline uint32_t nfp_net_txq_full(struct nfp_net_txq *txq);
38 static inline void nfp_net_tx_tso(struct nfp_net_txq *txq,
39 struct nfp_net_tx_desc *txd,
41 static inline void nfp_net_tx_cksum(struct nfp_net_txq *txq,
42 struct nfp_net_tx_desc *txd,
46 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
48 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
52 PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %u descriptors",
55 for (i = 0; i < rxq->rx_count; i++) {
56 struct nfp_net_rx_desc *rxd;
57 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
60 PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
61 (unsigned int)rxq->qidx);
65 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
69 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
70 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
72 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
75 /* Make sure all writes are flushed before telling the hardware */
78 /* Don't advertise the whole ring: the firmware gets confused if we do */
79 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
82 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
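/*
 * Illustrative example (numbers assumed, not taken from the driver): with
 * rx_count = 512 the free-list write pointer is advanced by 511, i.e. one
 * descriptor slot is always held back from the firmware.
 */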
88 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
92 for (i = 0; i < dev->data->nb_rx_queues; i++) {
93 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
100 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
102 struct nfp_net_rxq *rxq;
103 struct nfp_net_rx_desc *rxds;
107 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
114 * Other PMDs check the DD bit only on every 4th descriptor and count
115 * all four as done when the first one has the DD bit set. Of course,
116 * this is not accurate but it can be good for performance. Ideally,
117 * the check should be done on descriptor chunks belonging to the same
118 * cache line
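*
* A rough sketch of that chunked approach (an illustrative assumption, not
* what this function implements): check only the first descriptor of each
* group of four and count the whole group as done, e.g.
*
*	while (count < rxq->rx_count) {
*		if ((rxq->rxds[idx].rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
*			break;
*		count += 4;
*		idx = (idx + 4) % rxq->rx_count;
*	}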
121 while (count < rxq->rx_count) {
122 rxds = &rxq->rxds[idx];
123 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
130 if ((idx) == rxq->rx_count)
138 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
140 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
144 * nfp_net_set_hash - Set mbuf hash data
146 * The RSS hash and hash-type are prepended to the packet data.
147 * Extract and decode it and set the mbuf fields.
150 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
151 struct rte_mbuf *mbuf)
153 struct nfp_net_hw *hw = rxq->hw;
154 uint8_t *meta_offset;
157 uint32_t hash_type = 0;
159 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
162 /* this is true for new firmware versions */
163 if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
164 (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
165 NFP_DESC_META_LEN(rxd))) {
168 * <---- 32 bit ----->
173 * ====================
176 * The field type word contains up to eight 4-bit field types
177 * A 4-bit field type refers to a data field word
178 * A data field word can have several 4-bit field types
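*
* A hedged, illustrative walk over that layout (the one-data-word-per-field
* mapping is an assumption, not the decode logic used below):
*
*	meta = rte_pktmbuf_mtod(mbuf, uint8_t *) - NFP_DESC_META_LEN(rxd);
*	types = rte_be_to_cpu_32(*(uint32_t *)meta);
*	meta += 4;
*	while (types != 0) {
*		type = types & NFP_NET_META_FIELD_MASK;
*		types >>= NFP_NET_META_FIELD_SIZE;
*		data = rte_be_to_cpu_32(*(uint32_t *)meta);
*		meta += 4;
*		... interpret 'data' according to 'type' ...
*	}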
180 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
181 meta_offset -= NFP_DESC_META_LEN(rxd);
182 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
184 /* The NFP PMD only supports hash metadata */
185 switch (meta_info & NFP_NET_META_FIELD_MASK) {
186 case NFP_NET_META_HASH:
187 /* next field type is about the hash type */
188 meta_info >>= NFP_NET_META_FIELD_SIZE;
189 /* hash value is in the data field */
190 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
191 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
194 /* Unsupported metadata can be a performance issue */
198 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
201 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
202 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
205 mbuf->hash.rss = hash;
206 mbuf->ol_flags |= PKT_RX_RSS_HASH;
209 case NFP_NET_RSS_IPV4:
210 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
212 case NFP_NET_RSS_IPV6:
213 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
215 case NFP_NET_RSS_IPV6_EX:
216 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
218 case NFP_NET_RSS_IPV4_TCP:
219 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
221 case NFP_NET_RSS_IPV6_TCP:
222 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
224 case NFP_NET_RSS_IPV4_UDP:
225 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
227 case NFP_NET_RSS_IPV6_UDP:
228 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
231 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
235 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
237 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
240 struct nfp_net_hw *hw = rxq->hw;
242 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
245 /* If IPv4 and the IP checksum is wrong, flag it as bad */
246 if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
247 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
248 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
250 mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
252 /* If neither UDP nor TCP, return */
253 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
254 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
257 if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
258 mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
260 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
266 * There are some design decisions to make:
267 * 1) How to check the RX descriptor DD bit
268 * 2) How and when to allocate new mbufs
270 * The current implementation checks one single DD bit per loop. As each
271 * descriptor is 8 bytes, it is likely a good idea to check the descriptors
272 * of a whole cache line instead. Tests with this change have not shown any
273 * performance improvement, but it requires further investigation. For example,
274 * depending on which descriptor comes next, fewer than 8 descriptors may
275 * share its cache line, and handling that implies extra work which could be
276 * counterproductive by itself. Indeed, recent firmware changes already go in
277 * this direction: several descriptors are written with the DD bit set at once
278 * to save PCIe bandwidth and DMA operations from the NFP.
280 * An mbuf is allocated whenever a new packet is received. The descriptor is
281 * then refilled with the new mbuf and the old one is handed to the
282 * user. The main drawback of this design is that per-packet allocation is
283 * heavier than the bulk allocation DPDK offers with rte_mempool_get_bulk. From
284 * the cache point of view, allocating the mbuf early, as we do now, does not
285 * seem to have any benefit. Again, tests with this change have not
286 * shown any improvement. Also, rte_mempool_get_bulk is all or nothing,
287 * so looking at the implications of this type of allocation should be studied
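*
* For illustration only (an assumption, not code used by this PMD), a bulk
* refill based on rte_pktmbuf_alloc_bulk(), the mbuf-level wrapper around
* rte_mempool_get_bulk(), could look roughly like:
*
*	struct rte_mbuf *new_mbufs[64];
*
*	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, new_mbufs, 64) != 0)
*		break;
*
* where the call either provides all 64 mbufs or none of them (the
* all-or-nothing behaviour mentioned above), and the new mbufs are then
* used to refill the descriptors of the packets handed to the user.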
292 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
294 struct nfp_net_rxq *rxq;
295 struct nfp_net_rx_desc *rxds;
296 struct nfp_net_rx_buff *rxb;
297 struct nfp_net_hw *hw;
299 struct rte_mbuf *new_mb;
305 if (unlikely(rxq == NULL)) {
307 * DPDK only checks that the queue index is below the number of
308 * enabled queues, but the queue still needs to be configured
310 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
318 while (avail < nb_pkts) {
319 rxb = &rxq->rxbufs[rxq->rd_p];
320 if (unlikely(rxb == NULL)) {
321 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
325 rxds = &rxq->rxds[rxq->rd_p];
326 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
330 * Memory barrier to ensure that we won't do other
331 * reads before the DD bit.
336 * We got a packet. Let's alloc a new mbuf for refilling the
337 * free descriptor ring as soon as possible
339 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
340 if (unlikely(new_mb == NULL)) {
341 RTE_LOG_DP(DEBUG, PMD,
342 "RX mbuf alloc failed port_id=%u queue_id=%u\n",
343 rxq->port_id, (unsigned int)rxq->qidx);
344 nfp_net_mbuf_alloc_failed(rxq);
351 * Grab the mbuf and refill the descriptor with the
352 * previously allocated mbuf
357 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
358 rxds->rxd.data_len, rxq->mbuf_size);
360 /* Size of this segment */
361 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
362 /* Size of the whole packet. We just support 1 segment */
363 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
365 if (unlikely((mb->data_len + hw->rx_offset) >
368 * This should not happen and it is the user's
369 * responsibility to avoid it, but we still have
370 * to give some information about the error
373 "mbuf overflow likely due to the RX offset.\n"
374 "\t\tYour mbuf size should have extra space for"
375 " RX offset=%u bytes.\n"
376 "\t\tCurrently you just have %u bytes available"
377 " but the received packet is %u bytes long",
379 rxq->mbuf_size - hw->rx_offset,
384 /* Filling the received mbuf with packet info */
386 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
388 mb->data_off = RTE_PKTMBUF_HEADROOM +
389 NFP_DESC_META_LEN(rxds);
391 /* No scatter mode supported */
395 mb->port = rxq->port_id;
397 /* Checking the RSS flag */
398 nfp_net_set_hash(rxq, rxds, mb);
400 /* Checking the checksum flag */
401 nfp_net_rx_cksum(rxq, rxds, mb);
403 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
404 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
405 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
406 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
409 /* Adding the mbuf to the mbuf array passed by the app */
410 rx_pkts[avail++] = mb;
412 /* Now resetting and updating the descriptor */
415 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
417 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
418 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
421 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
428 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
429 rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
431 nb_hold += rxq->nb_rx_hold;
434 * FL descriptors need to be written before incrementing the
435 * FL queue WR pointer
438 if (nb_hold > rxq->rx_free_thresh) {
439 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
440 rxq->port_id, (unsigned int)rxq->qidx,
441 (unsigned int)nb_hold, (unsigned int)avail);
442 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
445 rxq->nb_rx_hold = nb_hold;
451 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
455 if (rxq->rxbufs == NULL)
458 for (i = 0; i < rxq->rx_count; i++) {
459 if (rxq->rxbufs[i].mbuf) {
460 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
461 rxq->rxbufs[i].mbuf = NULL;
467 nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
469 struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
472 nfp_net_rx_queue_release_mbufs(rxq);
473 rte_free(rxq->rxbufs);
479 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
481 nfp_net_rx_queue_release_mbufs(rxq);
487 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
488 uint16_t queue_idx, uint16_t nb_desc,
489 unsigned int socket_id,
490 const struct rte_eth_rxconf *rx_conf,
491 struct rte_mempool *mp)
493 const struct rte_memzone *tz;
494 struct nfp_net_rxq *rxq;
495 struct nfp_net_hw *hw;
498 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
500 PMD_INIT_FUNC_TRACE();
502 /* Validating number of descriptors */
503 rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
504 if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
505 nb_desc > NFP_NET_MAX_RX_DESC ||
506 nb_desc < NFP_NET_MIN_RX_DESC) {
507 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
512 * Free memory prior to re-allocation if needed. This is the case after
513 * calling nfp_net_stop
515 if (dev->data->rx_queues[queue_idx]) {
516 nfp_net_rx_queue_release(dev, queue_idx);
517 dev->data->rx_queues[queue_idx] = NULL;
520 /* Allocating rx queue data structure */
521 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
522 RTE_CACHE_LINE_SIZE, socket_id);
526 dev->data->rx_queues[queue_idx] = rxq;
528 /* Hw queues mapping based on firmware configuration */
529 rxq->qidx = queue_idx;
530 rxq->fl_qcidx = queue_idx * hw->stride_rx;
531 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
532 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
533 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
536 * Tracking mbuf size for detecting a potential mbuf overflow due to
540 rxq->mbuf_size = rxq->mem_pool->elt_size;
541 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
542 hw->flbufsz = rxq->mbuf_size;
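/*
 * Worked example with assumed values (not mandated by the driver): a pool
 * created with RTE_MBUF_DEFAULT_BUF_SIZE (2048 + 128 bytes of data room),
 * no private area and a 128-byte struct rte_mbuf gives an element size of
 * 128 + 2176 = 2304 bytes, so mbuf_size = 2304 - (128 + 128) = 2048 bytes
 * of room for packet data.
 */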
544 rxq->rx_count = nb_desc;
545 rxq->port_id = dev->data->port_id;
546 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
547 rxq->drop_en = rx_conf->rx_drop_en;
550 * Allocate RX ring hardware descriptors. A memzone large enough to
551 * handle the maximum ring size is allocated in order to allow for
552 * resizing in later calls to the queue setup function.
554 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
555 sizeof(struct nfp_net_rx_desc) *
556 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
560 PMD_DRV_LOG(ERR, "Error allocating rx dma");
561 nfp_net_rx_queue_release(dev, queue_idx);
562 dev->data->rx_queues[queue_idx] = NULL;
566 /* Saving physical and virtual addresses for the RX ring */
567 rxq->dma = (uint64_t)tz->iova;
568 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
570 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
571 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
572 sizeof(*rxq->rxbufs) * nb_desc,
573 RTE_CACHE_LINE_SIZE, socket_id);
574 if (rxq->rxbufs == NULL) {
575 nfp_net_rx_queue_release(dev, queue_idx);
576 dev->data->rx_queues[queue_idx] = NULL;
580 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
581 rxq->rxbufs, rxq->rxds, rxq->dma);
583 nfp_net_reset_rx_queue(rxq);
588 * Telling the HW about the physical address of the RX ring and number
589 * of descriptors in log2 format
591 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
592 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
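/*
 * For example (illustrative only): with nb_desc = 1024 the value written
 * to NFP_NET_CFG_RXR_SZ is rte_log2_u32(1024) = 10.
 */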
598 * nfp_net_tx_free_bufs - Check for descriptors with a complete
600 * @txq: TX queue to work with
601 * Returns number of descriptors freed
604 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
609 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
610 " status", txq->qidx);
612 /* Work out how many packets have been sent */
613 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
615 if (qcp_rd_p == txq->rd_p) {
616 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
617 "packets (%u, %u)", txq->qidx,
618 qcp_rd_p, txq->rd_p);
622 if (qcp_rd_p > txq->rd_p)
623 todo = qcp_rd_p - txq->rd_p;
625 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
627 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
628 qcp_rd_p, txq->rd_p, txq->rd_p);
634 if (unlikely(txq->rd_p >= txq->tx_count))
635 txq->rd_p -= txq->tx_count;
641 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
645 if (txq->txbufs == NULL)
648 for (i = 0; i < txq->tx_count; i++) {
649 if (txq->txbufs[i].mbuf) {
650 rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
651 txq->txbufs[i].mbuf = NULL;
657 nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
659 struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
662 nfp_net_tx_queue_release_mbufs(txq);
663 rte_free(txq->txbufs);
669 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
671 nfp_net_tx_queue_release_mbufs(txq);
677 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
678 uint16_t nb_desc, unsigned int socket_id,
679 const struct rte_eth_txconf *tx_conf)
681 const struct rte_memzone *tz;
682 struct nfp_net_txq *txq;
683 uint16_t tx_free_thresh;
684 struct nfp_net_hw *hw;
687 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
689 PMD_INIT_FUNC_TRACE();
691 /* Validating number of descriptors */
692 tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
693 if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
694 nb_desc > NFP_NET_MAX_TX_DESC ||
695 nb_desc < NFP_NET_MIN_TX_DESC) {
696 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
700 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
701 tx_conf->tx_free_thresh :
702 DEFAULT_TX_FREE_THRESH);
704 if (tx_free_thresh > (nb_desc)) {
706 "tx_free_thresh must be less than the number of TX "
707 "descriptors. (tx_free_thresh=%u port=%d "
708 "queue=%d)", (unsigned int)tx_free_thresh,
709 dev->data->port_id, (int)queue_idx);
714 * Free memory prior to re-allocation if needed. This is the case after
715 * calling nfp_net_stop
717 if (dev->data->tx_queues[queue_idx]) {
718 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
720 nfp_net_tx_queue_release(dev, queue_idx);
721 dev->data->tx_queues[queue_idx] = NULL;
724 /* Allocating tx queue data structure */
725 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
726 RTE_CACHE_LINE_SIZE, socket_id);
728 PMD_DRV_LOG(ERR, "Error allocating tx queue data structure");
732 dev->data->tx_queues[queue_idx] = txq;
735 * Allocate TX ring hardware descriptors. A memzone large enough to
736 * handle the maximum ring size is allocated in order to allow for
737 * resizing in later calls to the queue setup function.
739 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
740 sizeof(struct nfp_net_tx_desc) *
741 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
744 PMD_DRV_LOG(ERR, "Error allocating tx dma");
745 nfp_net_tx_queue_release(dev, queue_idx);
746 dev->data->tx_queues[queue_idx] = NULL;
750 txq->tx_count = nb_desc;
751 txq->tx_free_thresh = tx_free_thresh;
752 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
753 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
754 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
756 /* queue mapping based on firmware configuration */
757 txq->qidx = queue_idx;
758 txq->tx_qcidx = queue_idx * hw->stride_tx;
759 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
761 txq->port_id = dev->data->port_id;
763 /* Saving physical and virtual addresses for the TX ring */
764 txq->dma = (uint64_t)tz->iova;
765 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
767 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
768 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
769 sizeof(*txq->txbufs) * nb_desc,
770 RTE_CACHE_LINE_SIZE, socket_id);
771 if (txq->txbufs == NULL) {
772 nfp_net_tx_queue_release(dev, queue_idx);
773 dev->data->tx_queues[queue_idx] = NULL;
776 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
777 txq->txbufs, txq->txds, txq->dma);
779 nfp_net_reset_tx_queue(txq);
784 * Telling the HW about the physical address of the TX ring and number
785 * of descriptors in log2 format
787 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
788 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
793 /* Always keep some descriptors free to avoid confusion when the ring wraps */
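/*
 * Worked example (illustrative numbers): with tx_count = 4096, wr_p = 1030
 * and rd_p = 1000 there are 4096 - (1030 - 1000) - 8 = 4058 usable
 * descriptors; with wr_p = 1000 and rd_p = 1030 (the write pointer has
 * already wrapped) there are 1030 - 1000 - 8 = 22.
 */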
795 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
797 if (txq->wr_p >= txq->rd_p)
798 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
800 return txq->rd_p - txq->wr_p - 8;
804 * nfp_net_txq_full - Check if the number of free TX descriptors
805 * is below tx_free_thresh
807 * @txq: TX queue to check
809 * This function uses the host copy of the read/write pointers
812 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
814 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
817 /* nfp_net_tx_tso - Set TX descriptor for TSO */
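/*
 * Worked example (assumed header sizes, not fixed by the driver): for a TCP
 * segmentation request over Ethernet/IPv4 with l2_len = 14, l3_len = 20 and
 * l4_len = 20, the descriptor gets l3_offset = 14, l4_offset = 34 and
 * lso_hdrlen = 54, with mss taken from mb->tso_segsz.
 */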
819 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
823 struct nfp_net_hw *hw = txq->hw;
825 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
828 ol_flags = mb->ol_flags;
830 if (!(ol_flags & PKT_TX_TCP_SEG))
833 txd->l3_offset = mb->l2_len;
834 txd->l4_offset = mb->l2_len + mb->l3_len;
835 txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
836 txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
837 txd->flags = PCIE_DESC_TX_LSO;
848 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
850 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
854 struct nfp_net_hw *hw = txq->hw;
856 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
859 ol_flags = mb->ol_flags;
861 /* Only IPv4 has a header checksum to offload; IPv6 does not */
862 if (ol_flags & PKT_TX_IP_CKSUM)
863 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
865 switch (ol_flags & PKT_TX_L4_MASK) {
866 case PKT_TX_UDP_CKSUM:
867 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
869 case PKT_TX_TCP_CKSUM:
870 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
874 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
875 txd->flags |= PCIE_DESC_TX_CSUM;
879 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
881 struct nfp_net_txq *txq;
882 struct nfp_net_hw *hw;
883 struct nfp_net_tx_desc *txds, txd;
884 struct rte_mbuf *pkt;
886 int pkt_size, dma_size;
887 uint16_t free_descs, issued_descs;
888 struct rte_mbuf **lmbuf;
893 txds = &txq->txds[txq->wr_p];
895 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
896 txq->qidx, txq->wr_p, nb_pkts);
898 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
899 nfp_net_tx_free_bufs(txq);
901 free_descs = (uint16_t)nfp_free_tx_desc(txq);
902 if (unlikely(free_descs == 0))
909 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
911 /* Sending packets */
912 while ((i < nb_pkts) && free_descs) {
913 /* Grabbing the mbuf linked to the current descriptor */
914 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
915 /* Warming the cache for releasing the mbuf later on */
916 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
918 pkt = *(tx_pkts + i);
920 if (unlikely(pkt->nb_segs > 1 &&
921 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
922 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
923 rte_panic("Multisegment packet unsupported\n");
926 /* Checking if we have enough descriptors */
927 if (unlikely(pkt->nb_segs > free_descs))
931 * Checksum and VLAN flags are set only in the first descriptor of a
932 * multisegment packet, but TSO info needs to be in all of them.
934 txd.data_len = pkt->pkt_len;
935 nfp_net_tx_tso(txq, &txd, pkt);
936 nfp_net_tx_cksum(txq, &txd, pkt);
938 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
939 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
940 txd.flags |= PCIE_DESC_TX_VLAN;
941 txd.vlan = pkt->vlan_tci;
945 * mbuf data_len is the amount of data in one segment and pkt_len is
946 * the amount in the whole packet. When the packet has just one segment,
947 * data_len equals pkt_len
949 pkt_size = pkt->pkt_len;
952 /* Copying TSO, VLAN and cksum info */
955 /* Releasing mbuf used by this descriptor previously */
957 rte_pktmbuf_free_seg(*lmbuf);
960 * Linking the mbuf with the descriptor so it can be released
961 * the next time this descriptor is used
965 dma_size = pkt->data_len;
966 dma_addr = rte_mbuf_data_iova(pkt);
967 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
968 "%" PRIx64 "", dma_addr);
970 /* Filling descriptors fields */
971 txds->dma_len = dma_size;
972 txds->data_len = txd.data_len;
973 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
974 txds->dma_addr_lo = (dma_addr & 0xffffffff);
975 ASSERT(free_descs > 0);
979 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
982 pkt_size -= dma_size;
985 * Making the EOP, packets with just one segment
988 if (likely(!pkt_size))
989 txds->offset_eop = PCIE_DESC_TX_EOP;
991 txds->offset_eop = 0;
994 /* Referencing next free TX descriptor */
995 txds = &txq->txds[txq->wr_p];
996 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
1003 /* Increment write pointers. Force memory write before we let HW know */
1005 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);