2 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * vim:shiftwidth=8:noexpandtab
37 * @file dpdk/pmd/nfp_net.c
39 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
45 #include <rte_debug.h>
46 #include <rte_ethdev.h>
47 #include <rte_ethdev_pci.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_memzone.h>
52 #include <rte_mempool.h>
53 #include <rte_version.h>
54 #include <rte_string_fns.h>
55 #include <rte_alarm.h>
56 #include <rte_spinlock.h>
59 #include "nfp_net_pmd.h"
60 #include "nfp_net_logs.h"
61 #include "nfp_net_ctrl.h"
64 static void nfp_net_close(struct rte_eth_dev *dev);
65 static int nfp_net_configure(struct rte_eth_dev *dev);
66 static void nfp_net_dev_interrupt_handler(void *param);
67 static void nfp_net_dev_interrupt_delayed_handler(void *param);
68 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
69 static void nfp_net_infos_get(struct rte_eth_dev *dev,
70 struct rte_eth_dev_info *dev_info);
71 static int nfp_net_init(struct rte_eth_dev *eth_dev);
72 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
73 static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
74 static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
75 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
76 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
78 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
80 static void nfp_net_rx_queue_release(void *rxq);
81 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
82 uint16_t nb_desc, unsigned int socket_id,
83 const struct rte_eth_rxconf *rx_conf,
84 struct rte_mempool *mp);
85 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
86 static void nfp_net_tx_queue_release(void *txq);
87 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
88 uint16_t nb_desc, unsigned int socket_id,
89 const struct rte_eth_txconf *tx_conf);
90 static int nfp_net_start(struct rte_eth_dev *dev);
91 static int nfp_net_stats_get(struct rte_eth_dev *dev,
92 struct rte_eth_stats *stats);
93 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
94 static void nfp_net_stop(struct rte_eth_dev *dev);
95 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
98 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
99 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
100 struct rte_eth_rss_conf *rss_conf);
101 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
102 struct rte_eth_rss_reta_entry64 *reta_conf,
104 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
105 struct rte_eth_rss_conf *rss_conf);
108 * The offset of the queue controller queues in the PCIe Target. These
109 * happen to be at the same offset on the NFP6000 and the NFP3200 so
110 * we use a single macro here.
112 #define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff))
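/*
 * For example, with the macro above queue 1 maps to offset 0x800 and
 * queue 3 to 0x1800 within the PCIe Target, i.e. each queue controller
 * occupies a 2KB window.
 */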
114 /* Maximum value which can be added to a queue with one transaction */
115 #define NFP_QCP_MAX_ADD 0x7f
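/*
 * Default DMA address of an mbuf: the bus address (IOVA) of the buffer
 * plus the standard headroom, i.e. where the packet data starts by default.
 */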
117 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
118 (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
120 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
122 NFP_QCP_READ_PTR = 0,
127 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
128 * @q: Base address for queue structure
129 * @ptr: Add to the Read or Write pointer
130 * @val: Value to add to the queue pointer
132 * If @val is greater than @NFP_QCP_MAX_ADD, multiple writes are performed.
135 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
139 if (ptr == NFP_QCP_READ_PTR)
140 off = NFP_QCP_QUEUE_ADD_RPTR;
142 off = NFP_QCP_QUEUE_ADD_WPTR;
144 while (val > NFP_QCP_MAX_ADD) {
145 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
146 val -= NFP_QCP_MAX_ADD;
149 nn_writel(rte_cpu_to_le_32(val), q + off);
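/*
 * For example, adding 200 with NFP_QCP_MAX_ADD being 0x7f (127) results in
 * two writes: one adding 127 and a final one adding the remaining 73.
 */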
153 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
154 * @q: Base address for queue structure
155 * @ptr: Read or Write pointer
157 static inline uint32_t
158 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
163 if (ptr == NFP_QCP_READ_PTR)
164 off = NFP_QCP_QUEUE_STS_LO;
166 off = NFP_QCP_QUEUE_STS_HI;
168 val = rte_cpu_to_le_32(nn_readl(q + off));
170 if (ptr == NFP_QCP_READ_PTR)
171 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
173 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
177 * Functions to read/write from/to the Config BAR.
178 * They perform any endian conversion necessary.
180 static inline uint8_t
181 nn_cfg_readb(struct nfp_net_hw *hw, int off)
183 return nn_readb(hw->ctrl_bar + off);
187 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
189 nn_writeb(val, hw->ctrl_bar + off);
192 static inline uint32_t
193 nn_cfg_readl(struct nfp_net_hw *hw, int off)
195 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
199 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
201 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
204 static inline uint64_t
205 nn_cfg_readq(struct nfp_net_hw *hw, int off)
207 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
211 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
213 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
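/*
 * These accessors are used throughout the PMD for reading capabilities and
 * writing configuration, e.g. nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu)
 * as done in nfp_net_params_setup() below.
 */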
217 * Atomically reads link status information from global structure rte_eth_dev.
220 * - Pointer to the structure rte_eth_dev to read from.
221 * - Pointer to the buffer to be saved with the link status.
224 * - On success, zero.
225 * - On failure, negative value.
228 nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
229 struct rte_eth_link *link)
231 struct rte_eth_link *dst = link;
232 struct rte_eth_link *src = &dev->data->dev_link;
234 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
235 *(uint64_t *)src) == 0)
242 * Atomically writes the link status information into global
243 * structure rte_eth_dev.
246 * - Pointer to the structure rte_eth_dev to write to.
247 * - Pointer to the buffer holding the link status to be written.
250 * - On success, zero.
251 * - On failure, negative value.
254 nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
255 struct rte_eth_link *link)
257 struct rte_eth_link *dst = &dev->data->dev_link;
258 struct rte_eth_link *src = link;
260 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
261 *(uint64_t *)src) == 0)
268 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
272 if (rxq->rxbufs == NULL)
275 for (i = 0; i < rxq->rx_count; i++) {
276 if (rxq->rxbufs[i].mbuf) {
277 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
278 rxq->rxbufs[i].mbuf = NULL;
284 nfp_net_rx_queue_release(void *rx_queue)
286 struct nfp_net_rxq *rxq = rx_queue;
289 nfp_net_rx_queue_release_mbufs(rxq);
290 rte_free(rxq->rxbufs);
296 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
298 nfp_net_rx_queue_release_mbufs(rxq);
304 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
308 if (txq->txbufs == NULL)
311 for (i = 0; i < txq->tx_count; i++) {
312 if (txq->txbufs[i].mbuf) {
313 rte_pktmbuf_free(txq->txbufs[i].mbuf);
314 txq->txbufs[i].mbuf = NULL;
320 nfp_net_tx_queue_release(void *tx_queue)
322 struct nfp_net_txq *txq = tx_queue;
325 nfp_net_tx_queue_release_mbufs(txq);
326 rte_free(txq->txbufs);
332 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
334 nfp_net_tx_queue_release_mbufs(txq);
340 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
344 struct timespec wait;
346 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
349 if (hw->qcp_cfg == NULL)
350 rte_panic("Bad configuration queue pointer\n");
352 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
355 wait.tv_nsec = 1000000;
357 PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
359 /* Poll update field, waiting for NFP to ack the config */
360 for (cnt = 0; ; cnt++) {
361 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
364 if (new & NFP_NET_CFG_UPDATE_ERR) {
365 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
368 if (cnt >= NFP_NET_POLL_TIMEOUT) {
369 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
370 " %dms", update, cnt);
371 rte_panic("Exiting\n");
373 nanosleep(&wait, 0); /* wait for 1 ms */
375 PMD_DRV_LOG(DEBUG, "Ack DONE\n");
380 * Reconfigure the NIC
381 * @hw: device to reconfigure
382 * @ctrl: The value for the ctrl field in the BAR config
383 * @update: The value for the update field in the BAR config
385 * Write the update word to the BAR and ping the reconfig queue. Then poll
386 * until the firmware has acknowledged the update by zeroing the update word.
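 *
 * A typical caller builds new_ctrl from hw->cap bits and then issues, for
 * instance:
 *
 *	nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN);
 *
 * as nfp_net_configure() and nfp_net_promisc_enable() do below.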
389 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
393 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
396 rte_spinlock_lock(&hw->reconfig_lock);
398 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
399 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
403 err = __nfp_net_reconfig(hw, update);
405 rte_spinlock_unlock(&hw->reconfig_lock);
411 * Reconfig errors returned here imply situations where the error can be
412 * handled; otherwise rte_panic is called inside __nfp_net_reconfig
414 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
420 * Configure an Ethernet device. This function must be invoked first
421 * before any other function in the Ethernet API. This function can
422 * also be re-invoked when a device is in the stopped state.
425 nfp_net_configure(struct rte_eth_dev *dev)
427 struct rte_eth_conf *dev_conf;
428 struct rte_eth_rxmode *rxmode;
429 struct rte_eth_txmode *txmode;
430 uint32_t new_ctrl = 0;
432 struct nfp_net_hw *hw;
434 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
437 * A DPDK app sends info about how many queues to use and how
438 * those queues need to be configured. The DPDK core uses this
439 * info to make sure no more queues than those advertised by the
440 * driver are requested. This function is called after that
441 * internal process has completed.
444 PMD_INIT_LOG(DEBUG, "Configure");
446 dev_conf = &dev->data->dev_conf;
447 rxmode = &dev_conf->rxmode;
448 txmode = &dev_conf->txmode;
450 /* Checking TX mode */
451 if (txmode->mq_mode) {
452 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
456 /* Checking RX mode */
457 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
458 if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
459 update = NFP_NET_CFG_UPDATE_RSS;
460 new_ctrl = NFP_NET_CFG_CTRL_RSS;
462 PMD_INIT_LOG(INFO, "RSS not supported");
467 if (rxmode->split_hdr_size) {
468 PMD_INIT_LOG(INFO, "rxmode does not support split header");
472 if (rxmode->hw_ip_checksum) {
473 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
474 new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
476 PMD_INIT_LOG(INFO, "RXCSUM not supported");
481 if (rxmode->hw_vlan_filter) {
482 PMD_INIT_LOG(INFO, "VLAN filter not supported");
486 if (rxmode->hw_vlan_strip) {
487 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
488 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
490 PMD_INIT_LOG(INFO, "hw vlan strip not supported");
495 if (rxmode->hw_vlan_extend) {
496 PMD_INIT_LOG(INFO, "VLAN extended not supported");
500 if (rxmode->jumbo_frame)
501 hw->mtu = rxmode->max_rx_pkt_len;
503 if (!rxmode->hw_strip_crc)
504 PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
506 if (rxmode->enable_scatter) {
507 PMD_INIT_LOG(INFO, "Scatter not supported");
511 /* If the following capabilities are supported, enable them by default */
514 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
515 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
518 if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
519 new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
522 if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
523 new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
525 /* TX checksum offload */
526 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
527 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
530 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
531 new_ctrl |= NFP_NET_CFG_CTRL_LSO;
534 if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
535 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
540 update |= NFP_NET_CFG_UPDATE_GEN;
542 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
543 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
552 nfp_net_enable_queues(struct rte_eth_dev *dev)
554 struct nfp_net_hw *hw;
555 uint64_t enabled_queues = 0;
558 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
560 /* Enabling the required TX queues in the device */
561 for (i = 0; i < dev->data->nb_tx_queues; i++)
562 enabled_queues |= (1 << i);
564 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
568 /* Enabling the required RX queues in the device */
569 for (i = 0; i < dev->data->nb_rx_queues; i++)
570 enabled_queues |= (1 << i);
572 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
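/*
 * For example, with 4 TX queues configured the mask written to TXRS_ENABLE
 * above is 0xf (queues 0-3); the RX mask is built the same way.
 */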
576 nfp_net_disable_queues(struct rte_eth_dev *dev)
578 struct nfp_net_hw *hw;
579 uint32_t new_ctrl, update = 0;
581 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
583 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
584 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
586 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
587 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
588 NFP_NET_CFG_UPDATE_MSIX;
590 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
591 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
593 /* If reconfig fails, avoid changing the hw state */
594 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
601 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
605 for (i = 0; i < dev->data->nb_rx_queues; i++) {
606 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
613 nfp_net_params_setup(struct nfp_net_hw *hw)
615 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
616 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
620 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
622 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
625 #define ETH_ADDR_LEN 6
628 nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
632 for (i = 0; i < ETH_ADDR_LEN; i++)
633 dst[ETH_ADDR_LEN - i - 1] = src[i];
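/*
 * This simply mirrors the six bytes: an input of a0:a1:a2:a3:a4:a5 is
 * stored as a5:a4:a3:a2:a1:a0, which is how nfp_net_pf_read_mac() below
 * recovers the port MAC from the NFP ethernet table.
 */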
637 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
639 union eth_table_entry *entry;
643 entry = hw->eth_table;
645 /* Read the NFP ethernet table obtained earlier */
646 for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
647 if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
648 /* port not in use */
658 if (i == NSP_ETH_MAX_COUNT)
662 * hw points to port0 private data. We need hw pointing to this port's private data.
666 nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
667 (uint8_t *)&entry->mac_addr);
673 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
677 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
678 memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
680 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
681 memcpy(&hw->mac_addr[4], &tmp, 2);
685 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
687 uint32_t mac0 = *(uint32_t *)mac;
690 nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
693 mac1 = *(uint16_t *)mac;
694 nn_writew(rte_cpu_to_be_16(mac1),
695 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
699 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
700 struct rte_intr_handle *intr_handle)
702 struct nfp_net_hw *hw;
705 if (!intr_handle->intr_vec) {
706 intr_handle->intr_vec =
707 rte_zmalloc("intr_vec",
708 dev->data->nb_rx_queues * sizeof(int), 0);
709 if (!intr_handle->intr_vec) {
710 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
711 " intr_vec", dev->data->nb_rx_queues);
716 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
718 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
719 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
720 /* UIO just supports one queue and no LSC */
721 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
722 intr_handle->intr_vec[0] = 0;
724 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
725 for (i = 0; i < dev->data->nb_rx_queues; i++) {
727 * The first msix vector is reserved for non
730 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
731 intr_handle->intr_vec[i] = i + 1;
732 PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
733 intr_handle->intr_vec[i]);
737 /* Avoiding TX interrupts */
738 hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
743 nfp_net_start(struct rte_eth_dev *dev)
745 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
746 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
747 struct rte_eth_conf *dev_conf;
748 struct rte_eth_rxmode *rxmode;
749 uint32_t new_ctrl, update = 0;
750 struct nfp_net_hw *hw;
751 uint32_t intr_vector;
754 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
756 PMD_INIT_LOG(DEBUG, "Start");
758 /* Disabling queues just in case... */
759 nfp_net_disable_queues(dev);
761 /* Writing configuration parameters in the device */
762 nfp_net_params_setup(hw);
764 /* Enabling the required queues in the device */
765 nfp_net_enable_queues(dev);
767 /* check and configure queue intr-vector mapping */
768 if (dev->data->dev_conf.intr_conf.rxq != 0) {
769 if (hw->pf_multiport_enabled) {
770 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
771 "with NFP multiport PF");
774 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
776 * Better not to share LSC with RX interrupts.
777 * Unregistering LSC interrupt handler
779 rte_intr_callback_unregister(&pci_dev->intr_handle,
780 nfp_net_dev_interrupt_handler, (void *)dev);
782 if (dev->data->nb_rx_queues > 1) {
783 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
784 "supports 1 queue with UIO");
788 intr_vector = dev->data->nb_rx_queues;
789 if (rte_intr_efd_enable(intr_handle, intr_vector))
792 nfp_configure_rx_interrupt(dev, intr_handle);
793 update = NFP_NET_CFG_UPDATE_MSIX;
796 rte_intr_enable(intr_handle);
798 dev_conf = &dev->data->dev_conf;
799 rxmode = &dev_conf->rxmode;
801 /* Checking RX mode */
802 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
803 if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
804 if (!nfp_net_rss_config_default(dev))
805 update |= NFP_NET_CFG_UPDATE_RSS;
807 PMD_INIT_LOG(INFO, "RSS not supported");
812 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
814 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
816 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
817 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
819 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
820 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
824 * Allocating rte mbuffs for configured rx queues.
825 * This requires queues being enabled before
827 if (nfp_net_rx_freelist_setup(dev) < 0) {
833 /* Configure the physical port up */
834 nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1);
842 * An error returned by this function should make the app exit,
843 * with the system then releasing all the memory allocated, even
844 * memory coming from hugepages.
846 * The device could be enabled at this point with some queues
847 * ready for getting packets. This is true if the call to
848 * nfp_net_rx_freelist_setup() succeeds for some queues but
849 * fails for subsequent queues.
851 * This should make the app exit, but it is better to tell the user first.
854 nfp_net_disable_queues(dev);
859 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
861 nfp_net_stop(struct rte_eth_dev *dev)
864 struct nfp_net_hw *hw;
866 PMD_INIT_LOG(DEBUG, "Stop");
868 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
870 nfp_net_disable_queues(dev);
873 for (i = 0; i < dev->data->nb_tx_queues; i++) {
874 nfp_net_reset_tx_queue(
875 (struct nfp_net_txq *)dev->data->tx_queues[i]);
878 for (i = 0; i < dev->data->nb_rx_queues; i++) {
879 nfp_net_reset_rx_queue(
880 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
884 /* Configure the physical port down */
885 nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0);
888 /* Reset and stop device. The device can not be restarted. */
890 nfp_net_close(struct rte_eth_dev *dev)
892 struct nfp_net_hw *hw;
893 struct rte_pci_device *pci_dev;
896 PMD_INIT_LOG(DEBUG, "Close");
898 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
899 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
902 * We assume that the DPDK application is stopping all the
903 * threads/queues before calling the device close function.
906 nfp_net_disable_queues(dev);
909 for (i = 0; i < dev->data->nb_tx_queues; i++) {
910 nfp_net_reset_tx_queue(
911 (struct nfp_net_txq *)dev->data->tx_queues[i]);
914 for (i = 0; i < dev->data->nb_rx_queues; i++) {
915 nfp_net_reset_rx_queue(
916 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
919 rte_intr_disable(&pci_dev->intr_handle);
920 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
922 /* unregister callback func from eal lib */
923 rte_intr_callback_unregister(&pci_dev->intr_handle,
924 nfp_net_dev_interrupt_handler,
928 * The ixgbe PMD driver disables the pcie master on the
929 * device. The i40e does not...
934 nfp_net_promisc_enable(struct rte_eth_dev *dev)
936 uint32_t new_ctrl, update = 0;
937 struct nfp_net_hw *hw;
939 PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
941 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
943 if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
944 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
948 if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
949 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
953 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
954 update = NFP_NET_CFG_UPDATE_GEN;
957 * DPDK sets promiscuous mode on just after this call, assuming
958 * it cannot fail ...
960 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
967 nfp_net_promisc_disable(struct rte_eth_dev *dev)
969 uint32_t new_ctrl, update = 0;
970 struct nfp_net_hw *hw;
972 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
974 if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
975 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
979 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
980 update = NFP_NET_CFG_UPDATE_GEN;
983 * DPDK sets promiscuous mode off just before this call,
984 * assuming it cannot fail ...
986 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
993 * return 0 means link status changed, -1 means not changed
995 * Wait to complete is needed as it can take up to 9 seconds to get the Link status.
999 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1001 struct nfp_net_hw *hw;
1002 struct rte_eth_link link, old;
1003 uint32_t nn_link_status;
1005 static const uint32_t ls_to_ethtool[] = {
1006 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1007 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
1008 [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
1009 [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
1010 [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
1011 [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
1012 [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
1013 [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
1016 PMD_DRV_LOG(DEBUG, "Link update\n");
1018 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1020 memset(&old, 0, sizeof(old));
1021 nfp_net_dev_atomic_read_link_status(dev, &old);
1023 nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1025 memset(&link, 0, sizeof(struct rte_eth_link));
1027 if (nn_link_status & NFP_NET_CFG_STS_LINK)
1028 link.link_status = ETH_LINK_UP;
1030 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1032 nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1033 NFP_NET_CFG_STS_LINK_RATE_MASK;
1035 if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1036 link.link_speed = ETH_SPEED_NUM_NONE;
1038 link.link_speed = ls_to_ethtool[nn_link_status];
1040 if (old.link_status != link.link_status) {
1041 nfp_net_dev_atomic_write_link_status(dev, &link);
1042 if (link.link_status)
1043 PMD_DRV_LOG(INFO, "NIC Link is Up\n");
1045 PMD_DRV_LOG(INFO, "NIC Link is Down\n");
1053 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1056 struct nfp_net_hw *hw;
1057 struct rte_eth_stats nfp_dev_stats;
1059 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1061 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1063 memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1065 /* reading per RX ring stats */
1066 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1067 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1070 nfp_dev_stats.q_ipackets[i] =
1071 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1073 nfp_dev_stats.q_ipackets[i] -=
1074 hw->eth_stats_base.q_ipackets[i];
1076 nfp_dev_stats.q_ibytes[i] =
1077 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1079 nfp_dev_stats.q_ibytes[i] -=
1080 hw->eth_stats_base.q_ibytes[i];
1083 /* reading per TX ring stats */
1084 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1085 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1088 nfp_dev_stats.q_opackets[i] =
1089 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1091 nfp_dev_stats.q_opackets[i] -=
1092 hw->eth_stats_base.q_opackets[i];
1094 nfp_dev_stats.q_obytes[i] =
1095 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1097 nfp_dev_stats.q_obytes[i] -=
1098 hw->eth_stats_base.q_obytes[i];
1101 nfp_dev_stats.ipackets =
1102 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1104 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1106 nfp_dev_stats.ibytes =
1107 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1109 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1111 nfp_dev_stats.opackets =
1112 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1114 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1116 nfp_dev_stats.obytes =
1117 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1119 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1121 /* reading general device stats */
1122 nfp_dev_stats.ierrors =
1123 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1125 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1127 nfp_dev_stats.oerrors =
1128 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1130 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1132 /* RX ring mbuf allocation failures */
1133 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1135 nfp_dev_stats.imissed =
1136 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1138 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1141 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1148 nfp_net_stats_reset(struct rte_eth_dev *dev)
1151 struct nfp_net_hw *hw;
1153 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1156 * hw->eth_stats_base records the per counter starting point.
1157 * Let's update it now.
1160 /* reading per RX ring stats */
1161 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1162 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1165 hw->eth_stats_base.q_ipackets[i] =
1166 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1168 hw->eth_stats_base.q_ibytes[i] =
1169 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1172 /* reading per TX ring stats */
1173 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1174 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1177 hw->eth_stats_base.q_opackets[i] =
1178 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1180 hw->eth_stats_base.q_obytes[i] =
1181 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1184 hw->eth_stats_base.ipackets =
1185 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1187 hw->eth_stats_base.ibytes =
1188 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1190 hw->eth_stats_base.opackets =
1191 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1193 hw->eth_stats_base.obytes =
1194 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1196 /* reading general device stats */
1197 hw->eth_stats_base.ierrors =
1198 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1200 hw->eth_stats_base.oerrors =
1201 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1203 /* RX ring mbuf allocation failures */
1204 dev->data->rx_mbuf_alloc_failed = 0;
1206 hw->eth_stats_base.imissed =
1207 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1211 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1213 struct nfp_net_hw *hw;
1215 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1217 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1218 dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1219 dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1220 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1221 dev_info->max_rx_pktlen = hw->max_mtu;
1222 /* Next should change when PF support is implemented */
1223 dev_info->max_mac_addrs = 1;
1225 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1226 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1228 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1229 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1230 DEV_RX_OFFLOAD_UDP_CKSUM |
1231 DEV_RX_OFFLOAD_TCP_CKSUM;
1233 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1234 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1236 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1237 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1238 DEV_TX_OFFLOAD_UDP_CKSUM |
1239 DEV_TX_OFFLOAD_TCP_CKSUM;
1241 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1243 .pthresh = DEFAULT_RX_PTHRESH,
1244 .hthresh = DEFAULT_RX_HTHRESH,
1245 .wthresh = DEFAULT_RX_WTHRESH,
1247 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1251 dev_info->default_txconf = (struct rte_eth_txconf) {
1253 .pthresh = DEFAULT_TX_PTHRESH,
1254 .hthresh = DEFAULT_TX_HTHRESH,
1255 .wthresh = DEFAULT_TX_WTHRESH,
1257 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1258 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1259 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1260 ETH_TXQ_FLAGS_NOOFFLOADS,
1263 dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
1264 ETH_RSS_NONFRAG_IPV4_UDP |
1265 ETH_RSS_NONFRAG_IPV6_TCP |
1266 ETH_RSS_NONFRAG_IPV6_UDP;
1268 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1269 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1271 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1272 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1273 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1275 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
1276 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1279 static const uint32_t *
1280 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1282 static const uint32_t ptypes[] = {
1283 /* refers to nfp_net_set_hash() */
1284 RTE_PTYPE_INNER_L3_IPV4,
1285 RTE_PTYPE_INNER_L3_IPV6,
1286 RTE_PTYPE_INNER_L3_IPV6_EXT,
1287 RTE_PTYPE_INNER_L4_MASK,
1291 if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1297 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1299 struct nfp_net_rxq *rxq;
1300 struct nfp_net_rx_desc *rxds;
1304 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1311 * Other PMDs just check the DD bit in intervals of 4
1312 * descriptors and count all four if the first one has the DD
1313 * bit on. Of course, this is not accurate but can be good for
1314 * performance. Ideally, that should be done in chunks of
1315 * descriptors belonging to the same cache line.
1318 while (count < rxq->rx_count) {
1319 rxds = &rxq->rxds[idx];
1320 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1327 if ((idx) == rxq->rx_count)
1335 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1337 struct rte_pci_device *pci_dev;
1338 struct nfp_net_hw *hw;
1341 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1342 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1344 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1347 /* Make sure all updates are written before un-masking */
1349 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1350 NFP_NET_CFG_ICR_UNMASKED);
1355 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1357 struct rte_pci_device *pci_dev;
1358 struct nfp_net_hw *hw;
1361 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1362 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1364 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1367 /* Make sure all updates are written before un-masking */
1369 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1374 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1376 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1377 struct rte_eth_link link;
1379 memset(&link, 0, sizeof(link));
1380 nfp_net_dev_atomic_read_link_status(dev, &link);
1381 if (link.link_status)
1382 RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
1383 dev->data->port_id, link.link_speed,
1384 link.link_duplex == ETH_LINK_FULL_DUPLEX
1385 ? "full-duplex" : "half-duplex");
1387 RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
1388 dev->data->port_id);
1390 RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
1391 pci_dev->addr.domain, pci_dev->addr.bus,
1392 pci_dev->addr.devid, pci_dev->addr.function);
1395 /* Interrupt configuration and handling */
1398 * nfp_net_irq_unmask - Unmask an interrupt
1400 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1401 * clear the ICR for the entry.
1404 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1406 struct nfp_net_hw *hw;
1407 struct rte_pci_device *pci_dev;
1409 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1410 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1412 if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1413 /* If MSI-X auto-masking is used, clear the entry */
1415 rte_intr_enable(&pci_dev->intr_handle);
1417 /* Make sure all updates are written before un-masking */
1419 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1420 NFP_NET_CFG_ICR_UNMASKED);
1425 nfp_net_dev_interrupt_handler(void *param)
1428 struct rte_eth_link link;
1429 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1431 PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
1433 /* get the link status */
1434 memset(&link, 0, sizeof(link));
1435 nfp_net_dev_atomic_read_link_status(dev, &link);
1437 nfp_net_link_update(dev, 0);
1440 if (!link.link_status) {
1441 /* handle it 1 sec later, wait for it to become stable */
1442 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1443 /* likely to go down */
1445 /* handle it 4 sec later, wait for it to become stable */
1446 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1449 if (rte_eal_alarm_set(timeout * 1000,
1450 nfp_net_dev_interrupt_delayed_handler,
1452 RTE_LOG(ERR, PMD, "Error setting alarm");
1454 nfp_net_irq_unmask(dev);
1459 * Interrupt handler to be registered as an alarm callback for delayed
1460 * handling of a specific interrupt, waiting for a stable NIC state. As the
1461 * NFP interrupt state is not stable right after the link goes down, it is
1462 * necessary to wait 4 seconds to get a stable status.
1464 * @param handle Pointer to interrupt handle.
1465 * @param param The address of parameter (struct rte_eth_dev *)
1470 nfp_net_dev_interrupt_delayed_handler(void *param)
1472 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1474 nfp_net_link_update(dev, 0);
1475 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1477 nfp_net_dev_link_status_print(dev);
1480 nfp_net_irq_unmask(dev);
1484 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1486 struct nfp_net_hw *hw;
1488 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1490 /* check that mtu is within the allowed range */
1491 if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
1494 /* mtu setting is forbidden if port is started */
1495 if (dev->data->dev_started) {
1496 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1497 dev->data->port_id);
1501 /* switch to jumbo mode if needed */
1502 if ((uint32_t)mtu > ETHER_MAX_LEN)
1503 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1505 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1507 /* update max frame size */
1508 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1510 /* writing to configuration space */
1511 nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1519 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1520 uint16_t queue_idx, uint16_t nb_desc,
1521 unsigned int socket_id,
1522 const struct rte_eth_rxconf *rx_conf,
1523 struct rte_mempool *mp)
1525 const struct rte_memzone *tz;
1526 struct nfp_net_rxq *rxq;
1527 struct nfp_net_hw *hw;
1529 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1531 PMD_INIT_FUNC_TRACE();
1533 /* Validating number of descriptors */
1534 if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
1535 (nb_desc > NFP_NET_MAX_RX_DESC) ||
1536 (nb_desc < NFP_NET_MIN_RX_DESC)) {
1537 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1542 * Free memory prior to re-allocation if needed. This is the case after
1543 * calling nfp_net_stop
1545 if (dev->data->rx_queues[queue_idx]) {
1546 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1547 dev->data->rx_queues[queue_idx] = NULL;
1550 /* Allocating rx queue data structure */
1551 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1552 RTE_CACHE_LINE_SIZE, socket_id);
1556 /* Hw queues mapping based on firmware configuration */
1557 rxq->qidx = queue_idx;
1558 rxq->fl_qcidx = queue_idx * hw->stride_rx;
1559 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1560 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1561 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1564 * Tracking mbuf size for detecting a potential mbuf overflow due to the RX offset.
1568 rxq->mbuf_size = rxq->mem_pool->elt_size;
1569 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1570 hw->flbufsz = rxq->mbuf_size;
1572 rxq->rx_count = nb_desc;
1573 rxq->port_id = dev->data->port_id;
1574 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1575 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
1577 rxq->drop_en = rx_conf->rx_drop_en;
1580 * Allocate RX ring hardware descriptors. A memzone large enough to
1581 * handle the maximum ring size is allocated in order to allow for
1582 * resizing in later calls to the queue setup function.
1584 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1585 sizeof(struct nfp_net_rx_desc) *
1586 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1590 RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
1591 nfp_net_rx_queue_release(rxq);
1595 /* Saving physical and virtual addresses for the RX ring */
1596 rxq->dma = (uint64_t)tz->iova;
1597 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1599 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1600 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1601 sizeof(*rxq->rxbufs) * nb_desc,
1602 RTE_CACHE_LINE_SIZE, socket_id);
1603 if (rxq->rxbufs == NULL) {
1604 nfp_net_rx_queue_release(rxq);
1608 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1609 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1611 nfp_net_reset_rx_queue(rxq);
1613 dev->data->rx_queues[queue_idx] = rxq;
1617 * Telling the HW about the physical address of the RX ring and number
1618 * of descriptors in log2 format
1620 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1621 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
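/*
 * For instance, a ring of 1024 descriptors is written to the RXR_SZ
 * register as rte_log2_u32(1024) = 10.
 */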
1627 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1629 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1633 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
1636 for (i = 0; i < rxq->rx_count; i++) {
1637 struct nfp_net_rx_desc *rxd;
1638 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1641 RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
1642 (unsigned)rxq->qidx);
1646 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1648 rxd = &rxq->rxds[i];
1650 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1651 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1653 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
1656 /* Make sure all writes are flushed before telling the hardware */
1659 /* Not advertising the whole ring as the firmware gets confused if so */
1660 PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
1663 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1669 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1670 uint16_t nb_desc, unsigned int socket_id,
1671 const struct rte_eth_txconf *tx_conf)
1673 const struct rte_memzone *tz;
1674 struct nfp_net_txq *txq;
1675 uint16_t tx_free_thresh;
1676 struct nfp_net_hw *hw;
1678 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1680 PMD_INIT_FUNC_TRACE();
1682 /* Validating number of descriptors */
1683 if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1684 (nb_desc > NFP_NET_MAX_TX_DESC) ||
1685 (nb_desc < NFP_NET_MIN_TX_DESC)) {
1686 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1690 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1691 tx_conf->tx_free_thresh :
1692 DEFAULT_TX_FREE_THRESH);
1694 if (tx_free_thresh > (nb_desc)) {
1696 "tx_free_thresh must be less than the number of TX "
1697 "descriptors. (tx_free_thresh=%u port=%d "
1698 "queue=%d)\n", (unsigned int)tx_free_thresh,
1699 dev->data->port_id, (int)queue_idx);
1704 * Free memory prior to re-allocation if needed. This is the case after
1705 * calling nfp_net_stop
1707 if (dev->data->tx_queues[queue_idx]) {
1708 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
1710 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1711 dev->data->tx_queues[queue_idx] = NULL;
1714 /* Allocating tx queue data structure */
1715 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1716 RTE_CACHE_LINE_SIZE, socket_id);
1718 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1723 * Allocate TX ring hardware descriptors. A memzone large enough to
1724 * handle the maximum ring size is allocated in order to allow for
1725 * resizing in later calls to the queue setup function.
1727 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1728 sizeof(struct nfp_net_tx_desc) *
1729 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1732 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1733 nfp_net_tx_queue_release(txq);
1737 txq->tx_count = nb_desc;
1738 txq->tx_free_thresh = tx_free_thresh;
1739 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1740 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1741 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1743 /* queue mapping based on firmware configuration */
1744 txq->qidx = queue_idx;
1745 txq->tx_qcidx = queue_idx * hw->stride_tx;
1746 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1748 txq->port_id = dev->data->port_id;
1749 txq->txq_flags = tx_conf->txq_flags;
1751 /* Saving physical and virtual addresses for the TX ring */
1752 txq->dma = (uint64_t)tz->iova;
1753 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1755 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1756 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1757 sizeof(*txq->txbufs) * nb_desc,
1758 RTE_CACHE_LINE_SIZE, socket_id);
1759 if (txq->txbufs == NULL) {
1760 nfp_net_tx_queue_release(txq);
1763 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1764 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1766 nfp_net_reset_tx_queue(txq);
1768 dev->data->tx_queues[queue_idx] = txq;
1772 * Telling the HW about the physical address of the TX ring and number
1773 * of descriptors in log2 format
1775 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1776 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1781 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1783 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1784 struct rte_mbuf *mb)
1787 struct nfp_net_hw *hw = txq->hw;
1789 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
1792 ol_flags = mb->ol_flags;
1794 if (!(ol_flags & PKT_TX_TCP_SEG))
1797 txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
1798 txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
1799 txd->flags = PCIE_DESC_TX_LSO;
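	/*
	 * For a typical TCP segment with 14-byte Ethernet, 20-byte IPv4 and
	 * 20-byte TCP headers, l4_offset above ends up as 14 + 20 + 20 = 54,
	 * and lso carries the MSS taken from tso_segsz.
	 */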
1808 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1810 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1811 struct rte_mbuf *mb)
1814 struct nfp_net_hw *hw = txq->hw;
1816 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1819 ol_flags = mb->ol_flags;
1821 /* IPv6 does not need checksum */
1822 if (ol_flags & PKT_TX_IP_CKSUM)
1823 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1825 switch (ol_flags & PKT_TX_L4_MASK) {
1826 case PKT_TX_UDP_CKSUM:
1827 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1829 case PKT_TX_TCP_CKSUM:
1830 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1834 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1835 txd->flags |= PCIE_DESC_TX_CSUM;
1838 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1840 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1841 struct rte_mbuf *mb)
1843 struct nfp_net_hw *hw = rxq->hw;
1845 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1848 /* If IPv4 and the IP checksum is wrong, flag the error */
1849 if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1850 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
1851 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1853 /* If neither UDP nor TCP, return */
1854 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1855 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1858 if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1859 !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
1860 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1862 if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
1863 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
1864 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1867 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1868 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1870 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1873 * nfp_net_set_hash - Set mbuf hash data
1875 * The RSS hash and hash-type are pre-pended to the packet data.
1876 * Extract and decode it and set the mbuf fields.
1879 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1880 struct rte_mbuf *mbuf)
1882 struct nfp_net_hw *hw = rxq->hw;
1883 uint8_t *meta_offset;
1886 uint32_t hash_type = 0;
1888 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1891 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
1892 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1895 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1896 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1898 } else if (NFP_DESC_META_LEN(rxd)) {
1901 * <---- 32 bit ----->
1906 * ====================
1909 * The field type word contains up to 8 4-bit field types.
1910 * A 4-bit field type refers to a data field word.
1911 * A data field word can have several 4-bit field types.
1913 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1914 meta_offset -= NFP_DESC_META_LEN(rxd);
1915 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1917 /* NFP PMD just supports metadata for hashing */
1918 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1919 case NFP_NET_META_HASH:
1920 /* next field type is about the hash type */
1921 meta_info >>= NFP_NET_META_FIELD_SIZE;
1922 /* hash value is in the data field */
1923 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1924 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1927 /* Unsupported metadata can be a performance issue */
1934 mbuf->hash.rss = hash;
1935 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1937 switch (hash_type) {
1938 case NFP_NET_RSS_IPV4:
1939 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
1941 case NFP_NET_RSS_IPV6:
1942 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
1944 case NFP_NET_RSS_IPV6_EX:
1945 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
1948 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
1953 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
1955 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1963 * There are some decisions to take:
1964 * 1) How to check the DD bit of RX descriptors
1965 * 2) How and when to allocate new mbufs
1967 * The current implementation checks just one single DD bit per loop. As each
1968 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
1969 * a single cache line instead. Tests with this change have not shown any
1970 * performance improvement, but it requires further investigation. For example,
1971 * depending on which descriptor is next, the number of descriptors could be
1972 * less than 8 for just checking those in the same cache line. This implies
1973 * extra work which could be counterproductive by itself. Indeed, the latest
1974 * firmware changes are doing just this: writing several descriptors with the
1975 * DD bit set to save PCIe bandwidth and DMA operations from the NFP.
1977 * Mbuf allocation is done when a new packet is received. The descriptor is
1978 * then automatically linked with the new mbuf and the old one is given to the
1979 * user. The main drawback of this design is that mbuf allocation is heavier than
1980 * using the bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
1981 * cache point of view, allocating the mbuf early on, as done now, does not
1982 * seem to have any benefit at all. Again, tests with this change have not
1983 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
1984 * so the implications of this type of allocation should be studied carefully.
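/*
 * A bulk-allocation variant (a sketch only, not what this PMD implements)
 * would look roughly like:
 *
 *	struct rte_mbuf *mbufs[64];
 *
 *	if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, mbufs, n) == 0) {
 *		... refill n descriptors from mbufs[] ...
 *	}
 *
 * with the all-or-nothing semantics mentioned above handled by falling back
 * to per-packet allocation when the bulk call fails.
 */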
1989 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1991 struct nfp_net_rxq *rxq;
1992 struct nfp_net_rx_desc *rxds;
1993 struct nfp_net_rx_buff *rxb;
1994 struct nfp_net_hw *hw;
1995 struct rte_mbuf *mb;
1996 struct rte_mbuf *new_mb;
2002 if (unlikely(rxq == NULL)) {
2004 * DPDK just checks that the queue index is lower than the maximum
2005 * number of queues enabled. But the queue also needs to be configured.
2007 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2015 while (avail < nb_pkts) {
2016 rxb = &rxq->rxbufs[rxq->rd_p];
2017 if (unlikely(rxb == NULL)) {
2018 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2023 * Memory barrier to ensure that we won't do other
2024 * reads before the DD bit.
2028 rxds = &rxq->rxds[rxq->rd_p];
2029 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2033 * We got a packet. Let's alloc a new mbuf for refilling the
2034 * free descriptor ring as soon as possible.
2036 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2037 if (unlikely(new_mb == NULL)) {
2038 RTE_LOG_DP(DEBUG, PMD,
2039 "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2040 rxq->port_id, (unsigned int)rxq->qidx);
2041 nfp_net_mbuf_alloc_failed(rxq);
2048 * Grab the mbuf and refill the descriptor with the
2049 * previously allocated mbuf
2054 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
2055 rxds->rxd.data_len, rxq->mbuf_size);
2057 /* Size of this segment */
2058 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2059 /* Size of the whole packet. We just support 1 segment */
2060 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2062 if (unlikely((mb->data_len + hw->rx_offset) >
2065 * This should not happen and the user has the
2066 * responsibility of avoiding it. But we have
2067 * to give some info about the error
2069 RTE_LOG_DP(ERR, PMD,
2070 "mbuf overflow likely due to the RX offset.\n"
2071 "\t\tYour mbuf size should have extra space for"
2072 " RX offset=%u bytes.\n"
2073 "\t\tCurrently you just have %u bytes available"
2074 " but the received packet is %u bytes long",
2076 rxq->mbuf_size - hw->rx_offset,
2081 /* Filling the received mbuf with packet info */
2083 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2085 mb->data_off = RTE_PKTMBUF_HEADROOM +
2086 NFP_DESC_META_LEN(rxds);
2088 /* No scatter mode supported */
2092 /* Checking the RSS flag */
2093 nfp_net_set_hash(rxq, rxds, mb);
2095 /* Checking the checksum flag */
2096 nfp_net_rx_cksum(rxq, rxds, mb);
2098 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2099 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2100 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2101 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2104 /* Adding the mbuf to the mbuf array passed by the app */
2105 rx_pkts[avail++] = mb;
2107 /* Now resetting and updating the descriptor */
2110 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2112 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2113 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2116 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
2123 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
2124 rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2126 nb_hold += rxq->nb_rx_hold;
2129 * FL descriptors need to be written before incrementing the
2130 * FL queue WR pointer.
2133 if (nb_hold > rxq->rx_free_thresh) {
2134 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
2135 rxq->port_id, (unsigned int)rxq->qidx,
2136 (unsigned)nb_hold, (unsigned)avail);
2137 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2140 rxq->nb_rx_hold = nb_hold;
2146 * nfp_net_tx_free_bufs - Check for descriptors with a complete status
2148 * @txq: TX queue to work with
2149 * Returns number of descriptors freed
2152 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2157 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2158 " status\n", txq->qidx);
2160 /* Work out how many packets have been sent */
2161 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2163 if (qcp_rd_p == txq->rd_p) {
2164 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2165 "packets (%u, %u)\n", txq->qidx,
2166 qcp_rd_p, txq->rd_p);
2170 if (qcp_rd_p > txq->rd_p)
2171 todo = qcp_rd_p - txq->rd_p;
2173 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2175 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
2176 qcp_rd_p, txq->rd_p, txq->rd_p);
2182 if (unlikely(txq->rd_p >= txq->tx_count))
2183 txq->rd_p -= txq->tx_count;
2188 /* Always leave some free descriptors to avoid wrapping confusion */
2190 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2192 if (txq->wr_p >= txq->rd_p)
2193 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2195 return txq->rd_p - txq->wr_p - 8;
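/*
 * For example, with tx_count = 1024 and wr_p == rd_p (empty ring), the
 * function above reports 1016 usable descriptors, keeping 8 in reserve.
 */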
2199 * nfp_net_txq_full - Check if the TX queue free descriptor count
2200 * is below tx_free_thresh
2202 * @txq: TX queue to check
2204 * This function uses the host copy* of read/write pointers
2207 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2209 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2213 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2215 struct nfp_net_txq *txq;
2216 struct nfp_net_hw *hw;
2217 struct nfp_net_tx_desc *txds, txd;
2218 struct rte_mbuf *pkt;
2220 int pkt_size, dma_size;
2221 uint16_t free_descs, issued_descs;
2222 struct rte_mbuf **lmbuf;
2227 txds = &txq->txds[txq->wr_p];
2229 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
2230 txq->qidx, txq->wr_p, nb_pkts);
2232 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2233 nfp_net_tx_free_bufs(txq);
2235 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2236 if (unlikely(free_descs == 0))
2243 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
2244 txq->qidx, nb_pkts);
2245 /* Sending packets */
2246 while ((i < nb_pkts) && free_descs) {
2247 /* Grabbing the mbuf linked to the current descriptor */
2248 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2249 /* Warming the cache for releasing the mbuf later on */
2250 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2252 pkt = *(tx_pkts + i);
2254 if (unlikely((pkt->nb_segs > 1) &&
2255 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2256 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2257 rte_panic("Multisegment packet unsupported\n");
2260 /* Checking if we have enough descriptors */
2261 if (unlikely(pkt->nb_segs > free_descs))
2265 * Checksum and VLAN flags are set just in the first descriptor of a
2266 * multisegment packet, but TSO info needs to be in all of them.
2268 txd.data_len = pkt->pkt_len;
2269 nfp_net_tx_tso(txq, &txd, pkt);
2270 nfp_net_tx_cksum(txq, &txd, pkt);
2272 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2273 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2274 txd.flags |= PCIE_DESC_TX_VLAN;
2275 txd.vlan = pkt->vlan_tci;
2279 * mbuf data_len is the data in one segment and pkt_len is the data
2280 * in the whole packet. When the packet is just one segment,
2281 * then data_len = pkt_len.
2283 pkt_size = pkt->pkt_len;
2286 /* Copying TSO, VLAN and cksum info */
			/* Releasing mbuf previously used by this descriptor */
2291 rte_pktmbuf_free_seg(*lmbuf);
			 * Linking the mbuf with the descriptor so it can be
			 * released the next time this descriptor is used
2299 dma_size = pkt->data_len;
2300 dma_addr = rte_mbuf_data_iova(pkt);
			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address: "
				   "%" PRIx64 "\n", dma_addr);
2304 /* Filling descriptors fields */
2305 txds->dma_len = dma_size;
2306 txds->data_len = txd.data_len;
2307 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2308 txds->dma_addr_lo = (dma_addr & 0xffffffff);
2309 ASSERT(free_descs > 0);
			if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
2316 pkt_size -= dma_size;
2319 txds->offset_eop |= PCIE_DESC_TX_EOP;
2321 txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
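			/*
			 * For a multi-segment mbuf this loop emits one
			 * descriptor per segment; only the descriptor covering
			 * the last segment (pkt_size reaching 0) carries
			 * PCIE_DESC_TX_EOP, which marks the packet boundary
			 * for the hardware.
			 */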
2324 /* Referencing next free TX descriptor */
2325 txds = &txq->txds[txq->wr_p];
2326 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
	/* Increment write pointers. Force memory write before we let HW know */
	rte_wmb();
	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2341 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2343 uint32_t new_ctrl, update;
2344 struct nfp_net_hw *hw;
2347 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2350 if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2351 (mask & ETH_VLAN_EXTEND_OFFLOAD))
		RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
			" ETH_VLAN_EXTEND_OFFLOAD\n");
2355 /* Enable vlan strip if it is not configured yet */
2356 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2357 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2358 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
	/* Disable vlan strip only if it is currently enabled */
2361 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2362 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2363 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2368 update = NFP_NET_CFG_UPDATE_GEN;
2370 ret = nfp_net_reconfig(hw, new_ctrl, update);
2372 hw->ctrl = new_ctrl;
2378 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2379 struct rte_eth_rss_reta_entry64 *reta_conf,
2382 uint32_t reta, mask;
2385 struct nfp_net_hw *hw =
2386 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2388 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		RTE_LOG(ERR, PMD, "The size of the hash lookup table configured "
			"(%d) doesn't match the number supported by the "
			"hardware (%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
	 * Update Redirection Table. There are 128 8-bit entries which can be
	 * managed as 32 32-bit entries.
2399 for (i = 0; i < reta_size; i += 4) {
2400 /* Handling 4 RSS entries per loop */
2401 idx = i / RTE_RETA_GROUP_SIZE;
2402 shift = i % RTE_RETA_GROUP_SIZE;
2403 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
		/* If all 4 entries were set, there is no need to read the RETA register */
2411 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
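		/*
		 * Illustrative layout: indirection-table entries 0..3 share
		 * the 32-bit word at NFP_NET_CFG_RSS_ITBL + 0, entries 4..7
		 * the word at offset 4, and so on, one byte (one queue index)
		 * per entry.
		 */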
2413 for (j = 0; j < 4; j++) {
2414 if (!(mask & (0x1 << j)))
2417 /* Clearing the entry bits */
2418 reta &= ~(0xFF << (8 * j));
2419 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2421 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2427 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2429 nfp_net_reta_update(struct rte_eth_dev *dev,
2430 struct rte_eth_rss_reta_entry64 *reta_conf,
2433 struct nfp_net_hw *hw =
2434 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2438 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2441 ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2445 update = NFP_NET_CFG_UPDATE_RSS;
2447 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2453 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2455 nfp_net_reta_query(struct rte_eth_dev *dev,
2456 struct rte_eth_rss_reta_entry64 *reta_conf,
2462 struct nfp_net_hw *hw;
2464 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2466 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2469 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		RTE_LOG(ERR, PMD, "The size of the hash lookup table configured "
			"(%d) doesn't match the number supported by the "
			"hardware (%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
	 * Reading Redirection Table. There are 128 8-bit entries which can be
	 * managed as 32 32-bit entries.
2480 for (i = 0; i < reta_size; i += 4) {
2481 /* Handling 4 RSS entries per loop */
2482 idx = i / RTE_RETA_GROUP_SIZE;
2483 shift = i % RTE_RETA_GROUP_SIZE;
2484 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2489 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2491 for (j = 0; j < 4; j++) {
2492 if (!(mask & (0x1 << j)))
			reta_conf[idx].reta[shift + j] =
				(uint8_t)((reta >> (8 * j)) & 0xFF);
2502 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2503 struct rte_eth_rss_conf *rss_conf)
2505 struct nfp_net_hw *hw;
2507 uint32_t cfg_rss_ctrl = 0;
2511 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Writing the key byte by byte */
2514 for (i = 0; i < rss_conf->rss_key_len; i++) {
2515 memcpy(&key, &rss_conf->rss_key[i], 1);
2516 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2519 rss_hf = rss_conf->rss_hf;
2521 if (rss_hf & ETH_RSS_IPV4)
2522 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
2523 NFP_NET_CFG_RSS_IPV4_TCP |
2524 NFP_NET_CFG_RSS_IPV4_UDP;
2526 if (rss_hf & ETH_RSS_IPV6)
2527 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
2528 NFP_NET_CFG_RSS_IPV6_TCP |
2529 NFP_NET_CFG_RSS_IPV6_UDP;
2531 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2532 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2534 /* configuring where to apply the RSS hash */
2535 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2537 /* Writing the key size */
2538 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
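	/*
	 * Note: callers are expected to pass a key no longer than
	 * NFP_NET_CFG_RSS_KEY_SZ; nfp_net_rss_hash_update() below checks this
	 * before calling in here. A 40-byte Toeplitz key, as used by most
	 * PMDs, is a typical choice.
	 */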
2544 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2545 struct rte_eth_rss_conf *rss_conf)
2549 struct nfp_net_hw *hw;
2551 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2553 rss_hf = rss_conf->rss_hf;
2555 /* Checking if RSS is enabled */
2556 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2557 if (rss_hf != 0) { /* Enable RSS? */
2558 RTE_LOG(ERR, PMD, "RSS unsupported\n");
2561 return 0; /* Nothing to do */
2564 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2565 RTE_LOG(ERR, PMD, "hash key too long\n");
2569 nfp_net_rss_hash_write(dev, rss_conf);
2571 update = NFP_NET_CFG_UPDATE_RSS;
2573 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2580 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2581 struct rte_eth_rss_conf *rss_conf)
2584 uint32_t cfg_rss_ctrl;
2587 struct nfp_net_hw *hw;
2589 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2591 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2594 rss_hf = rss_conf->rss_hf;
2595 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2597 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2598 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
2600 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2601 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2603 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2604 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2606 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2607 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2609 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2610 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;
2615 /* Reading the key size */
2616 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
	/* Reading the key byte by byte */
2619 for (i = 0; i < rss_conf->rss_key_len; i++) {
2620 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2621 memcpy(&rss_conf->rss_key[i], &key, 1);
2628 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2630 struct rte_eth_conf *dev_conf;
2631 struct rte_eth_rss_conf rss_conf;
2632 struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2633 uint16_t rx_queues = dev->data->nb_rx_queues;
2637 RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
2640 nfp_reta_conf[0].mask = ~0x0;
2641 nfp_reta_conf[1].mask = ~0x0;
2644 for (i = 0; i < 0x40; i += 8) {
2645 for (j = i; j < (i + 8); j++) {
2646 nfp_reta_conf[0].reta[j] = queue;
2647 nfp_reta_conf[1].reta[j] = queue++;
2651 ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
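	/*
	 * The loop above is meant to spread the queue indices round-robin
	 * over all 128 indirection-table entries; e.g. with 4 RX queues the
	 * table repeats the pattern 0, 1, 2, 3 across both 64-entry groups.
	 */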
2655 dev_conf = &dev->data->dev_conf;
		RTE_LOG(INFO, PMD, "wrong rss conf\n");
2660 rss_conf = dev_conf->rx_adv_conf.rss_conf;
2662 ret = nfp_net_rss_hash_write(dev, &rss_conf);
2668 /* Initialise and register driver with DPDK Application */
2669 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2670 .dev_configure = nfp_net_configure,
2671 .dev_start = nfp_net_start,
2672 .dev_stop = nfp_net_stop,
2673 .dev_close = nfp_net_close,
2674 .promiscuous_enable = nfp_net_promisc_enable,
2675 .promiscuous_disable = nfp_net_promisc_disable,
2676 .link_update = nfp_net_link_update,
2677 .stats_get = nfp_net_stats_get,
2678 .stats_reset = nfp_net_stats_reset,
2679 .dev_infos_get = nfp_net_infos_get,
2680 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2681 .mtu_set = nfp_net_dev_mtu_set,
2682 .vlan_offload_set = nfp_net_vlan_offload_set,
2683 .reta_update = nfp_net_reta_update,
2684 .reta_query = nfp_net_reta_query,
2685 .rss_hash_update = nfp_net_rss_hash_update,
2686 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2687 .rx_queue_setup = nfp_net_rx_queue_setup,
2688 .rx_queue_release = nfp_net_rx_queue_release,
2689 .rx_queue_count = nfp_net_rx_queue_count,
2690 .tx_queue_setup = nfp_net_tx_queue_setup,
2691 .tx_queue_release = nfp_net_tx_queue_release,
2692 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2693 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
 * Every created eth_dev gets its own private data, but before nfp_net_init()
 * that private data points at the private data shared by all the PF ports.
 * This is because the vNIC BARs are mapped based on the first port, so every
 * port needs access to port 0's private data. Inside nfp_net_init() the
 * private data pointer is changed to the right address for each port once the
 * BARs have been mapped.
 *
 * This function helps to find out which port, and therefore which offset
 * inside the private data array, to use.
2707 get_pf_port_number(char *name)
2709 char *pf_str = name;
2712 while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
		 * This should never happen; it would indicate a major
		 * implementation fault.
2720 rte_panic("nfp_net: problem with pf device name\n");
2722 /* Expecting _portX with X within [0,7] */
2725 return (int)strtol(pf_str, NULL, 10);
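/*
 * PF port devices are named "<PCI device name>_port<N>" by
 * nfp_pf_create_dev() below, e.g. "0000:04:00.0_port1" (the PCI address here
 * is only an example), which is the format the parsing above relies on.
 */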
2729 nfp_net_init(struct rte_eth_dev *eth_dev)
2731 struct rte_pci_device *pci_dev;
2732 struct nfp_net_hw *hw, *hwport0;
2734 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2738 nspu_desc_t *nspu_desc = NULL;
2739 uint64_t bar_offset;
2742 PMD_INIT_FUNC_TRACE();
2744 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2746 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2747 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2748 port = get_pf_port_number(eth_dev->data->name);
2749 if (port < 0 || port > 7) {
2750 RTE_LOG(ERR, PMD, "Port value is wrong\n");
2754 PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
2756 /* This points to port 0 private data */
2757 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2759 /* This points to the specific port private data */
2760 hw = &hwport0[port];
2761 hw->pf_port_idx = port;
2763 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2767 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2768 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2769 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2771 /* For secondary processes, the primary has done all the work */
2772 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2775 rte_eth_copy_pci_info(eth_dev, pci_dev);
2777 hw->device_id = pci_dev->id.device_id;
2778 hw->vendor_id = pci_dev->id.vendor_id;
2779 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2780 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2782 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2783 pci_dev->id.vendor_id, pci_dev->id.device_id,
2784 pci_dev->addr.domain, pci_dev->addr.bus,
2785 pci_dev->addr.devid, pci_dev->addr.function);
2787 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2788 if (hw->ctrl_bar == NULL) {
2790 "hw->ctrl_bar is NULL. BAR0 not configured\n");
2794 if (hw->is_pf && port == 0) {
2795 nspu_desc = hw->nspu_desc;
2797 if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) {
			 * Firmware should already be loaded after the PF
			 * probe, so this should not happen.
2802 RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n");
2806 /* vNIC PF control BAR is a subset of PF PCI device BAR */
2807 hw->ctrl_bar += bar_offset;
2808 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2812 if (!hwport0->ctrl_bar)
2815 /* address based on port0 offset */
2816 hw->ctrl_bar = hwport0->ctrl_bar +
2817 (port * NFP_PF_CSR_SLICE_SIZE);
2820 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2822 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2823 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2825 /* Work out where in the BAR the queues start. */
2826 switch (pci_dev->id.device_id) {
2827 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2828 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2829 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2830 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2831 tx_bar_off = NFP_PCIE_QUEUE(start_q);
2832 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2833 rx_bar_off = NFP_PCIE_QUEUE(start_q);
2836 RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
2840 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
2841 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
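	/*
	 * The firmware exposes the absolute index of the first TX/RX queue
	 * assigned to this vNIC; NFP_PCIE_QUEUE() turns that index into the
	 * byte offset of the queue controller pointers inside the queue BAR.
	 */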
2843 if (hw->is_pf && port == 0) {
2844 /* configure access to tx/rx vNIC BARs */
2845 nfp_nsp_map_queues_bar(nspu_desc, &bar_offset);
2846 PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n",
2848 hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr;
2850 /* vNIC PF tx/rx BARs are a subset of PF PCI device */
2851 hwport0->hw_queues += bar_offset;
		/* Let's seize the chance to read the eth table from hardware */
2854 if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
2859 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2860 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2861 eth_dev->data->dev_private = hw;
2863 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2865 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2869 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2870 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2872 nfp_net_cfg_queue_setup(hw);
2874 /* Get some of the read-only fields from the config BAR */
2875 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2876 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2877 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2878 hw->mtu = ETHER_MTU;
2880 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2881 hw->rx_offset = NFP_NET_RX_OFFSET;
2883 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2885 PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
2886 hw->ver, hw->max_mtu);
2887 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2888 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2889 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2890 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2891 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2892 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2893 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2894 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2895 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2896 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2897 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2898 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
2902 hw->stride_rx = stride;
2903 hw->stride_tx = stride;
2905 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2906 hw->max_rx_queues, hw->max_tx_queues);
2908 /* Initializing spinlock for reconfigs */
2909 rte_spinlock_init(&hw->reconfig_lock);
2911 /* Allocating memory for mac addr */
2912 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2913 if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2919 nfp_net_pf_read_mac(hwport0, port);
2920 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2922 nfp_net_vf_read_mac(hw);
2925 if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
2926 /* Using random mac addresses for VFs */
2927 eth_random_addr(&hw->mac_addr[0]);
2928 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2931 /* Copying mac address to DPDK eth_dev struct */
2932 ether_addr_copy((struct ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
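	/*
	 * Note: when the firmware does not provide a valid unicast MAC
	 * (common for VFs), eth_random_addr() above generates a random
	 * locally administered address so the port remains usable.
	 */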
2935 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2936 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2937 eth_dev->data->port_id, pci_dev->id.vendor_id,
2938 pci_dev->id.device_id,
2939 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2940 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
2942 /* Registering LSC interrupt handler */
2943 rte_intr_callback_register(&pci_dev->intr_handle,
2944 nfp_net_dev_interrupt_handler,
2947 /* Telling the firmware about the LSC interrupt entry */
2948 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2950 /* Recording current stats counters values */
2951 nfp_net_stats_reset(eth_dev);
2957 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
2958 nfpu_desc_t *nfpu_desc, void **priv)
2960 struct rte_eth_dev *eth_dev;
2961 struct nfp_net_hw *hw;
2965 port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
2970 sprintf(port_name, "%s_port%d", dev->device.name, port);
2972 sprintf(port_name, "%s", dev->device.name);
2974 eth_dev = rte_eth_dev_allocate(port_name);
2979 *priv = rte_zmalloc(port_name,
2980 sizeof(struct nfp_net_adapter) * ports,
2981 RTE_CACHE_LINE_SIZE);
2983 rte_eth_dev_release_port(eth_dev);
2988 eth_dev->data->dev_private = *priv;
	 * dev_private points to port 0's dev_private because the vNIC BARs
	 * need to be configured based on port 0 in nfp_net_init().
	 * dev_private is then adjusted per port.
2995 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
2996 hw->nspu_desc = nfpu_desc->nspu;
2997 hw->nfpu_desc = nfpu_desc;
3000 hw->pf_multiport_enabled = 1;
3002 eth_dev->device = &dev->device;
3003 rte_eth_copy_pci_info(eth_dev, dev);
3005 ret = nfp_net_init(eth_dev);
3008 rte_eth_dev_release_port(eth_dev);
3010 rte_free(port_name);
3015 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3016 struct rte_pci_device *dev)
3018 nfpu_desc_t *nfpu_desc;
3019 nspu_desc_t *nspu_desc;
3020 uint64_t offset_symbol;
3021 uint8_t *bar_offset;
3031 nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0);
3035 if (nfpu_open(dev, nfpu_desc, 0) < 0) {
3037 "nfpu_open failed\n");
3041 nspu_desc = nfpu_desc->nspu;
3044 /* Check NSP ABI version */
3045 if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) {
3046 RTE_LOG(INFO, PMD, "NFP NSP not present\n");
3049 PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor);
3051 if ((major == 0) && (minor < 20)) {
3052 RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. Required 0.20 or higher\n");
3056 ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports",
3061 bar_offset = (uint8_t *)dev->mem_resource[0].addr;
3062 bar_offset += offset_symbol;
3063 total_ports = (uint32_t)*bar_offset;
3064 PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
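	/*
	 * nfd_cfg_pf0_num_ports is a firmware symbol resolved via the NSP;
	 * its value, read as a single byte from BAR0 at offset_symbol above,
	 * is the number of physical ports behind this PF (expected 1..8).
	 */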
3066 if (total_ports <= 0 || total_ports > 8) {
3067 RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
3072 for (i = 0; i < total_ports; i++) {
3073 ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv);
3081 nfpu_close(nfpu_desc);
3083 rte_free(nfpu_desc);
3088 int nfp_logtype_init;
3089 int nfp_logtype_driver;
3091 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3093 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3094 PCI_DEVICE_ID_NFP4000_PF_NIC)
3097 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3098 PCI_DEVICE_ID_NFP6000_PF_NIC)
3105 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3107 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3108 PCI_DEVICE_ID_NFP6000_VF_NIC)
3115 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3116 struct rte_pci_device *pci_dev)
3118 return rte_eth_dev_pci_generic_probe(pci_dev,
3119 sizeof(struct nfp_net_adapter), nfp_net_init);
3122 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3124 struct rte_eth_dev *eth_dev;
3125 struct nfp_net_hw *hw, *hwport0;
3128 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
3129 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
3130 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
3131 port = get_pf_port_number(eth_dev->data->name);
3132 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3133 hw = &hwport0[port];
3135 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3137 /* hotplug is not possible with multiport PF */
3138 if (hw->pf_multiport_enabled)
3140 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3143 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3144 .id_table = pci_id_nfp_pf_net_map,
3145 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3146 .probe = nfp_pf_pci_probe,
3147 .remove = eth_nfp_pci_remove,
3150 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3151 .id_table = pci_id_nfp_vf_net_map,
3152 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3153 .probe = eth_nfp_pci_probe,
3154 .remove = eth_nfp_pci_remove,
3157 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3158 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3159 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3160 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3161 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3162 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3164 RTE_INIT(nfp_init_log);
3168 nfp_logtype_init = rte_log_register("pmd.nfp.init");
3169 if (nfp_logtype_init >= 0)
3170 rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
3171 nfp_logtype_driver = rte_log_register("pmd.nfp.driver");
3172 if (nfp_logtype_driver >= 0)
3173 rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
3177 * c-file-style: "Linux"
3178 * indent-tabs-mode: t