2 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * vim:shiftwidth=8:noexpandtab
37 * @file dpdk/pmd/nfp_net.c
39 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
45 #include <rte_debug.h>
46 #include <rte_ethdev.h>
47 #include <rte_ethdev_pci.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_memzone.h>
52 #include <rte_mempool.h>
53 #include <rte_version.h>
54 #include <rte_string_fns.h>
55 #include <rte_alarm.h>
56 #include <rte_spinlock.h>
59 #include "nfp_net_pmd.h"
60 #include "nfp_net_logs.h"
61 #include "nfp_net_ctrl.h"
64 static void nfp_net_close(struct rte_eth_dev *dev);
65 static int nfp_net_configure(struct rte_eth_dev *dev);
66 static void nfp_net_dev_interrupt_handler(void *param);
67 static void nfp_net_dev_interrupt_delayed_handler(void *param);
68 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
69 static void nfp_net_infos_get(struct rte_eth_dev *dev,
70 struct rte_eth_dev_info *dev_info);
71 static int nfp_net_init(struct rte_eth_dev *eth_dev);
72 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
73 static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
74 static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
75 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
76 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
78 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
80 static void nfp_net_rx_queue_release(void *rxq);
81 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
82 uint16_t nb_desc, unsigned int socket_id,
83 const struct rte_eth_rxconf *rx_conf,
84 struct rte_mempool *mp);
85 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
86 static void nfp_net_tx_queue_release(void *txq);
87 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
88 uint16_t nb_desc, unsigned int socket_id,
89 const struct rte_eth_txconf *tx_conf);
90 static int nfp_net_start(struct rte_eth_dev *dev);
91 static void nfp_net_stats_get(struct rte_eth_dev *dev,
92 struct rte_eth_stats *stats);
93 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
94 static void nfp_net_stop(struct rte_eth_dev *dev);
95 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
99 * The offset of the queue controller queues in the PCIe Target. These
100 * happen to be at the same offset on the NFP6000 and the NFP3200 so
101 * we use a single macro here.
103 #define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff))
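/*
 * Illustrative example (not from the original source): queue 3 maps to
 * offset 0x800 * 3 = 0x1800 within the PCIe Target.
 */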
105 /* Maximum value which can be added to a queue with one transaction */
106 #define NFP_QCP_MAX_ADD 0x7f
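/*
 * Illustrative example (not from the original source): nfp_qcp_ptr_add()
 * below splits an addition of 200 into two transactions, 0x7f (127)
 * followed by 73, since a single transaction can add at most
 * NFP_QCP_MAX_ADD.
 */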
108 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
109 (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
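/*
 * Illustrative example (not from the original source): with
 * buf_physaddr = 0x10000000 and the default RTE_PKTMBUF_HEADROOM of 128
 * (0x80), the default DMA address is 0x10000080.
 */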
111 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
113 NFP_QCP_READ_PTR = 0,
118 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
119 * @q: Base address for queue structure
120 * @ptr: Add to the Read or Write pointer
121 * @val: Value to add to the queue pointer
123 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
126 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
130 if (ptr == NFP_QCP_READ_PTR)
131 off = NFP_QCP_QUEUE_ADD_RPTR;
133 off = NFP_QCP_QUEUE_ADD_WPTR;
135 while (val > NFP_QCP_MAX_ADD) {
136 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
137 val -= NFP_QCP_MAX_ADD;
140 nn_writel(rte_cpu_to_le_32(val), q + off);
144 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
145 * @q: Base address for queue structure
146 * @ptr: Read or Write pointer
148 static inline uint32_t
149 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
154 if (ptr == NFP_QCP_READ_PTR)
155 off = NFP_QCP_QUEUE_STS_LO;
157 off = NFP_QCP_QUEUE_STS_HI;
159 val = rte_cpu_to_le_32(nn_readl(q + off));
161 if (ptr == NFP_QCP_READ_PTR)
162 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
164 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
168 * Functions to read/write from/to Config BAR
169 * Performs any endian conversion necessary.
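 *
 * Illustrative usage (not part of the original comment):
 * nfp_net_params_setup() below writes the MTU with
 * nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu); the same register can be
 * read back with nn_cfg_readl(hw, NFP_NET_CFG_MTU).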
171 static inline uint8_t
172 nn_cfg_readb(struct nfp_net_hw *hw, int off)
174 return nn_readb(hw->ctrl_bar + off);
178 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
180 nn_writeb(val, hw->ctrl_bar + off);
183 static inline uint32_t
184 nn_cfg_readl(struct nfp_net_hw *hw, int off)
186 return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
190 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
192 nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
195 static inline uint64_t
196 nn_cfg_readq(struct nfp_net_hw *hw, int off)
198 return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
202 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
204 nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
208 * Atomically reads link status information from global structure rte_eth_dev.
211 * - Pointer to the structure rte_eth_dev to read from.
212 * - Pointer to the buffer to be saved with the link status.
215 * - On success, zero.
216 * - On failure, negative value.
219 nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
220 struct rte_eth_link *link)
222 struct rte_eth_link *dst = link;
223 struct rte_eth_link *src = &dev->data->dev_link;
225 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
226 *(uint64_t *)src) == 0)
233 * Atomically writes the link status information into global
234 * structure rte_eth_dev.
237 * - Pointer to the structure rte_eth_dev to write to.
238 * - Pointer to the buffer holding the link status to be written.
241 * - On success, zero.
242 * - On failure, negative value.
245 nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
246 struct rte_eth_link *link)
248 struct rte_eth_link *dst = &dev->data->dev_link;
249 struct rte_eth_link *src = link;
251 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
252 *(uint64_t *)src) == 0)
259 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
263 if (rxq->rxbufs == NULL)
266 for (i = 0; i < rxq->rx_count; i++) {
267 if (rxq->rxbufs[i].mbuf) {
268 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
269 rxq->rxbufs[i].mbuf = NULL;
275 nfp_net_rx_queue_release(void *rx_queue)
277 struct nfp_net_rxq *rxq = rx_queue;
280 nfp_net_rx_queue_release_mbufs(rxq);
281 rte_free(rxq->rxbufs);
287 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
289 nfp_net_rx_queue_release_mbufs(rxq);
295 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
299 if (txq->txbufs == NULL)
302 for (i = 0; i < txq->tx_count; i++) {
303 if (txq->txbufs[i].mbuf) {
304 rte_pktmbuf_free(txq->txbufs[i].mbuf);
305 txq->txbufs[i].mbuf = NULL;
311 nfp_net_tx_queue_release(void *tx_queue)
313 struct nfp_net_txq *txq = tx_queue;
316 nfp_net_tx_queue_release_mbufs(txq);
317 rte_free(txq->txbufs);
323 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
325 nfp_net_tx_queue_release_mbufs(txq);
331 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
335 struct timespec wait;
337 PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
340 if (hw->qcp_cfg == NULL)
341 rte_panic("Bad configuration queue pointer\n");
343 nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
346 wait.tv_nsec = 1000000;
348 PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
350 /* Poll update field, waiting for NFP to ack the config */
351 for (cnt = 0; ; cnt++) {
352 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
355 if (new & NFP_NET_CFG_UPDATE_ERR) {
356 PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
359 if (cnt >= NFP_NET_POLL_TIMEOUT) {
360 PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
361 " %dms", update, cnt);
362 rte_panic("Exiting\n");
364 nanosleep(&wait, 0); /* wait for 1ms */
366 PMD_DRV_LOG(DEBUG, "Ack DONE\n");
371 * Reconfigure the NIC
372 * @nn: device to reconfigure
373 * @ctrl: The value for the ctrl field in the BAR config
374 * @update: The value for the update field in the BAR config
376 * Write the update word to the BAR and ping the reconfig queue. Then poll
377 * until the firmware has acknowledged the update by zeroing the update word.
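 *
 * Illustrative usage (added note, mirroring nfp_net_promisc_enable() below):
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	update = NFP_NET_CFG_UPDATE_GEN;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 *		return;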
380 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
384 PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
387 rte_spinlock_lock(&hw->reconfig_lock);
389 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
390 nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
394 err = __nfp_net_reconfig(hw, update);
396 rte_spinlock_unlock(&hw->reconfig_lock);
402 * Reconfig errors reaching this point imply situations that can be handled;
403 * otherwise, rte_panic is called inside __nfp_net_reconfig
405 PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
411 * Configure an Ethernet device. This function must be invoked first
412 * before any other function in the Ethernet API. This function can
413 * also be re-invoked when a device is in the stopped state.
416 nfp_net_configure(struct rte_eth_dev *dev)
418 struct rte_eth_conf *dev_conf;
419 struct rte_eth_rxmode *rxmode;
420 struct rte_eth_txmode *txmode;
421 uint32_t new_ctrl = 0;
423 struct nfp_net_hw *hw;
425 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
428 * A DPDK app sends info about how many queues to use and how
429 * those queues need to be configured. This is used by the
430 * DPDK core and it makes sure no more queues than those
431 * advertised by the driver are requested. This function is
432 * called after that internal process
435 PMD_INIT_LOG(DEBUG, "Configure");
437 dev_conf = &dev->data->dev_conf;
438 rxmode = &dev_conf->rxmode;
439 txmode = &dev_conf->txmode;
441 /* Checking TX mode */
442 if (txmode->mq_mode) {
443 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
447 /* Checking RX mode */
448 if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
449 if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
450 update = NFP_NET_CFG_UPDATE_RSS;
451 new_ctrl = NFP_NET_CFG_CTRL_RSS;
453 PMD_INIT_LOG(INFO, "RSS not supported");
458 if (rxmode->split_hdr_size) {
459 PMD_INIT_LOG(INFO, "rxmode does not support split header");
463 if (rxmode->hw_ip_checksum) {
464 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
465 new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
467 PMD_INIT_LOG(INFO, "RXCSUM not supported");
472 if (rxmode->hw_vlan_filter) {
473 PMD_INIT_LOG(INFO, "VLAN filter not supported");
477 if (rxmode->hw_vlan_strip) {
478 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
479 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
481 PMD_INIT_LOG(INFO, "hw vlan strip not supported");
486 if (rxmode->hw_vlan_extend) {
487 PMD_INIT_LOG(INFO, "VLAN extended not supported");
491 /* Supporting VLAN insertion by default */
492 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
493 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
495 if (rxmode->jumbo_frame)
496 /* this is handled in rte_eth_dev_configure */
498 if (rxmode->hw_strip_crc) {
499 PMD_INIT_LOG(INFO, "strip CRC not supported");
503 if (rxmode->enable_scatter) {
504 PMD_INIT_LOG(INFO, "Scatter not supported");
511 update |= NFP_NET_CFG_UPDATE_GEN;
513 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
514 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
523 nfp_net_enable_queues(struct rte_eth_dev *dev)
525 struct nfp_net_hw *hw;
526 uint64_t enabled_queues = 0;
529 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
531 /* Enabling the required TX queues in the device */
532 for (i = 0; i < dev->data->nb_tx_queues; i++)
533 enabled_queues |= (1 << i);
535 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
539 /* Enabling the required RX queues in the device */
540 for (i = 0; i < dev->data->nb_rx_queues; i++)
541 enabled_queues |= (1 << i);
543 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
547 nfp_net_disable_queues(struct rte_eth_dev *dev)
549 struct nfp_net_hw *hw;
550 uint32_t new_ctrl, update = 0;
552 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554 nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
555 nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
557 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
558 update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
559 NFP_NET_CFG_UPDATE_MSIX;
561 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
562 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
564 /* If reconfig fails, avoid changing the hw state */
565 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
572 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
576 for (i = 0; i < dev->data->nb_rx_queues; i++) {
577 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
584 nfp_net_params_setup(struct nfp_net_hw *hw)
586 nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
587 nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
591 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
593 hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
596 #define ETH_ADDR_LEN 6
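/*
 * nfp_eth_copy_mac_reverse - copy a MAC address reversing the byte order.
 * Illustrative example (added note): a source address 00:15:4d:12:34:56 is
 * written to dst as 56:34:12:4d:15:00.
 */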
599 nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
603 for (i = 0; i < ETH_ADDR_LEN; i++)
604 dst[ETH_ADDR_LEN - i - 1] = src[i];
608 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
610 union eth_table_entry *entry;
614 entry = hw->eth_table;
616 /* Reading NFP ethernet table obtained before */
617 for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
618 if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
619 /* port not in use */
629 if (i == NSP_ETH_MAX_COUNT)
633 * hw points to port0 private data. We need hw now pointing to the right port's private data.
637 nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
638 (uint8_t *)&entry->mac_addr);
644 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
648 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
649 memcpy(&hw->mac_addr[0], &tmp, 4);
651 tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
652 memcpy(&hw->mac_addr[4], &tmp, 2);
656 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
658 uint32_t mac0 = *(uint32_t *)mac;
661 nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
664 mac1 = *(uint16_t *)mac;
665 nn_writew(rte_cpu_to_be_16(mac1),
666 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
670 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
671 struct rte_intr_handle *intr_handle)
673 struct nfp_net_hw *hw;
676 if (!intr_handle->intr_vec) {
677 intr_handle->intr_vec =
678 rte_zmalloc("intr_vec",
679 dev->data->nb_rx_queues * sizeof(int), 0);
680 if (!intr_handle->intr_vec) {
681 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
682 " intr_vec", dev->data->nb_rx_queues);
687 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
689 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
690 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
691 /* UIO just supports one queue and no LSC */
692 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
693 intr_handle->intr_vec[0] = 0;
695 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
696 for (i = 0; i < dev->data->nb_rx_queues; i++) {
698 * The first msix vector is reserved for non-queue interrupts.
701 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
702 intr_handle->intr_vec[i] = i + 1;
703 PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
704 intr_handle->intr_vec[i]);
708 /* Avoiding TX interrupts */
709 hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
714 nfp_net_start(struct rte_eth_dev *dev)
716 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
717 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
718 uint32_t new_ctrl, update = 0;
719 struct nfp_net_hw *hw;
720 uint32_t intr_vector;
723 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
725 PMD_INIT_LOG(DEBUG, "Start");
727 /* Disabling queues just in case... */
728 nfp_net_disable_queues(dev);
730 /* Writing configuration parameters in the device */
731 nfp_net_params_setup(hw);
733 /* Enabling the required queues in the device */
734 nfp_net_enable_queues(dev);
736 /* check and configure queue intr-vector mapping */
737 if (dev->data->dev_conf.intr_conf.rxq != 0) {
738 if (hw->pf_multiport_enabled) {
739 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
740 "with NFP multiport PF");
743 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
745 * Better not to share LSC with RX interrupts.
746 * Unregistering LSC interrupt handler
748 rte_intr_callback_unregister(&pci_dev->intr_handle,
749 nfp_net_dev_interrupt_handler, (void *)dev);
751 if (dev->data->nb_rx_queues > 1) {
752 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
753 "supports 1 queue with UIO");
757 intr_vector = dev->data->nb_rx_queues;
758 if (rte_intr_efd_enable(intr_handle, intr_vector))
761 nfp_configure_rx_interrupt(dev, intr_handle);
762 update = NFP_NET_CFG_UPDATE_MSIX;
765 rte_intr_enable(intr_handle);
768 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
770 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
772 if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
773 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
775 nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
776 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
780 * Allocating rte mbufs for configured rx queues.
781 * This requires queues being enabled before.
783 if (nfp_net_rx_freelist_setup(dev) < 0) {
789 /* Configure the physical port up */
790 nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1);
798 * An error returned by this function should mean the app exits,
799 * with the system then releasing all the allocated memory,
800 * including memory coming from hugepages.
802 * The device could be enabled at this point with some queues
803 * ready for getting packets. This is true if the call to
804 * nfp_net_rx_freelist_setup() succeeds for some queues but
805 * fails for subsequent queues.
807 * This should make the app exit, but it is better if we tell the user explicitly about the error.
810 nfp_net_disable_queues(dev);
815 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
817 nfp_net_stop(struct rte_eth_dev *dev)
820 struct nfp_net_hw *hw;
822 PMD_INIT_LOG(DEBUG, "Stop");
824 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
826 nfp_net_disable_queues(dev);
829 for (i = 0; i < dev->data->nb_tx_queues; i++) {
830 nfp_net_reset_tx_queue(
831 (struct nfp_net_txq *)dev->data->tx_queues[i]);
834 for (i = 0; i < dev->data->nb_rx_queues; i++) {
835 nfp_net_reset_rx_queue(
836 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
840 /* Configure the physical port down */
841 nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0);
844 /* Reset and stop device. The device cannot be restarted. */
846 nfp_net_close(struct rte_eth_dev *dev)
848 struct nfp_net_hw *hw;
849 struct rte_pci_device *pci_dev;
852 PMD_INIT_LOG(DEBUG, "Close");
854 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
855 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
858 * We assume that the DPDK application is stopping all the
859 * threads/queues before calling the device close function.
862 nfp_net_disable_queues(dev);
865 for (i = 0; i < dev->data->nb_tx_queues; i++) {
866 nfp_net_reset_tx_queue(
867 (struct nfp_net_txq *)dev->data->tx_queues[i]);
870 for (i = 0; i < dev->data->nb_rx_queues; i++) {
871 nfp_net_reset_rx_queue(
872 (struct nfp_net_rxq *)dev->data->rx_queues[i]);
875 rte_intr_disable(&pci_dev->intr_handle);
876 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
878 /* unregister callback func from eal lib */
879 rte_intr_callback_unregister(&pci_dev->intr_handle,
880 nfp_net_dev_interrupt_handler,
884 * The ixgbe PMD driver disables the pcie master on the
885 * device. The i40e does not...
890 nfp_net_promisc_enable(struct rte_eth_dev *dev)
892 uint32_t new_ctrl, update = 0;
893 struct nfp_net_hw *hw;
895 PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
897 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
899 if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
900 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
904 if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
905 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
909 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
910 update = NFP_NET_CFG_UPDATE_GEN;
913 * DPDK sets promiscuous mode on just after this call assuming
914 * it cannot fail ...
916 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
923 nfp_net_promisc_disable(struct rte_eth_dev *dev)
925 uint32_t new_ctrl, update = 0;
926 struct nfp_net_hw *hw;
928 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
930 if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
931 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
935 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
936 update = NFP_NET_CFG_UPDATE_GEN;
939 * DPDK sets promiscuous mode off just before this call
940 * assuming it cannot fail ...
942 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
949 * return 0 means link status changed, -1 means not changed
951 * Wait to complete is needed as it can take up to 9 seconds to get the Link status.
955 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
957 struct nfp_net_hw *hw;
958 struct rte_eth_link link, old;
959 uint32_t nn_link_status;
961 static const uint32_t ls_to_ethtool[] = {
962 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
963 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
964 [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
965 [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
966 [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
967 [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
968 [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
969 [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
972 PMD_DRV_LOG(DEBUG, "Link update\n");
974 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
976 memset(&old, 0, sizeof(old));
977 nfp_net_dev_atomic_read_link_status(dev, &old);
979 nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
981 memset(&link, 0, sizeof(struct rte_eth_link));
983 if (nn_link_status & NFP_NET_CFG_STS_LINK)
984 link.link_status = ETH_LINK_UP;
986 link.link_duplex = ETH_LINK_FULL_DUPLEX;
988 nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
989 NFP_NET_CFG_STS_LINK_RATE_MASK;
991 if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
992 ((NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4) &&
993 (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
994 /* We really do not know the speed with old firmware */
995 link.link_speed = ETH_SPEED_NUM_NONE;
997 if (nn_link_status >= RTE_DIM(ls_to_ethtool))
998 link.link_speed = ETH_SPEED_NUM_NONE;
1000 link.link_speed = ls_to_ethtool[nn_link_status];
1003 if (old.link_status != link.link_status) {
1004 nfp_net_dev_atomic_write_link_status(dev, &link);
1005 if (link.link_status)
1006 PMD_DRV_LOG(INFO, "NIC Link is Up\n");
1008 PMD_DRV_LOG(INFO, "NIC Link is Down\n");
1016 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1019 struct nfp_net_hw *hw;
1020 struct rte_eth_stats nfp_dev_stats;
1022 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1024 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1026 /* reading per RX ring stats */
1027 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1028 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1031 nfp_dev_stats.q_ipackets[i] =
1032 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1034 nfp_dev_stats.q_ipackets[i] -=
1035 hw->eth_stats_base.q_ipackets[i];
1037 nfp_dev_stats.q_ibytes[i] =
1038 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1040 nfp_dev_stats.q_ibytes[i] -=
1041 hw->eth_stats_base.q_ibytes[i];
1044 /* reading per TX ring stats */
1045 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1046 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1049 nfp_dev_stats.q_opackets[i] =
1050 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1052 nfp_dev_stats.q_opackets[i] -=
1053 hw->eth_stats_base.q_opackets[i];
1055 nfp_dev_stats.q_obytes[i] =
1056 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1058 nfp_dev_stats.q_obytes[i] -=
1059 hw->eth_stats_base.q_obytes[i];
1062 nfp_dev_stats.ipackets =
1063 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1065 nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1067 nfp_dev_stats.ibytes =
1068 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1070 nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1072 nfp_dev_stats.opackets =
1073 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1075 nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1077 nfp_dev_stats.obytes =
1078 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1080 nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1082 /* reading general device stats */
1083 nfp_dev_stats.ierrors =
1084 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1086 nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1088 nfp_dev_stats.oerrors =
1089 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1091 nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1093 /* RX ring mbuf allocation failures */
1094 nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1096 nfp_dev_stats.imissed =
1097 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1099 nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1102 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1106 nfp_net_stats_reset(struct rte_eth_dev *dev)
1109 struct nfp_net_hw *hw;
1111 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1114 * hw->eth_stats_base records the per counter starting point.
1115 * Let's update it now.
1118 /* reading per RX ring stats */
1119 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1120 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1123 hw->eth_stats_base.q_ipackets[i] =
1124 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1126 hw->eth_stats_base.q_ibytes[i] =
1127 nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1130 /* reading per TX ring stats */
1131 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1132 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1135 hw->eth_stats_base.q_opackets[i] =
1136 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1138 hw->eth_stats_base.q_obytes[i] =
1139 nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1142 hw->eth_stats_base.ipackets =
1143 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1145 hw->eth_stats_base.ibytes =
1146 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1148 hw->eth_stats_base.opackets =
1149 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1151 hw->eth_stats_base.obytes =
1152 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1154 /* reading general device stats */
1155 hw->eth_stats_base.ierrors =
1156 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1158 hw->eth_stats_base.oerrors =
1159 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1161 /* RX ring mbuf allocation failures */
1162 dev->data->rx_mbuf_alloc_failed = 0;
1164 hw->eth_stats_base.imissed =
1165 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1169 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1171 struct nfp_net_hw *hw;
1173 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1175 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1176 dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1177 dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1178 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1179 dev_info->max_rx_pktlen = hw->mtu;
1180 /* Next should change when PF support is implemented */
1181 dev_info->max_mac_addrs = 1;
1183 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1184 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1186 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1187 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1188 DEV_RX_OFFLOAD_UDP_CKSUM |
1189 DEV_RX_OFFLOAD_TCP_CKSUM;
1191 if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1192 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1194 if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1195 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1196 DEV_TX_OFFLOAD_UDP_CKSUM |
1197 DEV_TX_OFFLOAD_TCP_CKSUM;
1199 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1201 .pthresh = DEFAULT_RX_PTHRESH,
1202 .hthresh = DEFAULT_RX_HTHRESH,
1203 .wthresh = DEFAULT_RX_WTHRESH,
1205 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1209 dev_info->default_txconf = (struct rte_eth_txconf) {
1211 .pthresh = DEFAULT_TX_PTHRESH,
1212 .hthresh = DEFAULT_TX_HTHRESH,
1213 .wthresh = DEFAULT_TX_WTHRESH,
1215 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1216 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1217 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1218 ETH_TXQ_FLAGS_NOOFFLOADS,
1221 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1222 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1224 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1225 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1226 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1228 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
1229 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1232 static const uint32_t *
1233 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1235 static const uint32_t ptypes[] = {
1236 /* refers to nfp_net_set_hash() */
1237 RTE_PTYPE_INNER_L3_IPV4,
1238 RTE_PTYPE_INNER_L3_IPV6,
1239 RTE_PTYPE_INNER_L3_IPV6_EXT,
1240 RTE_PTYPE_INNER_L4_MASK,
1244 if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1250 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1252 struct nfp_net_rxq *rxq;
1253 struct nfp_net_rx_desc *rxds;
1257 rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1264 * Other PMDs are just checking the DD bit in intervals of 4
1265 * descriptors and counting all four if the first has the DD
1266 * bit on. Of course, this is not accurate but can be good for
1267 * performance. But ideally that should be done in descriptor
1268 * chunks belonging to the same cache line
1271 while (count < rxq->rx_count) {
1272 rxds = &rxq->rxds[idx];
1273 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1280 if ((idx) == rxq->rx_count)
1288 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1290 struct rte_pci_device *pci_dev;
1291 struct nfp_net_hw *hw;
1294 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1295 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1297 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1300 /* Make sure all updates are written before un-masking */
1302 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1303 NFP_NET_CFG_ICR_UNMASKED);
1308 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1310 struct rte_pci_device *pci_dev;
1311 struct nfp_net_hw *hw;
1314 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1315 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1317 if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1320 /* Make sure all updates are written before un-masking */
1322 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1327 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1329 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1330 struct rte_eth_link link;
1332 memset(&link, 0, sizeof(link));
1333 nfp_net_dev_atomic_read_link_status(dev, &link);
1334 if (link.link_status)
1335 RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
1336 (int)(dev->data->port_id), (unsigned)link.link_speed,
1337 link.link_duplex == ETH_LINK_FULL_DUPLEX
1338 ? "full-duplex" : "half-duplex");
1340 RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
1341 (int)(dev->data->port_id));
1343 RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
1344 pci_dev->addr.domain, pci_dev->addr.bus,
1345 pci_dev->addr.devid, pci_dev->addr.function);
1348 /* Interrupt configuration and handling */
1351 * nfp_net_irq_unmask - Unmask an interrupt
1353 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1354 * clear the ICR for the entry.
1357 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1359 struct nfp_net_hw *hw;
1360 struct rte_pci_device *pci_dev;
1362 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1363 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1365 if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1366 /* If MSI-X auto-masking is used, clear the entry */
1368 rte_intr_enable(&pci_dev->intr_handle);
1370 /* Make sure all updates are written before un-masking */
1372 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1373 NFP_NET_CFG_ICR_UNMASKED);
1378 nfp_net_dev_interrupt_handler(void *param)
1381 struct rte_eth_link link;
1382 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1384 PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
1386 /* get the link status */
1387 memset(&link, 0, sizeof(link));
1388 nfp_net_dev_atomic_read_link_status(dev, &link);
1390 nfp_net_link_update(dev, 0);
1393 if (!link.link_status) {
1394 /* handle it 1 sec later, waiting for the state to stabilize */
1395 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1396 /* link likely going down */
1398 /* handle it 4 sec later, waiting for the state to stabilize */
1399 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1402 if (rte_eal_alarm_set(timeout * 1000,
1403 nfp_net_dev_interrupt_delayed_handler,
1405 RTE_LOG(ERR, PMD, "Error setting alarm");
1407 nfp_net_irq_unmask(dev);
1412 * Interrupt handler which shall be registered as an alarm callback for
1413 * delayed handling of a specific interrupt, waiting for a stable NIC state.
1414 * As the NFP interrupt state is not stable right after the link goes down,
1415 * it needs to wait 4 seconds to get the stable status.
1417 * @param handle Pointer to interrupt handle.
1418 * @param param The address of parameter (struct rte_eth_dev *)
1423 nfp_net_dev_interrupt_delayed_handler(void *param)
1425 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1427 nfp_net_link_update(dev, 0);
1428 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
1430 nfp_net_dev_link_status_print(dev);
1433 nfp_net_irq_unmask(dev);
1437 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1439 struct nfp_net_hw *hw;
1441 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1443 /* check that mtu is within the allowed range */
1444 if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
1447 /* switch to jumbo mode if needed */
1448 if ((uint32_t)mtu > ETHER_MAX_LEN)
1449 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1451 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1453 /* update max frame size */
1454 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1456 /* writing to configuration space */
1457 nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1465 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1466 uint16_t queue_idx, uint16_t nb_desc,
1467 unsigned int socket_id,
1468 const struct rte_eth_rxconf *rx_conf,
1469 struct rte_mempool *mp)
1471 const struct rte_memzone *tz;
1472 struct nfp_net_rxq *rxq;
1473 struct nfp_net_hw *hw;
1475 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1477 PMD_INIT_FUNC_TRACE();
1479 /* Validating number of descriptors */
1480 if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
1481 (nb_desc > NFP_NET_MAX_RX_DESC) ||
1482 (nb_desc < NFP_NET_MIN_RX_DESC)) {
1483 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1488 * Free memory prior to re-allocation if needed. This is the case after
1489 * calling nfp_net_stop
1491 if (dev->data->rx_queues[queue_idx]) {
1492 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1493 dev->data->rx_queues[queue_idx] = NULL;
1496 /* Allocating rx queue data structure */
1497 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1498 RTE_CACHE_LINE_SIZE, socket_id);
1502 /* Hw queues mapping based on firmware configuration */
1503 rxq->qidx = queue_idx;
1504 rxq->fl_qcidx = queue_idx * hw->stride_rx;
1505 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1506 rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1507 rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1510 * Tracking mbuf size for detecting a potential mbuf overflow due to the RX offset.
1514 rxq->mbuf_size = rxq->mem_pool->elt_size;
1515 rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1516 hw->flbufsz = rxq->mbuf_size;
1518 rxq->rx_count = nb_desc;
1519 rxq->port_id = dev->data->port_id;
1520 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1521 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
1523 rxq->drop_en = rx_conf->rx_drop_en;
1526 * Allocate RX ring hardware descriptors. A memzone large enough to
1527 * handle the maximum ring size is allocated in order to allow for
1528 * resizing in later calls to the queue setup function.
1530 tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1531 sizeof(struct nfp_net_rx_desc) *
1532 NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1536 RTE_LOG(ERR, PMD, "Error allocating rx dma\n");
1537 nfp_net_rx_queue_release(rxq);
1541 /* Saving physical and virtual addresses for the RX ring */
1542 rxq->dma = (uint64_t)tz->phys_addr;
1543 rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1545 /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1546 rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1547 sizeof(*rxq->rxbufs) * nb_desc,
1548 RTE_CACHE_LINE_SIZE, socket_id);
1549 if (rxq->rxbufs == NULL) {
1550 nfp_net_rx_queue_release(rxq);
1554 PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1555 rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1557 nfp_net_reset_rx_queue(rxq);
1559 dev->data->rx_queues[queue_idx] = rxq;
1563 * Telling the HW about the physical address of the RX ring and number
1564 * of descriptors in log2 format
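 * (illustrative example, added note: a ring of nb_desc = 1024 descriptors
 * is written as rte_log2_u32(1024) = 10)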
1566 nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1567 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1573 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1575 struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1579 PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
1582 for (i = 0; i < rxq->rx_count; i++) {
1583 struct nfp_net_rx_desc *rxd;
1584 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1587 RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
1588 (unsigned)rxq->qidx);
1592 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1594 rxd = &rxq->rxds[i];
1596 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1597 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
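/*
 * Illustrative split (added note): a DMA address of 0x0000001234567890
 * gives dma_addr_hi = 0x12 and dma_addr_lo = 0x34567890.
 */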
1599 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
1602 /* Make sure all writes are flushed before telling the hardware */
1605 /* Not advertising the whole ring as the firmware gets confused if so */
1606 PMD_RX_LOG(DEBUG, "Increment FL write pointer by %u\n",
1609 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1615 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1616 uint16_t nb_desc, unsigned int socket_id,
1617 const struct rte_eth_txconf *tx_conf)
1619 const struct rte_memzone *tz;
1620 struct nfp_net_txq *txq;
1621 uint16_t tx_free_thresh;
1622 struct nfp_net_hw *hw;
1624 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1626 PMD_INIT_FUNC_TRACE();
1628 /* Validating number of descriptors */
1629 if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1630 (nb_desc > NFP_NET_MAX_TX_DESC) ||
1631 (nb_desc < NFP_NET_MIN_TX_DESC)) {
1632 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1636 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1637 tx_conf->tx_free_thresh :
1638 DEFAULT_TX_FREE_THRESH);
1640 if (tx_free_thresh > (nb_desc)) {
1642 "tx_free_thresh must be less than the number of TX "
1643 "descriptors. (tx_free_thresh=%u port=%d "
1644 "queue=%d)\n", (unsigned int)tx_free_thresh,
1645 (int)dev->data->port_id, (int)queue_idx);
1650 * Free memory prior to re-allocation if needed. This is the case after
1651 * calling nfp_net_stop
1653 if (dev->data->tx_queues[queue_idx]) {
1654 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
1656 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1657 dev->data->tx_queues[queue_idx] = NULL;
1660 /* Allocating tx queue data structure */
1661 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1662 RTE_CACHE_LINE_SIZE, socket_id);
1664 RTE_LOG(ERR, PMD, "Error allocating tx queue\n");
1669 * Allocate TX ring hardware descriptors. A memzone large enough to
1670 * handle the maximum ring size is allocated in order to allow for
1671 * resizing in later calls to the queue setup function.
1673 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1674 sizeof(struct nfp_net_tx_desc) *
1675 NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1678 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1679 nfp_net_tx_queue_release(txq);
1683 txq->tx_count = nb_desc;
1684 txq->tx_free_thresh = tx_free_thresh;
1685 txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1686 txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1687 txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1689 /* queue mapping based on firmware configuration */
1690 txq->qidx = queue_idx;
1691 txq->tx_qcidx = queue_idx * hw->stride_tx;
1692 txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1694 txq->port_id = dev->data->port_id;
1695 txq->txq_flags = tx_conf->txq_flags;
1697 /* Saving physical and virtual addresses for the TX ring */
1698 txq->dma = (uint64_t)tz->phys_addr;
1699 txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1701 /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1702 txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1703 sizeof(*txq->txbufs) * nb_desc,
1704 RTE_CACHE_LINE_SIZE, socket_id);
1705 if (txq->txbufs == NULL) {
1706 nfp_net_tx_queue_release(txq);
1709 PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1710 txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1712 nfp_net_reset_tx_queue(txq);
1714 dev->data->tx_queues[queue_idx] = txq;
1718 * Telling the HW about the physical address of the TX ring and number
1719 * of descriptors in log2 format
1721 nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1722 nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1727 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1729 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1730 struct rte_mbuf *mb)
1733 struct nfp_net_hw *hw = txq->hw;
1735 if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
1738 ol_flags = mb->ol_flags;
1740 if (!(ol_flags & PKT_TX_TCP_SEG))
1743 txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
1744 txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
1745 txd->flags = PCIE_DESC_TX_LSO;
1754 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1756 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1757 struct rte_mbuf *mb)
1760 struct nfp_net_hw *hw = txq->hw;
1762 if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1765 ol_flags = mb->ol_flags;
1767 /* IPv6 does not need checksum */
1768 if (ol_flags & PKT_TX_IP_CKSUM)
1769 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1771 switch (ol_flags & PKT_TX_L4_MASK) {
1772 case PKT_TX_UDP_CKSUM:
1773 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1775 case PKT_TX_TCP_CKSUM:
1776 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1780 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1781 txd->flags |= PCIE_DESC_TX_CSUM;
1784 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1786 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1787 struct rte_mbuf *mb)
1789 struct nfp_net_hw *hw = rxq->hw;
1791 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1794 /* If IPv4 and IP checksum error, fail */
1795 if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1796 !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
1797 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1799 /* If neither UDP nor TCP, return */
1800 if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1801 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1804 if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1805 !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
1806 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1808 if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
1809 !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
1810 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1813 #define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1814 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1816 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1819 * nfp_net_set_hash - Set mbuf hash data
1821 * The RSS hash and hash-type are pre-pended to the packet data.
1822 * Extract and decode it and set the mbuf fields.
1825 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1826 struct rte_mbuf *mbuf)
1828 struct nfp_net_hw *hw = rxq->hw;
1829 uint8_t *meta_offset;
1832 uint32_t hash_type = 0;
1834 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1837 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
1838 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1841 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1842 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1844 } else if (NFP_DESC_META_LEN(rxd)) {
1847 * [metadata layout diagram: a 32-bit field type word followed by 32-bit
1852 * data field words, all prepended to the packet data]
1855 * Field type word contains up to 8 4bit field types
1856 * A 4bit field type refers to a data field word
1857 * A data field word can have several 4bit field types
1859 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1860 meta_offset -= NFP_DESC_META_LEN(rxd);
1861 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1863 /* NFP PMD just supports metadata for hashing */
1864 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1865 case NFP_NET_META_HASH:
1866 /* next field type is about the hash type */
1867 meta_info >>= NFP_NET_META_FIELD_SIZE;
1868 /* hash value is in the data field */
1869 hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1870 hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1873 /* Unsupported metadata can be a performance issue */
1880 mbuf->hash.rss = hash;
1881 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1883 switch (hash_type) {
1884 case NFP_NET_RSS_IPV4:
1885 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
1887 case NFP_NET_RSS_IPV6:
1888 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
1890 case NFP_NET_RSS_IPV6_EX:
1891 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
1894 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
1899 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
1901 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1909 * There are some decisions to take:
1910 * 1) How to check the RX descriptor DD bit
1911 * 2) How and when to allocate new mbufs
1913 * Current implementation checks just one single DD bit each loop. As each
1914 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
1915 * a single cache line instead. Tests with this change have not shown any
1916 * performance improvement but it requires further investigation. For example,
1917 * depending on which descriptor is next, the number of descriptors could be
1918 * less than 8 for just checking those in the same cache line. This implies
1919 * extra work which could be counterproductive by itself. Indeed, recent firmware
1920 * changes do just this: writing several descriptors with the DD bit set
1921 * to save PCIe bandwidth and DMA operations from the NFP.
1923 * Mbuf allocation is done when a new packet is received. Then the descriptor
1924 * is automatically linked with the new mbuf and the old one is given to the
1925 * user. The main drawback with this design is mbuf allocation is heavier than
1926 * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
1927 * cache point of view, it does not seem that allocating the mbuf early, as we
1928 * do now, has any benefit at all. Again, tests with this change have not
1929 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
1930 * so looking at the implications of this type of allocation should be studied
1935 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1937 struct nfp_net_rxq *rxq;
1938 struct nfp_net_rx_desc *rxds;
1939 struct nfp_net_rx_buff *rxb;
1940 struct nfp_net_hw *hw;
1941 struct rte_mbuf *mb;
1942 struct rte_mbuf *new_mb;
1948 if (unlikely(rxq == NULL)) {
1950 * DPDK just checks the queue is lower than max queues
1951 * enabled. But the queue needs to be configured
1953 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
1961 while (avail < nb_pkts) {
1962 rxb = &rxq->rxbufs[rxq->rd_p];
1963 if (unlikely(rxb == NULL)) {
1964 RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
1969 * Memory barrier to ensure that we won't do other
1970 * reads before the DD bit.
1974 rxds = &rxq->rxds[rxq->rd_p];
1975 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1979 * We got a packet. Let's alloc a new mbuf for refilling the
1980 * free descriptor ring as soon as possible
1982 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
1983 if (unlikely(new_mb == NULL)) {
1984 RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
1985 "queue_id=%u\n", (unsigned)rxq->port_id,
1986 (unsigned)rxq->qidx);
1987 nfp_net_mbuf_alloc_failed(rxq);
1994 * Grab the mbuf and refill the descriptor with the
1995 * previously allocated mbuf
2000 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
2001 rxds->rxd.data_len, rxq->mbuf_size);
2003 /* Size of this segment */
2004 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2005 /* Size of the whole packet. We just support 1 segment */
2006 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2008 if (unlikely((mb->data_len + hw->rx_offset) >
2011 * This should not happen and the user has the
2012 * responsibility of avoiding it. But we have
2013 * to give some info about the error
2015 RTE_LOG_DP(ERR, PMD,
2016 "mbuf overflow likely due to the RX offset.\n"
2017 "\t\tYour mbuf size should have extra space for"
2018 " RX offset=%u bytes.\n"
2019 "\t\tCurrently you just have %u bytes available"
2020 " but the received packet is %u bytes long",
2022 rxq->mbuf_size - hw->rx_offset,
2027 /* Filling the received mbuf with packet info */
2029 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2031 mb->data_off = RTE_PKTMBUF_HEADROOM +
2032 NFP_DESC_META_LEN(rxds);
2034 /* No scatter mode supported */
2038 /* Checking the RSS flag */
2039 nfp_net_set_hash(rxq, rxds, mb);
2041 /* Checking the checksum flag */
2042 nfp_net_rx_cksum(rxq, rxds, mb);
2044 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2045 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2046 mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2047 mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
2050 /* Adding the mbuf to the mbuf array passed by the app */
2051 rx_pkts[avail++] = mb;
2053 /* Now resetting and updating the descriptor */
2056 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2058 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2059 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2062 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
2069 PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
2070 (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
2072 nb_hold += rxq->nb_rx_hold;
2075 * FL descriptors need to be written before incrementing the
2076 * FL queue WR pointer
2079 if (nb_hold > rxq->rx_free_thresh) {
2080 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
2081 (unsigned)rxq->port_id, (unsigned)rxq->qidx,
2082 (unsigned)nb_hold, (unsigned)avail);
2083 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2086 rxq->nb_rx_hold = nb_hold;
2092 * nfp_net_tx_free_bufs - Check for descriptors with a complete status
2094 * @txq: TX queue to work with
2095 * Returns number of descriptors freed
2098 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2103 PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2104 " status\n", txq->qidx);
2106 /* Work out how many packets have been sent */
2107 qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2109 if (qcp_rd_p == txq->rd_p) {
2110 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2111 "packets (%u, %u)\n", txq->qidx,
2112 qcp_rd_p, txq->rd_p);
2116 if (qcp_rd_p > txq->rd_p)
2117 todo = qcp_rd_p - txq->rd_p;
2119 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2121 PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
2122 qcp_rd_p, txq->rd_p, txq->rd_p);
2128 if (unlikely(txq->rd_p >= txq->tx_count))
2129 txq->rd_p -= txq->tx_count;
2134 /* Always leave some free descriptors to avoid wrapping confusion */
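/*
 * Illustrative example (added note): with tx_count = 256, wr_p = 100 and
 * rd_p = 40, nfp_free_tx_desc() returns 256 - (100 - 40) - 8 = 188 free
 * descriptors.
 */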
2136 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2138 if (txq->wr_p >= txq->rd_p)
2139 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2141 return txq->rd_p - txq->wr_p - 8;
2145 * nfp_net_txq_full - Check if the number of free TX descriptors
2146 * is below tx_free_thresh
2148 * @txq: TX queue to check
2150 * This function uses the host copy* of read/write pointers
2153 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2155 return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2159 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2161 struct nfp_net_txq *txq;
2162 struct nfp_net_hw *hw;
2163 struct nfp_net_tx_desc *txds, txd;
2164 struct rte_mbuf *pkt;
2166 int pkt_size, dma_size;
2167 uint16_t free_descs, issued_descs;
2168 struct rte_mbuf **lmbuf;
2173 txds = &txq->txds[txq->wr_p];
2175 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
2176 txq->qidx, txq->wr_p, nb_pkts);
2178 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2179 nfp_net_tx_free_bufs(txq);
2181 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2182 if (unlikely(free_descs == 0))
2189 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
2190 txq->qidx, nb_pkts);
2191 /* Sending packets */
2192 while ((i < nb_pkts) && free_descs) {
2193 /* Grabbing the mbuf linked to the current descriptor */
2194 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2195 /* Warming the cache for releasing the mbuf later on */
2196 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2198 pkt = *(tx_pkts + i);
2200 if (unlikely((pkt->nb_segs > 1) &&
2201 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2202 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2203 rte_panic("Multisegment packet unsupported\n");
2206 /* Checking if we have enough descriptors */
2207 if (unlikely(pkt->nb_segs > free_descs))
2211 * Checksum and VLAN flags just in the first descriptor for a
2212 * multisegment packet, but TSO info needs to be in all of them.
2214 txd.data_len = pkt->pkt_len;
2215 nfp_net_tx_tso(txq, &txd, pkt);
2216 nfp_net_tx_cksum(txq, &txd, pkt);
2218 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2219 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2220 txd.flags |= PCIE_DESC_TX_VLAN;
2221 txd.vlan = pkt->vlan_tci;
2225 * mbuf data_len is the data in one segment and pkt_len data
2226 * in the whole packet. When the packet is just one segment,
2227 * then data_len = pkt_len
2229 pkt_size = pkt->pkt_len;
2232 /* Copying TSO, VLAN and cksum info */
2235 /* Releasing mbuf used by this descriptor previously */
2237 rte_pktmbuf_free_seg(*lmbuf);
2240 * Linking mbuf with descriptor for being released
2241 * next time descriptor is used
2245 dma_size = pkt->data_len;
2246 dma_addr = rte_mbuf_data_dma_addr(pkt);
2247 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2248 "%" PRIx64 "\n", dma_addr);
2250 /* Filling descriptor fields */
2251 txds->dma_len = dma_size;
2252 txds->data_len = txd.data_len;
2253 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2254 txds->dma_addr_lo = (dma_addr & 0xffffffff);
2255 ASSERT(free_descs > 0);
2259 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
2262 pkt_size -= dma_size;
2265 txds->offset_eop |= PCIE_DESC_TX_EOP;
2267 txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
2270 /* Referencing next free TX descriptor */
2271 txds = &txq->txds[txq->wr_p];
2272 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2279 /* Increment write pointers. Force memory write before we let HW know */
2281 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2287 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2289 uint32_t new_ctrl, update;
2290 struct nfp_net_hw *hw;
2292 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2295 if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2296 (mask & ETH_VLAN_EXTEND_OFFLOAD))
2297 RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
2298 " ETH_VLAN_EXTEND_OFFLOAD");
2300 	/* Enable VLAN strip if it is not already configured */
2301 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2302 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2303 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2305 	/* Disable VLAN strip only if it is currently configured */
2306 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2307 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2308 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2313 update = NFP_NET_CFG_UPDATE_GEN;
2315 if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
2318 hw->ctrl = new_ctrl;
2321 /* Update the Redirection Table (RETA) of the Ethernet device's Receive Side Scaling */
2323 nfp_net_reta_update(struct rte_eth_dev *dev,
2324 struct rte_eth_rss_reta_entry64 *reta_conf,
2327 uint32_t reta, mask;
2331 struct nfp_net_hw *hw =
2332 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2334 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2337 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2338 		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
2339 			"(%d) does not match the size supported by the hardware "
2340 			"(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2345 	 * Update the Redirection Table. There are 128 8-bit entries which can
2346 	 * be managed as 32 32-bit entries
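	 *
	 * Illustrative packing (example values): entries i..i+3 share one
	 * 32-bit word, with entry i in bits [7:0], i+1 in [15:8], i+2 in
	 * [23:16] and i+3 in [31:24]. For i = 4: idx = 4 / RTE_RETA_GROUP_SIZE
	 * = 0 and shift = 4, so mask takes the 4 bits of reta_conf[0].mask
	 * starting at bit 4, one bit per entry being updated.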
2348 for (i = 0; i < reta_size; i += 4) {
2349 /* Handling 4 RSS entries per loop */
2350 idx = i / RTE_RETA_GROUP_SIZE;
2351 shift = i % RTE_RETA_GROUP_SIZE;
2352 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2358 		/* If all 4 entries are being updated, there is no need to read the RETA register first */
2360 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2362 for (j = 0; j < 4; j++) {
2363 if (!(mask & (0x1 << j)))
2366 /* Clearing the entry bits */
2367 reta &= ~(0xFF << (8 * j));
2368 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2370 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2374 update = NFP_NET_CFG_UPDATE_RSS;
2376 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2382 /* Query the Redirection Table (RETA) of the Ethernet device's Receive Side Scaling */
2384 nfp_net_reta_query(struct rte_eth_dev *dev,
2385 struct rte_eth_rss_reta_entry64 *reta_conf,
2391 struct nfp_net_hw *hw;
2393 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2395 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2398 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2399 		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
2400 			"(%d) does not match the size supported by the hardware "
2401 			"(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2406 	 * Reading the Redirection Table. There are 128 8-bit entries which can
2407 	 * be managed as 32 32-bit entries
2409 for (i = 0; i < reta_size; i += 4) {
2410 /* Handling 4 RSS entries per loop */
2411 idx = i / RTE_RETA_GROUP_SIZE;
2412 shift = i % RTE_RETA_GROUP_SIZE;
2413 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2418 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2420 for (j = 0; j < 4; j++) {
2421 if (!(mask & (0x1 << j)))
2423 			reta_conf[idx].reta[shift + j] =
2424 				(uint8_t)((reta >> (8 * j)) & 0xFF);
2431 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2432 struct rte_eth_rss_conf *rss_conf)
2435 uint32_t cfg_rss_ctrl = 0;
2439 struct nfp_net_hw *hw;
2441 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2443 rss_hf = rss_conf->rss_hf;
2445 /* Checking if RSS is enabled */
2446 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2447 if (rss_hf != 0) { /* Enable RSS? */
2448 RTE_LOG(ERR, PMD, "RSS unsupported\n");
2451 return 0; /* Nothing to do */
2454 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2455 RTE_LOG(ERR, PMD, "hash key too long\n");
2459 if (rss_hf & ETH_RSS_IPV4)
2460 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
2461 NFP_NET_CFG_RSS_IPV4_TCP |
2462 NFP_NET_CFG_RSS_IPV4_UDP;
2464 if (rss_hf & ETH_RSS_IPV6)
2465 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
2466 NFP_NET_CFG_RSS_IPV6_TCP |
2467 NFP_NET_CFG_RSS_IPV6_UDP;
2469 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2470 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2472 /* configuring where to apply the RSS hash */
2473 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2475 	/* Writing the key byte by byte */
2476 for (i = 0; i < rss_conf->rss_key_len; i++) {
2477 memcpy(&key, &rss_conf->rss_key[i], 1);
2478 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2481 /* Writing the key size */
2482 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
2484 update = NFP_NET_CFG_UPDATE_RSS;
2486 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
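/*
 * Minimal usage sketch (illustrative only, not part of the driver flow):
 * how an application-side rte_eth_rss_conf could be built and handed to
 * the handler above. The 40-byte key length is an assumption made for
 * this example.
 *
 *	static uint8_t example_key[40];        (all-zero key, example only)
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = example_key,
 *		.rss_key_len = sizeof(example_key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
 *	};
 *	ret = nfp_net_rss_hash_update(dev, &rss_conf);
 */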
2493 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2494 struct rte_eth_rss_conf *rss_conf)
2497 uint32_t cfg_rss_ctrl;
2500 struct nfp_net_hw *hw;
2502 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2504 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2507 rss_hf = rss_conf->rss_hf;
2508 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2510 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2511 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
2513 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2514 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2516 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2517 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2519 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2520 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2522 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2523 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2525 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2526 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;
2528 /* Reading the key size */
2529 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2531 	/* Reading the key byte by byte */
2532 for (i = 0; i < rss_conf->rss_key_len; i++) {
2533 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2534 memcpy(&rss_conf->rss_key[i], &key, 1);
2540 /* Initialise and register driver with DPDK Application */
2541 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2542 .dev_configure = nfp_net_configure,
2543 .dev_start = nfp_net_start,
2544 .dev_stop = nfp_net_stop,
2545 .dev_close = nfp_net_close,
2546 .promiscuous_enable = nfp_net_promisc_enable,
2547 .promiscuous_disable = nfp_net_promisc_disable,
2548 .link_update = nfp_net_link_update,
2549 .stats_get = nfp_net_stats_get,
2550 .stats_reset = nfp_net_stats_reset,
2551 .dev_infos_get = nfp_net_infos_get,
2552 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2553 .mtu_set = nfp_net_dev_mtu_set,
2554 .vlan_offload_set = nfp_net_vlan_offload_set,
2555 .reta_update = nfp_net_reta_update,
2556 .reta_query = nfp_net_reta_query,
2557 .rss_hash_update = nfp_net_rss_hash_update,
2558 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2559 .rx_queue_setup = nfp_net_rx_queue_setup,
2560 .rx_queue_release = nfp_net_rx_queue_release,
2561 .rx_queue_count = nfp_net_rx_queue_count,
2562 .tx_queue_setup = nfp_net_tx_queue_setup,
2563 .tx_queue_release = nfp_net_tx_queue_release,
2564 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2565 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
2569  * Every eth_dev created gets its own private data, but before nfp_net_init
2570  * runs, that private data references the private data of all the PF ports.
2571  * This is because the vNIC BARs are mapped based on the first port, so all
2572  * ports need access to port 0's private data. Inside nfp_net_init the private
2573  * data pointer is changed to the right per-port address once the BARs are mapped.
2575  * This function helps to find out which port, and therefore which offset
2576  * inside the private data array, to use.
2579 get_pf_port_number(char *name)
2581 char *pf_str = name;
2584 while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
2589 		 * This should never happen and would indicate a major
2590 		 * implementation fault.
2592 rte_panic("nfp_net: problem with pf device name\n");
2594 /* Expecting _portX with X within [0,7] */
2597 return (int)strtol(pf_str, NULL, 10);
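/*
 * Illustrative example (the PCI address is hypothetical): for an eth_dev
 * named "0000:03:00.0_port3" the loop above stops at the first '_', the
 * "_port" prefix is skipped and strtol() converts the trailing digit, so
 * the function returns 3.
 */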
2601 nfp_net_init(struct rte_eth_dev *eth_dev)
2603 struct rte_pci_device *pci_dev;
2604 struct nfp_net_hw *hw, *hwport0;
2606 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2610 nspu_desc_t *nspu_desc = NULL;
2611 uint64_t bar_offset;
2614 PMD_INIT_FUNC_TRACE();
2616 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2618 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2619 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2620 port = get_pf_port_number(eth_dev->data->name);
2621 if (port < 0 || port > 7) {
2622 RTE_LOG(ERR, PMD, "Port value is wrong\n");
2626 PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
2628 /* This points to port 0 private data */
2629 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2631 /* This points to the specific port private data */
2632 hw = &hwport0[port];
2633 hw->pf_port_idx = port;
2635 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2639 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2640 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2641 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2643 /* For secondary processes, the primary has done all the work */
2644 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2647 rte_eth_copy_pci_info(eth_dev, pci_dev);
2648 /* hotplug is not possible with multiport PF */
2649 if (!hw->pf_multiport_enabled)
2650 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
2652 hw->device_id = pci_dev->id.device_id;
2653 hw->vendor_id = pci_dev->id.vendor_id;
2654 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2655 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2657 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2658 pci_dev->id.vendor_id, pci_dev->id.device_id,
2659 pci_dev->addr.domain, pci_dev->addr.bus,
2660 pci_dev->addr.devid, pci_dev->addr.function);
2662 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2663 if (hw->ctrl_bar == NULL) {
2665 "hw->ctrl_bar is NULL. BAR0 not configured\n");
2669 if (hw->is_pf && port == 0) {
2670 nspu_desc = hw->nspu_desc;
2672 if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) {
2674 			 * Firmware should already be loaded after the PF probe,
2675 			 * so this should not happen.
2677 RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n");
2681 /* vNIC PF control BAR is a subset of PF PCI device BAR */
2682 hw->ctrl_bar += bar_offset;
2683 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2687 if (!hwport0->ctrl_bar)
2690 /* address based on port0 offset */
2691 hw->ctrl_bar = hwport0->ctrl_bar +
2692 (port * NFP_PF_CSR_SLICE_SIZE);
2695 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
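	/*
	 * Example of the per-port control BAR slicing above (sizes are
	 * illustrative): with port 0's control BAR mapped at bar_offset
	 * inside BAR0, port N uses hwport0->ctrl_bar plus
	 * N * NFP_PF_CSR_SLICE_SIZE, so with a 32 KB slice port 2 starts
	 * 64 KB after port 0's control area.
	 */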
2697 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2698 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2700 /* Work out where in the BAR the queues start. */
2701 switch (pci_dev->id.device_id) {
2702 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2703 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2704 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2705 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2706 tx_bar_off = NFP_PCIE_QUEUE(start_q);
2707 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2708 rx_bar_off = NFP_PCIE_QUEUE(start_q);
2711 RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
2715 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
2716 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
2718 if (hw->is_pf && port == 0) {
2719 /* configure access to tx/rx vNIC BARs */
2720 nfp_nsp_map_queues_bar(nspu_desc, &bar_offset);
2721 PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n",
2723 hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr;
2725 /* vNIC PF tx/rx BARs are a subset of PF PCI device */
2726 hwport0->hw_queues += bar_offset;
2728 		/* Let's take this opportunity to read the eth table from hw */
2729 if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
2734 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2735 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2736 eth_dev->data->dev_private = hw;
2738 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2740 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2744 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2745 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2747 nfp_net_cfg_queue_setup(hw);
2749 /* Get some of the read-only fields from the config BAR */
2750 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2751 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2752 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2753 hw->mtu = hw->max_mtu;
2755 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2756 hw->rx_offset = NFP_NET_RX_OFFSET;
2758 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2760 PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
2761 hw->ver, hw->max_mtu);
2762 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
2763 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2764 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2765 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2766 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2767 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2768 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2769 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2770 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2771 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
2775 hw->stride_rx = stride;
2776 hw->stride_tx = stride;
2778 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2779 hw->max_rx_queues, hw->max_tx_queues);
2781 /* Initializing spinlock for reconfigs */
2782 rte_spinlock_init(&hw->reconfig_lock);
2784 /* Allocating memory for mac addr */
2785 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2786 if (eth_dev->data->mac_addrs == NULL) {
2787 		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2792 nfp_net_pf_read_mac(hwport0, port);
2794 nfp_net_vf_read_mac(hw);
2796 if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
2797 /* Using random mac addresses for VFs */
2798 eth_random_addr(&hw->mac_addr[0]);
2799 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2802 /* Copying mac address to DPDK eth_dev struct */
2803 ether_addr_copy((struct ether_addr *)hw->mac_addr,
2804 			&eth_dev->data->mac_addrs[0]);
2806 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2807 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2808 eth_dev->data->port_id, pci_dev->id.vendor_id,
2809 pci_dev->id.device_id,
2810 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2811 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
2813 /* Registering LSC interrupt handler */
2814 rte_intr_callback_register(&pci_dev->intr_handle,
2815 nfp_net_dev_interrupt_handler,
2818 /* Telling the firmware about the LSC interrupt entry */
2819 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2821 /* Recording current stats counters values */
2822 nfp_net_stats_reset(eth_dev);
2828 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
2829 nfpu_desc_t *nfpu_desc, void **priv)
2831 struct rte_eth_dev *eth_dev;
2832 struct nfp_net_hw *hw;
2836 port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
2841 sprintf(port_name, "%s_port%d", dev->device.name, port);
2843 sprintf(port_name, "%s", dev->device.name);
2845 eth_dev = rte_eth_dev_allocate(port_name);
2850 *priv = rte_zmalloc(port_name,
2851 sizeof(struct nfp_net_adapter) * ports,
2852 RTE_CACHE_LINE_SIZE);
2854 rte_eth_dev_release_port(eth_dev);
2859 eth_dev->data->dev_private = *priv;
2862 	 * dev_private points to port 0's dev_private because the vNIC
2863 	 * BARs need to be configured based on port 0 in nfp_net_init.
2864 	 * dev_private is then adjusted per port.
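	 *
	 * Layout sketch (example with ports = 4): dev_private points at an
	 * array of four per-port adapter entries allocated above; the
	 * arithmetic below indexes the entry for 'port', and nfp_net_init()
	 * later rewrites dev_private to that per-port entry once the BARs
	 * have been mapped through port 0.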
2866 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
2867 hw->nspu_desc = nfpu_desc->nspu;
2868 hw->nfpu_desc = nfpu_desc;
2871 hw->pf_multiport_enabled = 1;
2873 eth_dev->device = &dev->device;
2874 rte_eth_copy_pci_info(eth_dev, dev);
2876 ret = nfp_net_init(eth_dev);
2879 rte_eth_dev_release_port(eth_dev);
2881 rte_free(port_name);
2886 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2887 struct rte_pci_device *dev)
2889 nfpu_desc_t *nfpu_desc;
2890 nspu_desc_t *nspu_desc;
2891 uint64_t offset_symbol;
2892 uint8_t *bar_offset;
2902 nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0);
2906 if (nfpu_open(dev, nfpu_desc, 0) < 0) {
2908 "nfpu_open failed\n");
2912 nspu_desc = nfpu_desc->nspu;
2915 /* Check NSP ABI version */
2916 if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) {
2917 RTE_LOG(INFO, PMD, "NFP NSP not present\n");
2920 PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor);
2922 if ((major == 0) && (minor < 20)) {
2923 RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. Required 0.20 or higher\n");
2927 ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports",
2932 bar_offset = (uint8_t *)dev->mem_resource[0].addr;
2933 bar_offset += offset_symbol;
2934 total_ports = (uint32_t)*bar_offset;
2935 PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
2937 if (total_ports <= 0 || total_ports > 8) {
2938 		RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol has an invalid value\n");
2943 for (i = 0; i < total_ports; i++) {
2944 ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv);
2952 nfpu_close(nfpu_desc);
2954 rte_free(nfpu_desc);
2959 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
2961 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2962 PCI_DEVICE_ID_NFP4000_PF_NIC)
2965 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2966 PCI_DEVICE_ID_NFP6000_PF_NIC)
2973 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
2975 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2976 PCI_DEVICE_ID_NFP6000_VF_NIC)
2983 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2984 struct rte_pci_device *pci_dev)
2986 return rte_eth_dev_pci_generic_probe(pci_dev,
2987 sizeof(struct nfp_net_adapter), nfp_net_init);
2990 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2992 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2995 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
2996 .id_table = pci_id_nfp_pf_net_map,
2997 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2998 .probe = nfp_pf_pci_probe,
2999 .remove = eth_nfp_pci_remove,
3002 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3003 .id_table = pci_id_nfp_vf_net_map,
3004 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3005 .probe = eth_nfp_pci_probe,
3006 .remove = eth_nfp_pci_remove,
3009 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3010 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3011 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3012 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3013 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3014 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3018 * c-file-style: "Linux"
3019 * indent-tabs-mode: t