1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7
8 /*
9  * vim:shiftwidth=8:noexpandtab
10  *
11  * @file dpdk/pmd/nfp_ethdev.c
12  *
13  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
14  */
15
16 #include <rte_common.h>
17 #include <ethdev_driver.h>
18 #include <ethdev_pci.h>
19 #include <rte_dev.h>
20 #include <rte_ether.h>
21 #include <rte_malloc.h>
22 #include <rte_memzone.h>
23 #include <rte_mempool.h>
24 #include <rte_service_component.h>
25 #include "eal_firmware.h"
26
27 #include "nfpcore/nfp_cpp.h"
28 #include "nfpcore/nfp_nffw.h"
29 #include "nfpcore/nfp_hwinfo.h"
30 #include "nfpcore/nfp_mip.h"
31 #include "nfpcore/nfp_rtsym.h"
32 #include "nfpcore/nfp_nsp.h"
33
34 #include "nfp_common.h"
35 #include "nfp_rxtx.h"
36 #include "nfp_logs.h"
37 #include "nfp_ctrl.h"
38 #include "nfp_cpp_bridge.h"
39
40
41 static int nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port);
42 static int nfp_net_start(struct rte_eth_dev *dev);
43 static int nfp_net_stop(struct rte_eth_dev *dev);
44 static int nfp_net_set_link_up(struct rte_eth_dev *dev);
45 static int nfp_net_set_link_down(struct rte_eth_dev *dev);
46 static int nfp_net_close(struct rte_eth_dev *dev);
47 static int nfp_net_init(struct rte_eth_dev *eth_dev);
48 static int nfp_fw_upload(struct rte_pci_device *dev,
49                          struct nfp_nsp *nsp, char *card);
50 static int nfp_fw_setup(struct rte_pci_device *dev,
51                         struct nfp_cpp *cpp,
52                         struct nfp_eth_table *nfp_eth_table,
53                         struct nfp_hwinfo *hwinfo);
54 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
55 static int nfp_pf_init(struct rte_pci_device *pci_dev);
56 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev);
57 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
58                             struct rte_pci_device *dev);
59 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
60 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev);
61
62 static int
63 nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
64 {
65         struct nfp_eth_table *nfp_eth_table;
66         struct nfp_net_hw *hw = NULL;
67
68         /* Grab a pointer to the correct physical port */
69         hw = pf_dev->ports[port];
70
71         nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
           if (nfp_eth_table == NULL)
                   return -EIO;
72
73         nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
74                          (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
75
76         free(nfp_eth_table);
77         return 0;
78 }
79
80 static int
81 nfp_net_start(struct rte_eth_dev *dev)
82 {
83         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
84         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
85         uint32_t new_ctrl, update = 0;
86         struct nfp_net_hw *hw;
87         struct nfp_pf_dev *pf_dev;
88         struct rte_eth_conf *dev_conf;
89         struct rte_eth_rxmode *rxmode;
90         uint32_t intr_vector;
91         int ret;
92
93         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
94         pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
95
96         PMD_INIT_LOG(DEBUG, "Start");
97
98         /* Disabling queues just in case... */
99         nfp_net_disable_queues(dev);
100
101         /* Enabling the required queues in the device */
102         nfp_net_enable_queues(dev);
103
104         /* check and configure queue intr-vector mapping */
105         if (dev->data->dev_conf.intr_conf.rxq != 0) {
106                 if (pf_dev->multiport) {
107                         PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
108                                           "with NFP multiport PF");
109                         return -EINVAL;
110                 }
111                 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
112                         /*
113                          * Better not to share LSC with RX interrupts.
114                          * Unregistering LSC interrupt handler
115                          */
116                         rte_intr_callback_unregister(&pci_dev->intr_handle,
117                                 nfp_net_dev_interrupt_handler, (void *)dev);
118
119                         if (dev->data->nb_rx_queues > 1) {
120                                 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
121                                              "supports 1 queue with UIO");
122                                 return -EIO;
123                         }
124                 }
125                 intr_vector = dev->data->nb_rx_queues;
126                 if (rte_intr_efd_enable(intr_handle, intr_vector))
127                         return -1;
128
129                 nfp_configure_rx_interrupt(dev, intr_handle);
130                 update = NFP_NET_CFG_UPDATE_MSIX;
131         }
132
133         rte_intr_enable(intr_handle);
134
135         new_ctrl = nfp_check_offloads(dev);
136
137         /* Writing configuration parameters in the device */
138         nfp_net_params_setup(hw);
139
140         dev_conf = &dev->data->dev_conf;
141         rxmode = &dev_conf->rxmode;
142
143         if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
144                 nfp_net_rss_config_default(dev);
145                 update |= NFP_NET_CFG_UPDATE_RSS;
146                 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
147         }
148
149         /* Enable device */
150         new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
151
152         update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
153
154         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
155                 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
156
157         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
158         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
159                 return -EIO;
160
161         /*
162          * Allocating rte mbufs for configured rx queues.
163          * This requires queues being enabled before
164          */
165         if (nfp_net_rx_freelist_setup(dev) < 0) {
166                 ret = -ENOMEM;
167                 goto error;
168         }
169
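           /*
            * Primary and secondary processes hold different CPP handles: the
            * primary uses hw->cpp, while secondaries use the handle saved in
            * dev->process_private when attaching to the port.
            */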
170         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
171                 /* Configure the physical port up */
172                 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
173         else
174                 nfp_eth_set_configured(dev->process_private,
175                                        hw->nfp_idx, 1);
176
177         hw->ctrl = new_ctrl;
178
179         return 0;
180
181 error:
182         /*
183          * An error returned by this function should mean the app is
184          * exiting, at which point the system releases all the memory
185          * it allocated, even memory coming from hugepages.
186          *
187          * The device could be enabled at this point with some queues
188          * ready for getting packets. This is true if the call to
189          * nfp_net_rx_freelist_setup() succeeds for some queues but
190          * fails for subsequent queues.
191          *
192          * This should make the app exit, but it is better to tell the
193          * device first.
194          */
195         nfp_net_disable_queues(dev);
196
197         return ret;
198 }
199
200 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
201 static int
202 nfp_net_stop(struct rte_eth_dev *dev)
203 {
204         int i;
205         struct nfp_net_hw *hw;
206         struct nfp_net_txq *this_tx_q;
207         struct nfp_net_rxq *this_rx_q;
208
209         PMD_INIT_LOG(DEBUG, "Stop");
210
211         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
212
213         nfp_net_disable_queues(dev);
214
215         /* Clear queues */
216         for (i = 0; i < dev->data->nb_tx_queues; i++) {
217                 this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
218                 nfp_net_reset_tx_queue(this_tx_q);
219         }
220
221         for (i = 0; i < dev->data->nb_rx_queues; i++) {
222                 this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
223                 nfp_net_reset_rx_queue(this_rx_q);
224         }
225
226         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
227                 /* Configure the physical port down */
228                 nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
229         else
230                 nfp_eth_set_configured(dev->process_private,
231                                        hw->nfp_idx, 0);
232
233         return 0;
234 }
235
236 /* Set the link up. */
237 static int
238 nfp_net_set_link_up(struct rte_eth_dev *dev)
239 {
240         struct nfp_net_hw *hw;
241
242         PMD_DRV_LOG(DEBUG, "Set link up");
243
244         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
245
246         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
247                 /* Configure the physical port up */
248                 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
249         else
250                 return nfp_eth_set_configured(dev->process_private,
251                                               hw->nfp_idx, 1);
252 }
253
254 /* Set the link down. */
255 static int
256 nfp_net_set_link_down(struct rte_eth_dev *dev)
257 {
258         struct nfp_net_hw *hw;
259
260         PMD_DRV_LOG(DEBUG, "Set link down");
261
262         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
263
264         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
265                 /* Configure the physical port down */
266                 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
267         else
268                 return nfp_eth_set_configured(dev->process_private,
269                                               hw->nfp_idx, 0);
270 }
271
272 /* Reset and stop device. The device can not be restarted. */
273 static int
274 nfp_net_close(struct rte_eth_dev *dev)
275 {
276         struct nfp_net_hw *hw;
277         struct rte_pci_device *pci_dev;
278         struct nfp_pf_dev *pf_dev;
279         struct nfp_net_txq *this_tx_q;
280         struct nfp_net_rxq *this_rx_q;
281         int i;
282
283         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
284                 return 0;
285
286         PMD_INIT_LOG(DEBUG, "Close");
287
288         pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
289         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
290         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
291
292         /*
293          * We assume that the DPDK application is stopping all the
294          * threads/queues before calling the device close function.
295          */
296
297         nfp_net_disable_queues(dev);
298
299         /* Clear queues */
300         for (i = 0; i < dev->data->nb_tx_queues; i++) {
301                 this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
302                 nfp_net_reset_tx_queue(this_tx_q);
303         }
304
305         for (i = 0; i < dev->data->nb_rx_queues; i++) {
306                 this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
307                 nfp_net_reset_rx_queue(this_rx_q);
308         }
309
310         /* Only free PF resources after all physical ports have been closed */
311         /* Mark this port as unused and free device priv resources */
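           /*
            * Writing 0xff to NFP_NET_CFG_LSC presumably points the link state
            * change event at an unused MSI-X entry, disarming it for this port
            * before the port is released (assumption based on the config layout).
            */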
312         nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
313         pf_dev->ports[hw->idx] = NULL;
314         rte_eth_dev_release_port(dev);
315
316         for (i = 0; i < pf_dev->total_phyports; i++) {
317                 /* Check to see if ports are still in use */
318                 if (pf_dev->ports[i])
319                         return 0;
320         }
321
322         /* Now it is safe to free all PF resources */
323         PMD_INIT_LOG(INFO, "Freeing PF resources");
324         nfp_cpp_area_free(pf_dev->ctrl_area);
325         nfp_cpp_area_free(pf_dev->hwqueues_area);
326         free(pf_dev->hwinfo);
327         free(pf_dev->sym_tbl);
328         nfp_cpp_free(pf_dev->cpp);
329         rte_free(pf_dev);
330
331         rte_intr_disable(&pci_dev->intr_handle);
332
333         /* unregister callback func from eal lib */
334         rte_intr_callback_unregister(&pci_dev->intr_handle,
335                                      nfp_net_dev_interrupt_handler,
336                                      (void *)dev);
337
338         /*
339          * The ixgbe PMD disables the PCIe bus master on the device
340          * at close time; the i40e PMD does not...
341          */
342
343         return 0;
344 }
345
346 /* Initialise and register driver with DPDK Application */
347 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
348         .dev_configure          = nfp_net_configure,
349         .dev_start              = nfp_net_start,
350         .dev_stop               = nfp_net_stop,
351         .dev_set_link_up        = nfp_net_set_link_up,
352         .dev_set_link_down      = nfp_net_set_link_down,
353         .dev_close              = nfp_net_close,
354         .promiscuous_enable     = nfp_net_promisc_enable,
355         .promiscuous_disable    = nfp_net_promisc_disable,
356         .link_update            = nfp_net_link_update,
357         .stats_get              = nfp_net_stats_get,
358         .stats_reset            = nfp_net_stats_reset,
359         .dev_infos_get          = nfp_net_infos_get,
360         .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
361         .mtu_set                = nfp_net_dev_mtu_set,
362         .mac_addr_set           = nfp_set_mac_addr,
363         .vlan_offload_set       = nfp_net_vlan_offload_set,
364         .reta_update            = nfp_net_reta_update,
365         .reta_query             = nfp_net_reta_query,
366         .rss_hash_update        = nfp_net_rss_hash_update,
367         .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
368         .rx_queue_setup         = nfp_net_rx_queue_setup,
369         .rx_queue_release       = nfp_net_rx_queue_release,
370         .tx_queue_setup         = nfp_net_tx_queue_setup,
371         .tx_queue_release       = nfp_net_tx_queue_release,
372         .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
373         .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
374 };
375
376 static int
377 nfp_net_init(struct rte_eth_dev *eth_dev)
378 {
379         struct rte_pci_device *pci_dev;
380         struct nfp_pf_dev *pf_dev;
381         struct nfp_net_hw *hw;
382         struct rte_ether_addr *tmp_ether_addr;
383
384         uint64_t tx_bar_off = 0, rx_bar_off = 0;
385         uint32_t start_q;
386         int stride = 4;
387         int port = 0;
388         int err;
389
390         PMD_INIT_FUNC_TRACE();
391
392         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
393
394         /* Use backpointer here to the PF of this eth_dev */
395         pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
396
397         /* NFP can not handle DMA addresses requiring more than 40 bits */
398         if (rte_mem_check_dma_mask(40)) {
399                 RTE_LOG(ERR, PMD, "device %s cannot be used:",
400                                    pci_dev->device.name);
401                 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
402                 return -ENODEV;
403         }
404
405         port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
406         if (port < 0 || port > 7) {
407                 PMD_DRV_LOG(ERR, "Port value is wrong");
408                 return -ENODEV;
409         }
410
411         /* Use PF array of physical ports to get pointer to
412          * this specific port
413          */
414         hw = pf_dev->ports[port];
415
416         PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
417                             "NFP internal port number: %d",
418                             port, hw->nfp_idx);
419
420         eth_dev->dev_ops = &nfp_net_eth_dev_ops;
421         eth_dev->rx_queue_count = nfp_net_rx_queue_count;
422         eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
423         eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
424
425         /* For secondary processes, the primary has done all the work */
426         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
427                 return 0;
428
429         rte_eth_copy_pci_info(eth_dev, pci_dev);
430
431         hw->device_id = pci_dev->id.device_id;
432         hw->vendor_id = pci_dev->id.vendor_id;
433         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
434         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
435
436         PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
437                      pci_dev->id.vendor_id, pci_dev->id.device_id,
438                      pci_dev->addr.domain, pci_dev->addr.bus,
439                      pci_dev->addr.devid, pci_dev->addr.function);
440
441         hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
442         if (hw->ctrl_bar == NULL) {
443                 PMD_DRV_LOG(ERR,
444                         "hw->ctrl_bar is NULL. BAR0 not configured");
445                 return -ENODEV;
446         }
447
448         if (port == 0) {
449                 hw->ctrl_bar = pf_dev->ctrl_bar;
450         } else {
451                 if (!pf_dev->ctrl_bar)
452                         return -ENODEV;
453                 /* Use port offset in pf ctrl_bar for this
454                  * ports control bar
455                  */
456                 hw->ctrl_bar = pf_dev->ctrl_bar +
457                                (port * NFP_PF_CSR_SLICE_SIZE);
458         }
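           /*
            * Each physical port owns a fixed NFP_PF_CSR_SLICE_SIZE slice of
            * the PF control BAR, so port N starts at that offset times N.
            */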
459
460         PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
461
462         hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
463         hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
464
465         /* Work out where in the BAR the queues start. */
466         switch (pci_dev->id.device_id) {
467         case PCI_DEVICE_ID_NFP4000_PF_NIC:
468         case PCI_DEVICE_ID_NFP6000_PF_NIC:
469                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
470                 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
471                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
472                 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
473                 break;
474         default:
475                 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
476                 err = -ENODEV;
477                 goto dev_err_ctrl_map;
478         }
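           /*
            * The queue controller peripheral exposes one NFP_QCP_QUEUE_ADDR_SZ
            * window per queue; the offsets above locate the first TX and RX
            * queues of this vNIC inside the mapped queue area.
            */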
479
480         PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
481         PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
482
483         hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
484         hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
485         eth_dev->data->dev_private = hw;
486
487         PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
488                      hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
489
490         nfp_net_cfg_queue_setup(hw);
491
492         /* Get some of the read-only fields from the config BAR */
493         hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
494         hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
495         hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
496         hw->mtu = RTE_ETHER_MTU;
497
498         /* VLAN insertion is incompatible with LSOv2 */
499         if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
500                 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
501
502         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
503                 hw->rx_offset = NFP_NET_RX_OFFSET;
504         else
505                 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
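           /*
            * NFD versions before 2 use a fixed RX packet prepend offset;
            * newer firmware advertises the offset via the config BAR.
            */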
506
507         PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
508                            NFD_CFG_MAJOR_VERSION_of(hw->ver),
509                            NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
510
511         PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
512                      hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
513                      hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
514                      hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
515                      hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
516                      hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
517                      hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
518                      hw->cap & NFP_NET_CFG_CTRL_TXVLAN  ? "TXVLAN "  : "",
519                      hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
520                      hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
521                      hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR "  : "",
522                      hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
523                      hw->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSOv2 "     : "",
524                      hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "",
525                      hw->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSSv2 "     : "");
526
527         hw->ctrl = 0;
528
529         hw->stride_rx = stride;
530         hw->stride_tx = stride;
531
532         PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
533                      hw->max_rx_queues, hw->max_tx_queues);
534
535         /* Initializing spinlock for reconfigs */
536         rte_spinlock_init(&hw->reconfig_lock);
537
538         /* Allocating memory for mac addr */
539         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
540                                                RTE_ETHER_ADDR_LEN, 0);
541         if (eth_dev->data->mac_addrs == NULL) {
542                 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
543                 err = -ENOMEM;
544                 goto dev_err_queues_map;
545         }
546
547         nfp_net_pf_read_mac(pf_dev, port);
548         nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
549
550         tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
551         if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
552                 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
553                                    port);
554                 /* Using a random MAC address */
555                 rte_eth_random_addr(&hw->mac_addr[0]);
556                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
557         }
558
559         /* Copying mac address to DPDK eth_dev struct */
560         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
561                         &eth_dev->data->mac_addrs[0]);
562
563         if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
564                 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
565
566         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
567
568         PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
569                      "mac=" RTE_ETHER_ADDR_PRT_FMT,
570                      eth_dev->data->port_id, pci_dev->id.vendor_id,
571                      pci_dev->id.device_id,
572                      hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
573                      hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
574
575         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
576                 /* Registering LSC interrupt handler */
577                 rte_intr_callback_register(&pci_dev->intr_handle,
578                                            nfp_net_dev_interrupt_handler,
579                                            (void *)eth_dev);
580                 /* Telling the firmware about the LSC interrupt entry */
581                 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
582                 /* Recording current stats counters values */
583                 nfp_net_stats_reset(eth_dev);
584         }
585
586         return 0;
587
588 dev_err_queues_map:
589                 nfp_cpp_area_free(hw->hwqueues_area);
590 dev_err_ctrl_map:
591                 nfp_cpp_area_free(hw->ctrl_area);
592
593         return err;
594 }
595
596 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
597
598 static int
599 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
600 {
601         struct nfp_cpp *cpp = nsp->cpp;
602         void *fw_buf;
603         char fw_name[125];
604         char serial[40];
605         size_t fsize;
606
607         /* Looking for firmware file in order of priority */
608
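           /*
            * Illustrative candidate names under DEFAULT_FW_PATH, in the order
            * tried below (serial, interface, PCI name and card model values
            * are examples only):
            *   serial-00-15-4d-13-51-0c-10-ff.nffw
            *   pci-0000:03:00.0.nffw
            *   nic_AMDA0099-0001_2x25.nffw
            */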
609         /* First try to find a firmware image specific for this device */
610         snprintf(serial, sizeof(serial),
611                         "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
612                 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
613                 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
614                 cpp->interface & 0xff);
615
616         snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
617                         serial);
618
619         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
620         if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
621                 goto load_fw;
622         /* Then try the PCI name */
623         snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
624                         dev->device.name);
625
626         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
627         if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
628                 goto load_fw;
629
630         /* Finally try the card type and media */
631         snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
632         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
633         if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
634                 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
635                 return -ENOENT;
636         }
637
638 load_fw:
639         PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
640                 fw_name, fsize);
641         PMD_DRV_LOG(INFO, "Uploading the firmware ...");
642         nfp_nsp_load_fw(nsp, fw_buf, fsize);
643         PMD_DRV_LOG(INFO, "Done");
644
645         free(fw_buf);
646
647         return 0;
648 }
649
650 static int
651 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
652              struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
653 {
654         struct nfp_nsp *nsp;
655         const char *nfp_fw_model;
656         char card_desc[100];
657         int err = 0;
658
659         nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
660
661         if (nfp_fw_model) {
662                 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
663         } else {
664                 PMD_DRV_LOG(ERR, "firmware model NOT found");
665                 return -EIO;
666         }
667
668         if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
669                 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
670                        nfp_eth_table->count);
671                 return -EIO;
672         }
673
674         PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
675                            nfp_eth_table->count);
676
677         PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
678
679         snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
680                         nfp_fw_model, nfp_eth_table->count,
681                         nfp_eth_table->ports[0].speed / 1000);
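           /*
            * e.g. "nic_AMDA0099-0001_2x25.nffw" for a hypothetical 2x25G card;
            * the model string comes from the "assembly.partno" hwinfo key
            * looked up above.
            */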
682
683         nsp = nfp_nsp_open(cpp);
684         if (!nsp) {
685                 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
686                 return -EIO;
687         }
688
689         nfp_nsp_device_soft_reset(nsp);
690         err = nfp_fw_upload(dev, nsp, card_desc);
691
692         nfp_nsp_close(nsp);
693         return err;
694 }
695
696 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
697 {
698         struct nfp_net_hw *hw;
699         struct rte_eth_dev *eth_dev;
700         struct nfp_eth_table *nfp_eth_table = NULL;
701         int ret = 0;
702         int i;
703
704         nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
705         if (!nfp_eth_table) {
706                 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
707                 ret = -EIO;
708                 goto error;
709         }
710
711         /* Loop through all physical ports on PF */
712         for (i = 0; i < pf_dev->total_phyports; i++) {
713                 const unsigned int numa_node = rte_socket_id();
714                 char port_name[RTE_ETH_NAME_MAX_LEN];
715
716                 snprintf(port_name, sizeof(port_name), "%s_port%d",
717                          pf_dev->pci_dev->device.name, i);
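                /* e.g. "0000:03:00.0_port0" for the first port (PCI address is illustrative) */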
718
719                 /* Allocate a eth_dev for this phyport */
720                 eth_dev = rte_eth_dev_allocate(port_name);
721                 if (!eth_dev) {
722                         ret = -ENODEV;
723                         goto port_cleanup;
724                 }
725
726                 /* Allocate memory for this phyport */
727                 eth_dev->data->dev_private =
728                         rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
729                                            RTE_CACHE_LINE_SIZE, numa_node);
730                 if (!eth_dev->data->dev_private) {
731                         ret = -ENOMEM;
732                         rte_eth_dev_release_port(eth_dev);
733                         goto port_cleanup;
734                 }
735
736                 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
737
738                 /* Add this device to the PF's array of physical ports */
739                 pf_dev->ports[i] = hw;
740
741                 hw->pf_dev = pf_dev;
742                 hw->cpp = pf_dev->cpp;
743                 hw->eth_dev = eth_dev;
744                 hw->idx = i;
745                 hw->nfp_idx = nfp_eth_table->ports[i].index;
746                 hw->is_phyport = true;
747
748                 eth_dev->device = &pf_dev->pci_dev->device;
749
750                 /* ctrl/tx/rx BAR mappings and remaining init happens in
751                  * nfp_net_init
752                  */
753                 ret = nfp_net_init(eth_dev);
754
755                 if (ret) {
756                         ret = -ENODEV;
757                         goto port_cleanup;
758                 }
759
760                 rte_eth_dev_probing_finish(eth_dev);
761
762         } /* End loop, all ports on this PF */
763         ret = 0;
764         goto eth_table_cleanup;
765
766 port_cleanup:
767         for (i = 0; i < pf_dev->total_phyports; i++) {
768                 if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
769                         struct rte_eth_dev *tmp_dev;
770                         tmp_dev = pf_dev->ports[i]->eth_dev;
771                         rte_eth_dev_release_port(tmp_dev);
772                         pf_dev->ports[i] = NULL;
773                 }
774         }
775 eth_table_cleanup:
776         free(nfp_eth_table);
777 error:
778         return ret;
779 }
780
781 static int nfp_pf_init(struct rte_pci_device *pci_dev)
782 {
783         struct nfp_pf_dev *pf_dev = NULL;
784         struct nfp_cpp *cpp;
785         struct nfp_hwinfo *hwinfo;
786         struct nfp_rtsym_table *sym_tbl;
787         struct nfp_eth_table *nfp_eth_table = NULL;
788         char name[RTE_ETH_NAME_MAX_LEN];
789         int total_ports;
790         int ret = -ENODEV;
791         int err;
792
793         if (!pci_dev)
794                 return ret;
795
796         /*
797          * When the device is bound to UIO it could, by mistake, be used
798          * by two DPDK apps, and the UIO driver does not prevent it. This
799          * could lead to a serious problem when configuring the NFP CPP
800          * interface. Here we avoid this by telling the CPP init code to
801          * use a lock file if UIO is being used.
802          */
803         if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
804                 cpp = nfp_cpp_from_device_name(pci_dev, 0);
805         else
806                 cpp = nfp_cpp_from_device_name(pci_dev, 1);
807
808         if (!cpp) {
809                 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
810                 ret = -EIO;
811                 goto error;
812         }
813
814         hwinfo = nfp_hwinfo_read(cpp);
815         if (!hwinfo) {
816                 PMD_INIT_LOG(ERR, "Error reading hwinfo table");
817                 ret = -EIO;
818                 goto error;
819         }
820
821         nfp_eth_table = nfp_eth_read_ports(cpp);
822         if (!nfp_eth_table) {
823                 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
824                 ret = -EIO;
825                 goto hwinfo_cleanup;
826         }
827
828         if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
829                 PMD_INIT_LOG(ERR, "Error when uploading firmware");
830                 ret = -EIO;
831                 goto eth_table_cleanup;
832         }
833
834         /* Now the symbol table should be there */
835         sym_tbl = nfp_rtsym_table_read(cpp);
836         if (!sym_tbl) {
837                 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
838                                 " symbol table");
839                 ret = -EIO;
840                 goto eth_table_cleanup;
841         }
842
843         total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
844         if (total_ports != (int)nfp_eth_table->count) {
845                 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
846                 ret = -EIO;
847                 goto sym_tbl_cleanup;
848         }
849
850         PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);
851
852         if (total_ports <= 0 || total_ports > 8) {
853                 PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
854                 ret = -ENODEV;
855                 goto sym_tbl_cleanup;
856         }
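           /*
            * The PF supports at most 8 physical ports; the same bound is
            * enforced against the NSP ethernet table in nfp_fw_setup().
            */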
857         /* Allocate memory for the PF "device" */
858         snprintf(name, sizeof(name), "nfp_pf%d", 0);
859         pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
860         if (!pf_dev) {
861                 ret = -ENOMEM;
862                 goto sym_tbl_cleanup;
863         }
864
865         /* Populate the newly created PF device */
866         pf_dev->cpp = cpp;
867         pf_dev->hwinfo = hwinfo;
868         pf_dev->sym_tbl = sym_tbl;
869         pf_dev->total_phyports = total_ports;
870
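           /*
            * A PF exposing more than one physical port is treated as
            * "multiport"; some features, e.g. per-queue RX interrupts in
            * nfp_net_start(), are refused in that mode.
            */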
871         if (total_ports > 1)
872                 pf_dev->multiport = true;
873
874         pf_dev->pci_dev = pci_dev;
875
876         /* Map the symbol table */
877         pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
878                                      pf_dev->total_phyports * 32768,
879                                      &pf_dev->ctrl_area);
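           /*
            * 32768 bytes per port matches the per-port NFP_PF_CSR_SLICE_SIZE
            * used when slicing the BAR in nfp_net_init() (assumed equal here).
            */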
880         if (!pf_dev->ctrl_bar) {
881                 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
882                 ret = -EIO;
883                 goto pf_cleanup;
884         }
885
886         PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
887
888         /* configure access to tx/rx vNIC BARs */
889         pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
890                                               NFP_PCIE_QUEUE(0),
891                                               NFP_QCP_QUEUE_AREA_SZ,
892                                               &pf_dev->hwqueues_area);
893         if (!pf_dev->hw_queues) {
894                 PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
895                 ret = -EIO;
896                 goto ctrl_area_cleanup;
897         }
898
899         PMD_INIT_LOG(DEBUG, "tx/rx bar address: %p", pf_dev->hw_queues);
900
901         /* Initialize and prep physical ports now
902          * This will loop through all physical ports
903          */
904         ret = nfp_init_phyports(pf_dev);
905         if (ret) {
906                 PMD_INIT_LOG(ERR, "Could not create physical ports");
907                 goto hwqueues_cleanup;
908         }
909
910         /* register the CPP bridge service here for primary use */
911         nfp_register_cpp_service(pf_dev->cpp);
912
913         return 0;
914
915 hwqueues_cleanup:
916         nfp_cpp_area_free(pf_dev->hwqueues_area);
917 ctrl_area_cleanup:
918         nfp_cpp_area_free(pf_dev->ctrl_area);
919 pf_cleanup:
920         rte_free(pf_dev);
921 sym_tbl_cleanup:
922         free(sym_tbl);
923 eth_table_cleanup:
924         free(nfp_eth_table);
925 hwinfo_cleanup:
926         free(hwinfo);
927 error:
928         return ret;
929 }
930
931 /*
932  * When attaching to the NFP4000/6000 PF on a secondary process there
933  * is no need to initialise the PF again. Only minimal work is required
934  * here
935  */
936 static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
937 {
938         struct nfp_cpp *cpp;
939         struct nfp_rtsym_table *sym_tbl;
940         int total_ports;
941         int i;
942         int err;
943
944         if (!pci_dev)
945                 return -ENODEV;
946
947         /*
948          * When the device is bound to UIO it could, by mistake, be used
949          * by two DPDK apps, and the UIO driver does not prevent it. This
950          * could lead to a serious problem when configuring the NFP CPP
951          * interface. Here we avoid this by telling the CPP init code to
952          * use a lock file if UIO is being used.
953          */
954         if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
955                 cpp = nfp_cpp_from_device_name(pci_dev, 0);
956         else
957                 cpp = nfp_cpp_from_device_name(pci_dev, 1);
958
959         if (!cpp) {
960                 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
961                 return -EIO;
962         }
963
964         /*
965          * We don't have access to the PF created in the primary process
966          * here so we have to read the number of ports from firmware
967          */
968         sym_tbl = nfp_rtsym_table_read(cpp);
969         if (!sym_tbl) {
970                 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
971                                 " symbol table");
972                 return -EIO;
973         }
974
975         total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
976
977         for (i = 0; i < total_ports; i++) {
978                 struct rte_eth_dev *eth_dev;
979                 char port_name[RTE_ETH_NAME_MAX_LEN];
980
981                 snprintf(port_name, sizeof(port_name), "%s_port%d",
982                          pci_dev->device.name, i);
983
984                 PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s",
985                     port_name);
986                 eth_dev = rte_eth_dev_attach_secondary(port_name);
987                 if (!eth_dev) {
988                         RTE_LOG(ERR, EAL,
989                                 "secondary process attach failed, "
990                                 "ethdev doesn't exist\n");
991                         return -ENODEV;
992                 }
993                 eth_dev->process_private = cpp;
994                 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
995                 eth_dev->rx_queue_count = nfp_net_rx_queue_count;
996                 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
997                 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
998                 rte_eth_dev_probing_finish(eth_dev);
999         }
1000
1001         /* Register the CPP bridge service for the secondary too */
1002         nfp_register_cpp_service(cpp);
1003
1004         return 0;
1005 }
1006
1007 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1008                             struct rte_pci_device *dev)
1009 {
1010         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1011                 return nfp_pf_init(dev);
1012         else
1013                 return nfp_pf_secondary_init(dev);
1014 }
1015
1016 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
1017         {
1018                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1019                                PCI_DEVICE_ID_NFP4000_PF_NIC)
1020         },
1021         {
1022                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1023                                PCI_DEVICE_ID_NFP6000_PF_NIC)
1024         },
1025         {
1026                 .vendor_id = 0,
1027         },
1028 };
1029
1030 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
1031 {
1032         struct rte_pci_device *pci_dev;
1033         uint16_t port_id;
1034
1035         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1036
1037         /* Free up all physical ports under PF */
1038         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1039                 rte_eth_dev_close(port_id);
1040         /*
1041          * Ports can be closed and freed but hotplugging is not
1042          * currently supported
1043          */
1044         return -ENOTSUP;
1045 }
1046
1047 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
1048 {
1049         return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
1050 }
1051
1052 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
1053         .id_table = pci_id_nfp_pf_net_map,
1054         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1055         .probe = nfp_pf_pci_probe,
1056         .remove = eth_nfp_pci_remove,
1057 };
1058
1059 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
1060 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
1061 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
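/*
 * Usage sketch (illustrative, not part of the driver): bind the PF to one of
 * the kernel modules listed above, e.g. with usertools/dpdk-devbind.py, and
 * the PMD will probe it and load firmware from DEFAULT_FW_PATH:
 *   dpdk-devbind.py -b vfio-pci 0000:03:00.0
 *   dpdk-testpmd -a 0000:03:00.0 -- -i
 */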
1062 /*
1063  * Local variables:
1064  * c-file-style: "Linux"
1065  * indent-tabs-mode: t
1066  * End:
1067  */