net/nfp: free port private data on close
dpdk.git: drivers/net/nfp/nfp_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7
8 /*
9  * vim:shiftwidth=8:noexpandtab
10  *
11  * @file dpdk/pmd/nfp_net.c
12  *
13  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
14  */
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_dev.h>
23 #include <rte_ether.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_mempool.h>
27 #include <rte_version.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_spinlock.h>
31 #include <rte_service_component.h>
32
33 #include "nfpcore/nfp_cpp.h"
34 #include "nfpcore/nfp_nffw.h"
35 #include "nfpcore/nfp_hwinfo.h"
36 #include "nfpcore/nfp_mip.h"
37 #include "nfpcore/nfp_rtsym.h"
38 #include "nfpcore/nfp_nsp.h"
39
40 #include "nfp_net_pmd.h"
41 #include "nfp_net_logs.h"
42 #include "nfp_net_ctrl.h"
43
44 #include <sys/types.h>
45 #include <sys/socket.h>
46 #include <sys/un.h>
47 #include <unistd.h>
48 #include <stdio.h>
49 #include <sys/ioctl.h>
50 #include <errno.h>
51
52 /* Prototypes */
53 static int nfp_net_close(struct rte_eth_dev *dev);
54 static int nfp_net_configure(struct rte_eth_dev *dev);
55 static void nfp_net_dev_interrupt_handler(void *param);
56 static void nfp_net_dev_interrupt_delayed_handler(void *param);
57 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
58 static int nfp_net_infos_get(struct rte_eth_dev *dev,
59                              struct rte_eth_dev_info *dev_info);
60 static int nfp_net_init(struct rte_eth_dev *eth_dev);
61 static int nfp_pf_init(struct rte_eth_dev *eth_dev);
62 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
63 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
64 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
65 static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
66 static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
67 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
68 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
69                                        uint16_t queue_idx);
70 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
71                                   uint16_t nb_pkts);
72 static void nfp_net_rx_queue_release(void *rxq);
73 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
74                                   uint16_t nb_desc, unsigned int socket_id,
75                                   const struct rte_eth_rxconf *rx_conf,
76                                   struct rte_mempool *mp);
77 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
78 static void nfp_net_tx_queue_release(void *txq);
79 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
80                                   uint16_t nb_desc, unsigned int socket_id,
81                                   const struct rte_eth_txconf *tx_conf);
82 static int nfp_net_start(struct rte_eth_dev *dev);
83 static int nfp_net_stats_get(struct rte_eth_dev *dev,
84                               struct rte_eth_stats *stats);
85 static int nfp_net_stats_reset(struct rte_eth_dev *dev);
86 static int nfp_net_stop(struct rte_eth_dev *dev);
87 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
88                                   uint16_t nb_pkts);
89
90 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
91 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
92                                    struct rte_eth_rss_conf *rss_conf);
93 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
94                     struct rte_eth_rss_reta_entry64 *reta_conf,
95                     uint16_t reta_size);
96 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
97                         struct rte_eth_rss_conf *rss_conf);
98 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
99                              struct rte_ether_addr *mac_addr);
100 static int32_t nfp_cpp_bridge_service_func(void *args);
101 static int nfp_fw_setup(struct rte_pci_device *dev,
102                         struct nfp_cpp *cpp,
103                         struct nfp_eth_table *nfp_eth_table,
104                         struct nfp_hwinfo *hwinfo);
105
106
107 /* The offset of the queue controller queues in the PCIe Target */
108 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
109
110 /* Maximum value which can be added to a queue with one transaction */
111 #define NFP_QCP_MAX_ADD 0x7f
112
113 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
114         (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
115
116 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
117 enum nfp_qcp_ptr {
118         NFP_QCP_READ_PTR = 0,
119         NFP_QCP_WRITE_PTR
120 };
121
122 /*
123  * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
124  * @q: Base address for queue structure
125  * @ptr: Add to the Read or Write pointer
126  * @val: Value to add to the queue pointer
127  *
128  * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
129  */
130 static inline void
131 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
132 {
133         uint32_t off;
134
135         if (ptr == NFP_QCP_READ_PTR)
136                 off = NFP_QCP_QUEUE_ADD_RPTR;
137         else
138                 off = NFP_QCP_QUEUE_ADD_WPTR;
139
140         while (val > NFP_QCP_MAX_ADD) {
141                 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
142                 val -= NFP_QCP_MAX_ADD;
143         }
144
145         nn_writel(rte_cpu_to_le_32(val), q + off);
146 }
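
/*
 * Illustrative sketch (not part of the driver): a single QCP write can only
 * add up to NFP_QCP_MAX_ADD (0x7f = 127), so a larger increment is split
 * into several writes. For example, advancing the write pointer of a
 * hypothetical queue base "q" by 200 descriptors:
 *
 *        nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, 200);
 *
 * results in two writes to NFP_QCP_QUEUE_ADD_WPTR, first 127 and then 73,
 * which the queue controller accumulates into a single advance of 200.
 */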
147
148 /*
149  * nfp_qcp_read - Read the current Read/Write pointer value for a queue
150  * @q:  Base address for queue structure
151  * @ptr: Read or Write pointer
152  */
153 static inline uint32_t
154 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
155 {
156         uint32_t off;
157         uint32_t val;
158
159         if (ptr == NFP_QCP_READ_PTR)
160                 off = NFP_QCP_QUEUE_STS_LO;
161         else
162                 off = NFP_QCP_QUEUE_STS_HI;
163
164         val = rte_le_to_cpu_32(nn_readl(q + off));
165
166         if (ptr == NFP_QCP_READ_PTR)
167                 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
168         else
169                 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
170 }
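
/*
 * Illustrative sketch (not part of the driver): the read and write pointers
 * returned by nfp_qcp_read() can be combined to estimate how many entries a
 * queue currently holds. "q" and a power-of-two "ring_size" are assumed:
 *
 *        uint32_t rd = nfp_qcp_read(q, NFP_QCP_READ_PTR);
 *        uint32_t wr = nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
 *        uint32_t used = (wr - rd) & (ring_size - 1);
 *
 * The pointers are free-running counters, so the difference must be masked
 * to the ring size.
 */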
171
172 /*
173  * Functions to read/write from/to Config BAR
174  * Performs any endian conversion necessary.
175  */
176 static inline uint8_t
177 nn_cfg_readb(struct nfp_net_hw *hw, int off)
178 {
179         return nn_readb(hw->ctrl_bar + off);
180 }
181
182 static inline void
183 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
184 {
185         nn_writeb(val, hw->ctrl_bar + off);
186 }
187
188 static inline uint32_t
189 nn_cfg_readl(struct nfp_net_hw *hw, int off)
190 {
191         return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
192 }
193
194 static inline void
195 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
196 {
197         nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
198 }
199
200 static inline uint64_t
201 nn_cfg_readq(struct nfp_net_hw *hw, int off)
202 {
203         return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
204 }
205
206 static inline void
207 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
208 {
209         nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
210 }
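
/*
 * Illustrative sketch (not part of the driver): these helpers are the access
 * path to the vNIC config BAR and hide the endian conversion. A typical use
 * is reading the capability word and checking a feature bit before requesting
 * it through nfp_net_reconfig() ("hw" and "new_ctrl" are assumed):
 *
 *        uint32_t cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *
 *        if (cap & NFP_NET_CFG_CTRL_RSS)
 *                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
 */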
211
212 static void
213 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
214 {
215         unsigned i;
216
217         if (rxq->rxbufs == NULL)
218                 return;
219
220         for (i = 0; i < rxq->rx_count; i++) {
221                 if (rxq->rxbufs[i].mbuf) {
222                         rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
223                         rxq->rxbufs[i].mbuf = NULL;
224                 }
225         }
226 }
227
228 static void
229 nfp_net_rx_queue_release(void *rx_queue)
230 {
231         struct nfp_net_rxq *rxq = rx_queue;
232
233         if (rxq) {
234                 nfp_net_rx_queue_release_mbufs(rxq);
235                 rte_free(rxq->rxbufs);
236                 rte_free(rxq);
237         }
238 }
239
240 static void
241 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
242 {
243         nfp_net_rx_queue_release_mbufs(rxq);
244         rxq->rd_p = 0;
245         rxq->nb_rx_hold = 0;
246 }
247
248 static void
249 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
250 {
251         unsigned i;
252
253         if (txq->txbufs == NULL)
254                 return;
255
256         for (i = 0; i < txq->tx_count; i++) {
257                 if (txq->txbufs[i].mbuf) {
258                         rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
259                         txq->txbufs[i].mbuf = NULL;
260                 }
261         }
262 }
263
264 static void
265 nfp_net_tx_queue_release(void *tx_queue)
266 {
267         struct nfp_net_txq *txq = tx_queue;
268
269         if (txq) {
270                 nfp_net_tx_queue_release_mbufs(txq);
271                 rte_free(txq->txbufs);
272                 rte_free(txq);
273         }
274 }
275
276 static void
277 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
278 {
279         nfp_net_tx_queue_release_mbufs(txq);
280         txq->wr_p = 0;
281         txq->rd_p = 0;
282 }
283
284 static int
285 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
286 {
287         int cnt;
288         uint32_t new;
289         struct timespec wait;
290
291         PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
292                     hw->qcp_cfg);
293
294         if (hw->qcp_cfg == NULL)
295                 rte_panic("Bad configuration queue pointer\n");
296
297         nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
298
299         wait.tv_sec = 0;
300         wait.tv_nsec = 1000000;
301
302         PMD_DRV_LOG(DEBUG, "Polling for update ack...");
303
304         /* Poll update field, waiting for NFP to ack the config */
305         for (cnt = 0; ; cnt++) {
306                 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
307                 if (new == 0)
308                         break;
309                 if (new & NFP_NET_CFG_UPDATE_ERR) {
310                         PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
311                         return -1;
312                 }
313                 if (cnt >= NFP_NET_POLL_TIMEOUT) {
314                         PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
315                                           " %dms", update, cnt);
316                         rte_panic("Exiting\n");
317                 }
318                 nanosleep(&wait, 0); /* wait 1ms */
319         }
320         PMD_DRV_LOG(DEBUG, "Ack DONE");
321         return 0;
322 }
323
324 /*
325  * Reconfigure the NIC
326  * @nn:    device to reconfigure
327  * @ctrl:    The value for the ctrl field in the BAR config
328  * @update:  The value for the update field in the BAR config
329  *
330  * Write the update word to the BAR and ping the reconfig queue. Then poll
331  * until the firmware has acknowledged the update by zeroing the update word.
332  */
333 static int
334 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
335 {
336         int err;
337
338         PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
339                     ctrl, update);
340
341         rte_spinlock_lock(&hw->reconfig_lock);
342
343         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
344         nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
345
346         rte_wmb();
347
348         err = __nfp_net_reconfig(hw, update);
349
350         rte_spinlock_unlock(&hw->reconfig_lock);
351
352         if (!err)
353                 return 0;
354
355         /*
356          * Any reconfig error reaching this point is one the caller can handle.
357          * Unrecoverable conditions trigger rte_panic inside __nfp_net_reconfig.
358          */
359         PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
360                      ctrl, update);
361         return -EIO;
362 }
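
/*
 * Illustrative sketch (not part of the driver): a typical reconfig request
 * pairs a ctrl bit with the matching update bit, as the promiscuous and RSS
 * paths below do. Turning a feature on roughly looks like:
 *
 *        uint32_t new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *
 *        if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) < 0)
 *                return -EIO;
 *
 *        hw->ctrl = new_ctrl;
 *
 * hw->ctrl is only updated after the firmware has acknowledged the change.
 */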
363
364 /*
365  * Configure an Ethernet device. This function must be invoked first
366  * before any other function in the Ethernet API. This function can
367  * also be re-invoked when a device is in the stopped state.
368  */
369 static int
370 nfp_net_configure(struct rte_eth_dev *dev)
371 {
372         struct rte_eth_conf *dev_conf;
373         struct rte_eth_rxmode *rxmode;
374         struct rte_eth_txmode *txmode;
375         struct nfp_net_hw *hw;
376
377         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
378
379         /*
380          * A DPDK app tells the ethdev layer how many queues to use and how
381          * those queues need to be configured. The ethdev core uses this
382          * information and makes sure no more queues than those advertised
383          * by the driver are requested. This function is called after that
384          * internal process.
385          */
386
387         PMD_INIT_LOG(DEBUG, "Configure");
388
389         dev_conf = &dev->data->dev_conf;
390         rxmode = &dev_conf->rxmode;
391         txmode = &dev_conf->txmode;
392
393         if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
394                 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
395
396         /* Checking TX mode */
397         if (txmode->mq_mode) {
398                 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
399                 return -EINVAL;
400         }
401
402         /* Checking RX mode */
403         if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
404             !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
405                 PMD_INIT_LOG(INFO, "RSS not supported");
406                 return -EINVAL;
407         }
408
409         return 0;
410 }
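
/*
 * Illustrative sketch (not part of the driver): an application-side
 * configuration that passes the checks above, i.e. RSS on the RX side and no
 * DCB/VMDq on the TX side ("port_id", "nb_rxq" and "nb_txq" are assumed):
 *
 *        struct rte_eth_conf conf = {
 *                .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *                .txmode = { .mq_mode = ETH_MQ_TX_NONE },
 *        };
 *
 *        rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */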
411
412 static void
413 nfp_net_enable_queues(struct rte_eth_dev *dev)
414 {
415         struct nfp_net_hw *hw;
416         uint64_t enabled_queues = 0;
417         int i;
418
419         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
420
421         /* Enabling the required TX queues in the device */
422         for (i = 0; i < dev->data->nb_tx_queues; i++)
423                 enabled_queues |= (1ULL << i);
424
425         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
426
427         enabled_queues = 0;
428
429         /* Enabling the required RX queues in the device */
430         for (i = 0; i < dev->data->nb_rx_queues; i++)
431                 enabled_queues |= (1ULL << i);
432
433         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
434 }
435
436 static void
437 nfp_net_disable_queues(struct rte_eth_dev *dev)
438 {
439         struct nfp_net_hw *hw;
440         uint32_t new_ctrl, update = 0;
441
442         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
443
444         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
445         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
446
447         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
448         update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
449                  NFP_NET_CFG_UPDATE_MSIX;
450
451         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
452                 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
453
454         /* If the reconfig fails, leave the hw state unchanged */
455         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
456                 return;
457
458         hw->ctrl = new_ctrl;
459 }
460
461 static int
462 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
463 {
464         int i;
465
466         for (i = 0; i < dev->data->nb_rx_queues; i++) {
467                 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
468                         return -1;
469         }
470         return 0;
471 }
472
473 static void
474 nfp_net_params_setup(struct nfp_net_hw *hw)
475 {
476         nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
477         nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
478 }
479
480 static void
481 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
482 {
483         hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
484 }
485
486 #define ETH_ADDR_LEN    6
487
488 static void
489 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
490 {
491         int i;
492
493         for (i = 0; i < ETH_ADDR_LEN; i++)
494                 dst[i] = src[i];
495 }
496
497 static int
498 nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
499 {
500         struct nfp_eth_table *nfp_eth_table;
501         struct nfp_net_hw *hw = NULL;
502
503         /* Grab a pointer to the correct physical port */
504         hw = pf_dev->ports[port];
505
506         nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
507
508         nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
509                          (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
510
511         free(nfp_eth_table);
512         return 0;
513 }
514
515 static void
516 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
517 {
518         uint32_t tmp;
519
520         tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
521         memcpy(&hw->mac_addr[0], &tmp, 4);
522
523         tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
524         memcpy(&hw->mac_addr[4], &tmp, 2);
525 }
526
527 static void
528 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
529 {
530         uint32_t mac0 = *(uint32_t *)mac;
531         uint16_t mac1;
532
533         nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
534
535         mac += 4;
536         mac1 = *(uint16_t *)mac;
537         nn_writew(rte_cpu_to_be_16(mac1),
538                   hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
539 }
540
541 static int
542 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
543 {
544         struct nfp_net_hw *hw;
545         uint32_t update, ctrl;
546
547         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
548         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
549             !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
550                 PMD_INIT_LOG(INFO, "MAC address cannot be changed while"
551                                   " the port is enabled");
552                 return -EBUSY;
553         }
554
555         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
556             !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
557                 return -EBUSY;
558
559         /* Writing new MAC to the specific port BAR address */
560         nfp_net_write_mac(hw, (uint8_t *)mac_addr);
561
562         /* Signal the NIC about the change */
563         update = NFP_NET_CFG_UPDATE_MACADDR;
564         ctrl = hw->ctrl;
565         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
566             (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
567                 ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
568         if (nfp_net_reconfig(hw, ctrl, update) < 0) {
569                 PMD_INIT_LOG(INFO, "MAC address update failed");
570                 return -EIO;
571         }
572         return 0;
573 }
574
575 static int
576 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
577                            struct rte_intr_handle *intr_handle)
578 {
579         struct nfp_net_hw *hw;
580         int i;
581
582         if (!intr_handle->intr_vec) {
583                 intr_handle->intr_vec =
584                         rte_zmalloc("intr_vec",
585                                     dev->data->nb_rx_queues * sizeof(int), 0);
586                 if (!intr_handle->intr_vec) {
587                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
588                                      " intr_vec", dev->data->nb_rx_queues);
589                         return -ENOMEM;
590                 }
591         }
592
593         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
594
595         if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
596                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
597                 /* UIO just supports one queue and no LSC */
598                 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
599                 intr_handle->intr_vec[0] = 0;
600         } else {
601                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
602                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
603                         /*
604                          * The first MSI-X vector is reserved for
605                          * non-EFD interrupts.
606                          */
607                         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
608                         intr_handle->intr_vec[i] = i + 1;
609                         PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
610                                             intr_handle->intr_vec[i]);
611                 }
612         }
613
614         /* Avoiding TX interrupts */
615         hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
616         return 0;
617 }
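
/*
 * Illustrative sketch (not part of the driver): to reach the code above, an
 * application enables RX queue interrupts in its port configuration and then
 * arms them per queue ("port_id" and "queue_id" are assumed):
 *
 *        struct rte_eth_conf conf = {
 *                .intr_conf = { .rxq = 1 },
 *        };
 *
 *        rte_eth_dev_configure(port_id, 1, 1, &conf);
 *        rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *
 * With VFIO, RX queue N is then mapped to MSI-X vector N + 1, vector 0 being
 * kept for non-EFD interrupts such as link state changes.
 */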
618
619 static uint32_t
620 nfp_check_offloads(struct rte_eth_dev *dev)
621 {
622         struct nfp_net_hw *hw;
623         struct rte_eth_conf *dev_conf;
624         struct rte_eth_rxmode *rxmode;
625         struct rte_eth_txmode *txmode;
626         uint32_t ctrl = 0;
627
628         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
629
630         dev_conf = &dev->data->dev_conf;
631         rxmode = &dev_conf->rxmode;
632         txmode = &dev_conf->txmode;
633
634         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
635                 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
636                         ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
637         }
638
639         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
640                 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
641                         ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
642         }
643
644         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
645                 hw->mtu = rxmode->max_rx_pkt_len;
646
647         if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
648                 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
649
650         /* L2 broadcast */
651         if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
652                 ctrl |= NFP_NET_CFG_CTRL_L2BC;
653
654         /* L2 multicast */
655         if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
656                 ctrl |= NFP_NET_CFG_CTRL_L2MC;
657
658         /* TX checksum offload */
659         if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
660             txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
661             txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
662                 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
663
664         /* LSO offload */
665         if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
666                 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
667                         ctrl |= NFP_NET_CFG_CTRL_LSO;
668                 else
669                         ctrl |= NFP_NET_CFG_CTRL_LSO2;
670         }
671
672         /* TX gather (multi-segment transmit) */
673         if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
674                 ctrl |= NFP_NET_CFG_CTRL_GATHER;
675
676         return ctrl;
677 }
678
679 static int
680 nfp_net_start(struct rte_eth_dev *dev)
681 {
682         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
683         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
684         uint32_t new_ctrl, update = 0;
685         struct nfp_net_hw *hw;
686         struct nfp_pf_dev *pf_dev;
687         struct rte_eth_conf *dev_conf;
688         struct rte_eth_rxmode *rxmode;
689         uint32_t intr_vector;
690         int ret;
691
692         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
693         pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
694
695         PMD_INIT_LOG(DEBUG, "Start");
696
697         /* Disabling queues just in case... */
698         nfp_net_disable_queues(dev);
699
700         /* Enabling the required queues in the device */
701         nfp_net_enable_queues(dev);
702
703         /* check and configure queue intr-vector mapping */
704         if (dev->data->dev_conf.intr_conf.rxq != 0) {
705                 if (pf_dev->multiport) {
706                         PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
707                                           "with NFP multiport PF");
708                         return -EINVAL;
709                 }
710                 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
711                         /*
712                          * Better not to share LSC with RX interrupts.
713                          * Unregistering LSC interrupt handler
714                          */
715                         rte_intr_callback_unregister(&pci_dev->intr_handle,
716                                 nfp_net_dev_interrupt_handler, (void *)dev);
717
718                         if (dev->data->nb_rx_queues > 1) {
719                                 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
720                                              "supports 1 queue with UIO");
721                                 return -EIO;
722                         }
723                 }
724                 intr_vector = dev->data->nb_rx_queues;
725                 if (rte_intr_efd_enable(intr_handle, intr_vector))
726                         return -1;
727
728                 nfp_configure_rx_interrupt(dev, intr_handle);
729                 update = NFP_NET_CFG_UPDATE_MSIX;
730         }
731
732         rte_intr_enable(intr_handle);
733
734         new_ctrl = nfp_check_offloads(dev);
735
736         /* Writing configuration parameters in the device */
737         nfp_net_params_setup(hw);
738
739         dev_conf = &dev->data->dev_conf;
740         rxmode = &dev_conf->rxmode;
741
742         if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
743                 nfp_net_rss_config_default(dev);
744                 update |= NFP_NET_CFG_UPDATE_RSS;
745                 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
746         }
747
748         /* Enable device */
749         new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
750
751         update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
752
753         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
754                 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
755
756         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
757         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
758                 return -EIO;
759
760         /*
761          * Allocating rte mbufs for configured rx queues.
762          * This requires queues being enabled before
763          */
764         if (nfp_net_rx_freelist_setup(dev) < 0) {
765                 ret = -ENOMEM;
766                 goto error;
767         }
768
769         if (hw->is_phyport) {
770                 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
771                         /* Configure the physical port up */
772                         nfp_eth_set_configured(hw->cpp, hw->idx, 1);
773                 else
774                         nfp_eth_set_configured(dev->process_private,
775                                                hw->idx, 1);
776         }
777
778         hw->ctrl = new_ctrl;
779
780         return 0;
781
782 error:
783         /*
784          * An error returned by this function usually means the app is
785          * exiting, at which point the system releases all the allocated
786          * memory, including memory coming from hugepages.
787          *
788          * The device could be enabled at this point, with some queues
789          * ready for receiving packets. This happens when the call to
790          * nfp_net_rx_freelist_setup() succeeds for some queues but
791          * fails for subsequent ones.
792          *
793          * The app is expected to exit on such an error, but it is better
794          * to tell the device first.
795          */
796         nfp_net_disable_queues(dev);
797
798         return ret;
799 }
800
801 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
802 static int
803 nfp_net_stop(struct rte_eth_dev *dev)
804 {
805         int i;
806         struct nfp_net_hw *hw;
807
808         PMD_INIT_LOG(DEBUG, "Stop");
809
810         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
811
812         nfp_net_disable_queues(dev);
813
814         /* Clear queues */
815         for (i = 0; i < dev->data->nb_tx_queues; i++) {
816                 nfp_net_reset_tx_queue(
817                         (struct nfp_net_txq *)dev->data->tx_queues[i]);
818         }
819
820         for (i = 0; i < dev->data->nb_rx_queues; i++) {
821                 nfp_net_reset_rx_queue(
822                         (struct nfp_net_rxq *)dev->data->rx_queues[i]);
823         }
824
825         if (hw->is_phyport) {
826                 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
827                         /* Configure the physical port down */
828                         nfp_eth_set_configured(hw->cpp, hw->idx, 0);
829                 else
830                         nfp_eth_set_configured(dev->process_private,
831                                                hw->idx, 0);
832         }
833
834         return 0;
835 }
836
837 /* Set the link up. */
838 static int
839 nfp_net_set_link_up(struct rte_eth_dev *dev)
840 {
841         struct nfp_net_hw *hw;
842
843         PMD_DRV_LOG(DEBUG, "Set link up");
844
845         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
846
847         if (!hw->is_phyport)
848                 return -ENOTSUP;
849
850         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
851                 /* Configure the physical port up */
852                 return nfp_eth_set_configured(hw->cpp, hw->idx, 1);
853         else
854                 return nfp_eth_set_configured(dev->process_private,
855                                               hw->idx, 1);
856 }
857
858 /* Set the link down. */
859 static int
860 nfp_net_set_link_down(struct rte_eth_dev *dev)
861 {
862         struct nfp_net_hw *hw;
863
864         PMD_DRV_LOG(DEBUG, "Set link down");
865
866         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
867
868         if (!hw->is_phyport)
869                 return -ENOTSUP;
870
871         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
872                 /* Configure the physical port down */
873                 return nfp_eth_set_configured(hw->cpp, hw->idx, 0);
874         else
875                 return nfp_eth_set_configured(dev->process_private,
876                                               hw->idx, 0);
877 }
878
879 /* Reset and stop device. The device can not be restarted. */
880 static int
881 nfp_net_close(struct rte_eth_dev *dev)
882 {
883         struct nfp_net_hw *hw;
884         struct rte_pci_device *pci_dev;
885         int i;
886
887         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
888                 return 0;
889
890         PMD_INIT_LOG(DEBUG, "Close");
891
892         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
893         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
894
895         /*
896          * We assume that the DPDK application is stopping all the
897          * threads/queues before calling the device close function.
898          */
899
900         nfp_net_disable_queues(dev);
901
902         /* Clear queues */
903         for (i = 0; i < dev->data->nb_tx_queues; i++) {
904                 nfp_net_reset_tx_queue(
905                         (struct nfp_net_txq *)dev->data->tx_queues[i]);
906         }
907
908         for (i = 0; i < dev->data->nb_rx_queues; i++) {
909                 nfp_net_reset_rx_queue(
910                         (struct nfp_net_rxq *)dev->data->rx_queues[i]);
911         }
912
913         /* Only free PF resources after all physical ports have been closed */
914         if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
915             pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
916                 struct nfp_pf_dev *pf_dev;
917                 pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
918
919                 /* Mark this port as unused and free device private resources */
920                 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
921                 pf_dev->ports[hw->idx] = NULL;
922                 rte_eth_dev_release_port(dev);
923
924                 for (i = 0; i < pf_dev->total_phyports; i++) {
925                         /* Check to see if ports are still in use */
926                         if (pf_dev->ports[i])
927                                 return 0;
928                 }
929
930                 /* Now it is safe to free all PF resources */
931                 PMD_INIT_LOG(INFO, "Freeing PF resources");
932                 nfp_cpp_area_free(pf_dev->ctrl_area);
933                 nfp_cpp_area_free(pf_dev->hwqueues_area);
934                 free(pf_dev->hwinfo);
935                 free(pf_dev->sym_tbl);
936                 nfp_cpp_free(pf_dev->cpp);
937                 rte_free(pf_dev);
938         }
939
940         rte_intr_disable(&pci_dev->intr_handle);
941
942         /* unregister callback func from eal lib */
943         rte_intr_callback_unregister(&pci_dev->intr_handle,
944                                      nfp_net_dev_interrupt_handler,
945                                      (void *)dev);
946
947         /*
948          * The ixgbe PMD disables the PCIe master on the
949          * device. The i40e PMD does not...
950          */
951
952         return 0;
953 }
954
955 static int
956 nfp_net_promisc_enable(struct rte_eth_dev *dev)
957 {
958         uint32_t new_ctrl, update = 0;
959         struct nfp_net_hw *hw;
960         int ret;
961
962         PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
963
964         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
965
966         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
967                 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
968                 return -ENOTSUP;
969         }
970
971         if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
972                 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
973                 return 0;
974         }
975
976         new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
977         update = NFP_NET_CFG_UPDATE_GEN;
978
979         /*
980          * DPDK sets promiscuous mode on just after this call assuming
981          * it cannot fail ...
982          */
983         ret = nfp_net_reconfig(hw, new_ctrl, update);
984         if (ret < 0)
985                 return ret;
986
987         hw->ctrl = new_ctrl;
988
989         return 0;
990 }
991
992 static int
993 nfp_net_promisc_disable(struct rte_eth_dev *dev)
994 {
995         uint32_t new_ctrl, update = 0;
996         struct nfp_net_hw *hw;
997         int ret;
998
999         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1000
1001         if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
1002                 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
1003                 return 0;
1004         }
1005
1006         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
1007         update = NFP_NET_CFG_UPDATE_GEN;
1008
1009         /*
1010          * DPDK sets promiscuous mode off just before this call
1011          * assuming it cannot fail ...
1012          */
1013         ret = nfp_net_reconfig(hw, new_ctrl, update);
1014         if (ret < 0)
1015                 return ret;
1016
1017         hw->ctrl = new_ctrl;
1018
1019         return 0;
1020 }
1021
1022 /*
1023  * return 0 means link status changed, -1 means not changed
1024  *
1025  * Wait to complete is needed as it can take up to 9 seconds to get the Link
1026  * status.
1027  */
1028 static int
1029 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1030 {
1031         struct nfp_net_hw *hw;
1032         struct rte_eth_link link;
1033         uint32_t nn_link_status;
1034         int ret;
1035
1036         static const uint32_t ls_to_ethtool[] = {
1037                 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1038                 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
1039                 [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
1040                 [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
1041                 [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
1042                 [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
1043                 [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
1044                 [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
1045         };
1046
1047         PMD_DRV_LOG(DEBUG, "Link update");
1048
1049         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1050
1051         nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1052
1053         memset(&link, 0, sizeof(struct rte_eth_link));
1054
1055         if (nn_link_status & NFP_NET_CFG_STS_LINK)
1056                 link.link_status = ETH_LINK_UP;
1057
1058         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1059
1060         nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1061                          NFP_NET_CFG_STS_LINK_RATE_MASK;
1062
1063         if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1064                 link.link_speed = ETH_SPEED_NUM_NONE;
1065         else
1066                 link.link_speed = ls_to_ethtool[nn_link_status];
1067
1068         ret = rte_eth_linkstatus_set(dev, &link);
1069         if (ret == 0) {
1070                 if (link.link_status)
1071                         PMD_DRV_LOG(INFO, "NIC Link is Up");
1072                 else
1073                         PMD_DRV_LOG(INFO, "NIC Link is Down");
1074         }
1075         return ret;
1076 }
1077
1078 static int
1079 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1080 {
1081         int i;
1082         struct nfp_net_hw *hw;
1083         struct rte_eth_stats nfp_dev_stats;
1084
1085         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1086
1087         /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1088
1089         memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1090
1091         /* reading per RX ring stats */
1092         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1093                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1094                         break;
1095
1096                 nfp_dev_stats.q_ipackets[i] =
1097                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1098
1099                 nfp_dev_stats.q_ipackets[i] -=
1100                         hw->eth_stats_base.q_ipackets[i];
1101
1102                 nfp_dev_stats.q_ibytes[i] =
1103                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1104
1105                 nfp_dev_stats.q_ibytes[i] -=
1106                         hw->eth_stats_base.q_ibytes[i];
1107         }
1108
1109         /* reading per TX ring stats */
1110         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1111                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1112                         break;
1113
1114                 nfp_dev_stats.q_opackets[i] =
1115                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1116
1117                 nfp_dev_stats.q_opackets[i] -=
1118                         hw->eth_stats_base.q_opackets[i];
1119
1120                 nfp_dev_stats.q_obytes[i] =
1121                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1122
1123                 nfp_dev_stats.q_obytes[i] -=
1124                         hw->eth_stats_base.q_obytes[i];
1125         }
1126
1127         nfp_dev_stats.ipackets =
1128                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1129
1130         nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1131
1132         nfp_dev_stats.ibytes =
1133                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1134
1135         nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1136
1137         nfp_dev_stats.opackets =
1138                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1139
1140         nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1141
1142         nfp_dev_stats.obytes =
1143                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1144
1145         nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1146
1147         /* reading general device stats */
1148         nfp_dev_stats.ierrors =
1149                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1150
1151         nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1152
1153         nfp_dev_stats.oerrors =
1154                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1155
1156         nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1157
1158         /* RX ring mbuf allocation failures */
1159         nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1160
1161         nfp_dev_stats.imissed =
1162                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1163
1164         nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1165
1166         if (stats) {
1167                 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1168                 return 0;
1169         }
1170         return -EINVAL;
1171 }
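
/*
 * Illustrative note (not part of the driver): the hardware counters are never
 * cleared. Instead, nfp_net_stats_reset() below snapshots them into
 * hw->eth_stats_base and nfp_net_stats_get() reports the difference, so the
 * usual application pattern works as expected ("port_id" is assumed):
 *
 *        struct rte_eth_stats stats;
 *
 *        rte_eth_stats_reset(port_id);
 *        rte_eth_stats_get(port_id, &stats);
 *
 * where stats.ipackets etc. only count traffic seen since the reset.
 */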
1172
1173 static int
1174 nfp_net_stats_reset(struct rte_eth_dev *dev)
1175 {
1176         int i;
1177         struct nfp_net_hw *hw;
1178
1179         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1180
1181         /*
1182          * hw->eth_stats_base records the per-counter starting point.
1183          * Let's update it now.
1184          */
1185
1186         /* reading per RX ring stats */
1187         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1188                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1189                         break;
1190
1191                 hw->eth_stats_base.q_ipackets[i] =
1192                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1193
1194                 hw->eth_stats_base.q_ibytes[i] =
1195                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1196         }
1197
1198         /* reading per TX ring stats */
1199         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1200                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1201                         break;
1202
1203                 hw->eth_stats_base.q_opackets[i] =
1204                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1205
1206                 hw->eth_stats_base.q_obytes[i] =
1207                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1208         }
1209
1210         hw->eth_stats_base.ipackets =
1211                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1212
1213         hw->eth_stats_base.ibytes =
1214                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1215
1216         hw->eth_stats_base.opackets =
1217                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1218
1219         hw->eth_stats_base.obytes =
1220                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1221
1222         /* reading general device stats */
1223         hw->eth_stats_base.ierrors =
1224                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1225
1226         hw->eth_stats_base.oerrors =
1227                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1228
1229         /* RX ring mbuf allocation failures */
1230         dev->data->rx_mbuf_alloc_failed = 0;
1231
1232         hw->eth_stats_base.imissed =
1233                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1234
1235         return 0;
1236 }
1237
1238 static int
1239 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1240 {
1241         struct nfp_net_hw *hw;
1242
1243         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1244
1245         dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1246         dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1247         dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1248         dev_info->max_rx_pktlen = hw->max_mtu;
1249         /* Next should change when PF support is implemented */
1250         dev_info->max_mac_addrs = 1;
1251
1252         if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1253                 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1254
1255         if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1256                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1257                                              DEV_RX_OFFLOAD_UDP_CKSUM |
1258                                              DEV_RX_OFFLOAD_TCP_CKSUM;
1259
1260         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
1261                                      DEV_RX_OFFLOAD_RSS_HASH;
1262
1263         if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1264                 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1265
1266         if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1267                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1268                                              DEV_TX_OFFLOAD_UDP_CKSUM |
1269                                              DEV_TX_OFFLOAD_TCP_CKSUM;
1270
1271         if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
1272                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1273
1274         if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
1275                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
1276
1277         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1278                 .rx_thresh = {
1279                         .pthresh = DEFAULT_RX_PTHRESH,
1280                         .hthresh = DEFAULT_RX_HTHRESH,
1281                         .wthresh = DEFAULT_RX_WTHRESH,
1282                 },
1283                 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1284                 .rx_drop_en = 0,
1285         };
1286
1287         dev_info->default_txconf = (struct rte_eth_txconf) {
1288                 .tx_thresh = {
1289                         .pthresh = DEFAULT_TX_PTHRESH,
1290                         .hthresh = DEFAULT_TX_HTHRESH,
1291                         .wthresh = DEFAULT_TX_WTHRESH,
1292                 },
1293                 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1294                 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1295         };
1296
1297         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1298                 .nb_max = NFP_NET_MAX_RX_DESC,
1299                 .nb_min = NFP_NET_MIN_RX_DESC,
1300                 .nb_align = NFP_ALIGN_RING_DESC,
1301         };
1302
1303         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1304                 .nb_max = NFP_NET_MAX_TX_DESC,
1305                 .nb_min = NFP_NET_MIN_TX_DESC,
1306                 .nb_align = NFP_ALIGN_RING_DESC,
1307                 .nb_seg_max = NFP_TX_MAX_SEG,
1308                 .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1309         };
1310
1311         dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1312                                            ETH_RSS_NONFRAG_IPV4_TCP |
1313                                            ETH_RSS_NONFRAG_IPV4_UDP |
1314                                            ETH_RSS_IPV6 |
1315                                            ETH_RSS_NONFRAG_IPV6_TCP |
1316                                            ETH_RSS_NONFRAG_IPV6_UDP;
1317
1318         dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1319         dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1320
1321         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1322                                ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1323                                ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1324
1325         return 0;
1326 }
1327
1328 static const uint32_t *
1329 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1330 {
1331         static const uint32_t ptypes[] = {
1332                 /* refers to nfp_net_set_hash() */
1333                 RTE_PTYPE_INNER_L3_IPV4,
1334                 RTE_PTYPE_INNER_L3_IPV6,
1335                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1336                 RTE_PTYPE_INNER_L4_MASK,
1337                 RTE_PTYPE_UNKNOWN
1338         };
1339
1340         if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1341                 return ptypes;
1342         return NULL;
1343 }
1344
1345 static uint32_t
1346 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1347 {
1348         struct nfp_net_rxq *rxq;
1349         struct nfp_net_rx_desc *rxds;
1350         uint32_t idx;
1351         uint32_t count;
1352
1353         rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1354
1355         idx = rxq->rd_p;
1356
1357         count = 0;
1358
1359         /*
1360          * Other PMDs just check the DD bit in strides of 4 descriptors,
1361          * counting all four when the first one has the DD bit set. Of
1362          * course, this is not accurate but can be good for performance.
1363          * Ideally, it should be done on descriptor chunks belonging to
1364          * the same cache line.
1365          */
1366
1367         while (count < rxq->rx_count) {
1368                 rxds = &rxq->rxds[idx];
1369                 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1370                         break;
1371
1372                 count++;
1373                 idx++;
1374
1375                 /* Wrapping? */
1376                 if (idx == rxq->rx_count)
1377                         idx = 0;
1378         }
1379
1380         return count;
1381 }
1382
1383 static int
1384 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1385 {
1386         struct rte_pci_device *pci_dev;
1387         struct nfp_net_hw *hw;
1388         int base = 0;
1389
1390         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1391         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1392
1393         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1394                 base = 1;
1395
1396         /* Make sure all updates are written before un-masking */
1397         rte_wmb();
1398         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1399                       NFP_NET_CFG_ICR_UNMASKED);
1400         return 0;
1401 }
1402
1403 static int
1404 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1405 {
1406         struct rte_pci_device *pci_dev;
1407         struct nfp_net_hw *hw;
1408         int base = 0;
1409
1410         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1411         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1412
1413         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1414                 base = 1;
1415
1416         /* Make sure all updates are written before un-masking */
1417         rte_wmb();
1418         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1419         return 0;
1420 }
1421
1422 static void
1423 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1424 {
1425         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1426         struct rte_eth_link link;
1427
1428         rte_eth_linkstatus_get(dev, &link);
1429         if (link.link_status)
1430                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1431                             dev->data->port_id, link.link_speed,
1432                             link.link_duplex == ETH_LINK_FULL_DUPLEX
1433                             ? "full-duplex" : "half-duplex");
1434         else
1435                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1436                             dev->data->port_id);
1437
1438         PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1439                     pci_dev->addr.domain, pci_dev->addr.bus,
1440                     pci_dev->addr.devid, pci_dev->addr.function);
1441 }
1442
1443 /* Interrupt configuration and handling */
1444
1445 /*
1446  * nfp_net_irq_unmask - Unmask an interrupt
1447  *
1448  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1449  * clear the ICR for the entry.
1450  */
1451 static void
1452 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1453 {
1454         struct nfp_net_hw *hw;
1455         struct rte_pci_device *pci_dev;
1456
1457         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1458         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1459
1460         if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1461                 /* If MSI-X auto-masking is used, clear the entry */
1462                 rte_wmb();
1463                 rte_intr_ack(&pci_dev->intr_handle);
1464         } else {
1465                 /* Make sure all updates are written before un-masking */
1466                 rte_wmb();
1467                 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1468                               NFP_NET_CFG_ICR_UNMASKED);
1469         }
1470 }
1471
1472 static void
1473 nfp_net_dev_interrupt_handler(void *param)
1474 {
1475         int64_t timeout;
1476         struct rte_eth_link link;
1477         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1478
1479         PMD_DRV_LOG(DEBUG, "We got an LSC interrupt");
1480
1481         rte_eth_linkstatus_get(dev, &link);
1482
1483         nfp_net_link_update(dev, 0);
1484
1485         /* link likely coming up */
1486         if (!link.link_status) {
1487                 /* handle it 1 sec later, waiting for it to become stable */
1488                 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1489         /* link likely going down */
1490         } else {
1491                 /* handle it 4 sec later, waiting for it to become stable */
1492                 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1493         }
1494
1495         if (rte_eal_alarm_set(timeout * 1000,
1496                               nfp_net_dev_interrupt_delayed_handler,
1497                               (void *)dev) < 0) {
1498                 PMD_INIT_LOG(ERR, "Error setting alarm");
1499                 /* Unmasking */
1500                 nfp_net_irq_unmask(dev);
1501         }
1502 }
1503
1504 /*
1505  * Interrupt handler registered as an alarm callback for delayed handling of
1506  * a specific interrupt, waiting for the NIC state to become stable. As the
1507  * NFP interrupt state is not stable right after the link goes down, it needs
1508  * to wait 4 seconds to get a stable status.
1509  *
1510  * @param param    The address of the parameter (struct rte_eth_dev *)
1512  *
1513  * @return  void
1514  */
1515 static void
1516 nfp_net_dev_interrupt_delayed_handler(void *param)
1517 {
1518         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1519
1520         nfp_net_link_update(dev, 0);
1521         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1522
1523         nfp_net_dev_link_status_print(dev);
1524
1525         /* Unmasking */
1526         nfp_net_irq_unmask(dev);
1527 }
1528
1529 static int
1530 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1531 {
1532         struct nfp_net_hw *hw;
1533
1534         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1535
1536         /* check that mtu is within the allowed range */
1537         if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
1538                 return -EINVAL;
1539
1540         /* mtu setting is forbidden if port is started */
1541         if (dev->data->dev_started) {
1542                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1543                             dev->data->port_id);
1544                 return -EBUSY;
1545         }
1546
1547         /* switch to jumbo mode if needed */
1548         if ((uint32_t)mtu > RTE_ETHER_MTU)
1549                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1550         else
1551                 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1552
1553         /* update max frame size */
1554         dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1555
1556         /* writing to configuration space */
1557         nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1558
1559         hw->mtu = mtu;
1560
1561         return 0;
1562 }
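
/*
 * Illustrative sketch (not part of the driver): since the check above rejects
 * MTU changes on a started port, an application is expected to change the MTU
 * while the port is stopped ("port_id" and "new_mtu" are assumed):
 *
 *        rte_eth_dev_stop(port_id);
 *        rte_eth_dev_set_mtu(port_id, new_mtu);
 *        rte_eth_dev_start(port_id);
 *
 * MTUs above RTE_ETHER_MTU additionally switch the port into jumbo frame mode
 * via DEV_RX_OFFLOAD_JUMBO_FRAME.
 */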
1563
1564 static int
1565 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1566                        uint16_t queue_idx, uint16_t nb_desc,
1567                        unsigned int socket_id,
1568                        const struct rte_eth_rxconf *rx_conf,
1569                        struct rte_mempool *mp)
1570 {
1571         const struct rte_memzone *tz;
1572         struct nfp_net_rxq *rxq;
1573         struct nfp_net_hw *hw;
1574         uint32_t rx_desc_sz;
1575
1576         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1577
1578         PMD_INIT_FUNC_TRACE();
1579
1580         /* Validating number of descriptors */
1581         rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
1582         if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
1583             nb_desc > NFP_NET_MAX_RX_DESC ||
1584             nb_desc < NFP_NET_MIN_RX_DESC) {
1585                 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1586                 return -EINVAL;
1587         }
1588
1589         /*
1590          * Free memory prior to re-allocation if needed. This is the case after
1591          * calling nfp_net_stop
1592          */
1593         if (dev->data->rx_queues[queue_idx]) {
1594                 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1595                 dev->data->rx_queues[queue_idx] = NULL;
1596         }
1597
1598         /* Allocating rx queue data structure */
1599         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1600                                  RTE_CACHE_LINE_SIZE, socket_id);
1601         if (rxq == NULL)
1602                 return -ENOMEM;
1603
1604         /* Hw queues mapping based on firmware configuration */
1605         rxq->qidx = queue_idx;
1606         rxq->fl_qcidx = queue_idx * hw->stride_rx;
1607         rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1608         rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1609         rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1610
1611         /*
1612          * Tracking mbuf size for detecting a potential mbuf overflow due to
1613          * RX offset
1614          */
1615         rxq->mem_pool = mp;
1616         rxq->mbuf_size = rxq->mem_pool->elt_size;
1617         rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1618         hw->flbufsz = rxq->mbuf_size;
1619
1620         rxq->rx_count = nb_desc;
1621         rxq->port_id = dev->data->port_id;
1622         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1623         rxq->drop_en = rx_conf->rx_drop_en;
1624
1625         /*
1626          * Allocate RX ring hardware descriptors. A memzone large enough to
1627          * handle the maximum ring size is allocated in order to allow for
1628          * resizing in later calls to the queue setup function.
1629          */
1630         tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1631                                    sizeof(struct nfp_net_rx_desc) *
1632                                    NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1633                                    socket_id);
1634
1635         if (tz == NULL) {
1636                 PMD_DRV_LOG(ERR, "Error allocating rx dma");
1637                 nfp_net_rx_queue_release(rxq);
1638                 return -ENOMEM;
1639         }
1640
1641         /* Saving physical and virtual addresses for the RX ring */
1642         rxq->dma = (uint64_t)tz->iova;
1643         rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1644
1645         /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1646         rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1647                                          sizeof(*rxq->rxbufs) * nb_desc,
1648                                          RTE_CACHE_LINE_SIZE, socket_id);
1649         if (rxq->rxbufs == NULL) {
1650                 nfp_net_rx_queue_release(rxq);
1651                 return -ENOMEM;
1652         }
1653
1654         PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1655                    rxq->rxbufs, rxq->rxds, rxq->dma);
1656
1657         nfp_net_reset_rx_queue(rxq);
1658
1659         dev->data->rx_queues[queue_idx] = rxq;
1660         rxq->hw = hw;
1661
1662         /*
1663          * Telling the HW about the physical address of the RX ring and number
1664          * of descriptors in log2 format
1665          */
1666         nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1667         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1668
1669         return 0;
1670 }
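
/*
 * Usage sketch (illustrative only): this routine is reached through
 * rte_eth_rx_queue_setup(). A hypothetical application-side call, assuming a
 * dedicated mbuf pool, could be:
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   if (mp == NULL ||
 *       rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp))
 *       printf("RX queue setup failed\n");
 *
 * Passing NULL as rx_conf selects the defaults reported by
 * rte_eth_dev_info_get(), and nb_desc must satisfy the limits checked above.
 */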
1671
1672 static int
1673 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1674 {
1675         struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1676         uint64_t dma_addr;
1677         unsigned i;
1678
1679         PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
1680                    rxq->rx_count);
1681
1682         for (i = 0; i < rxq->rx_count; i++) {
1683                 struct nfp_net_rx_desc *rxd;
1684                 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1685
1686                 if (mbuf == NULL) {
1687                         PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
1688                                 (unsigned)rxq->qidx);
1689                         return -ENOMEM;
1690                 }
1691
1692                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1693
1694                 rxd = &rxq->rxds[i];
1695                 rxd->fld.dd = 0;
1696                 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1697                 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1698                 rxe[i].mbuf = mbuf;
1699                 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
1700         }
1701
1702         /* Make sure all writes are flushed before telling the hardware */
1703         rte_wmb();
1704
1705         /* Not advertising the whole ring as the firmware gets confused if so */
1706         PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
1707                    rxq->rx_count - 1);
1708
1709         nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1710
1711         return 0;
1712 }
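
/*
 * Worked example of the descriptor address split used above: the NFP RX
 * descriptor carries a 40-bit DMA address as an 8-bit high part and a 32-bit
 * low part. With dma_addr = 0x0000001234567890:
 *
 *   (dma_addr >> 32) & 0xff    yields 0x12        (dma_addr_hi)
 *   dma_addr & 0xffffffff      yields 0x34567890  (dma_addr_lo)
 *
 * This 40-bit limit is also why nfp_net_init() rejects devices whose DMA
 * mask cannot be restricted to 40 bits.
 */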
1713
1714 static int
1715 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1716                        uint16_t nb_desc, unsigned int socket_id,
1717                        const struct rte_eth_txconf *tx_conf)
1718 {
1719         const struct rte_memzone *tz;
1720         struct nfp_net_txq *txq;
1721         uint16_t tx_free_thresh;
1722         struct nfp_net_hw *hw;
1723         uint32_t tx_desc_sz;
1724
1725         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1726
1727         PMD_INIT_FUNC_TRACE();
1728
1729         /* Validating number of descriptors */
1730         tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
1731         if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
1732             nb_desc > NFP_NET_MAX_TX_DESC ||
1733             nb_desc < NFP_NET_MIN_TX_DESC) {
1734                 PMD_DRV_LOG(ERR, "Wrong nb_desc value");
1735                 return -EINVAL;
1736         }
1737
1738         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1739                                     tx_conf->tx_free_thresh :
1740                                     DEFAULT_TX_FREE_THRESH);
1741
1742         if (tx_free_thresh > (nb_desc)) {
1743                 PMD_DRV_LOG(ERR,
1744                         "tx_free_thresh must be less than the number of TX "
1745                         "descriptors. (tx_free_thresh=%u port=%d "
1746                         "queue=%d)", (unsigned int)tx_free_thresh,
1747                         dev->data->port_id, (int)queue_idx);
1748                 return -(EINVAL);
1749         }
1750
1751         /*
1752          * Free memory prior to re-allocation if needed. This is the case after
1753          * calling nfp_net_stop
1754          */
1755         if (dev->data->tx_queues[queue_idx]) {
1756                 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1757                            queue_idx);
1758                 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1759                 dev->data->tx_queues[queue_idx] = NULL;
1760         }
1761
1762         /* Allocating tx queue data structure */
1763         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1764                                  RTE_CACHE_LINE_SIZE, socket_id);
1765         if (txq == NULL) {
1766                 PMD_DRV_LOG(ERR, "Error allocating tx dma");
1767                 return -ENOMEM;
1768         }
1769
1770         /*
1771          * Allocate TX ring hardware descriptors. A memzone large enough to
1772          * handle the maximum ring size is allocated in order to allow for
1773          * resizing in later calls to the queue setup function.
1774          */
1775         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1776                                    sizeof(struct nfp_net_tx_desc) *
1777                                    NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1778                                    socket_id);
1779         if (tz == NULL) {
1780                 PMD_DRV_LOG(ERR, "Error allocating tx dma");
1781                 nfp_net_tx_queue_release(txq);
1782                 return -ENOMEM;
1783         }
1784
1785         txq->tx_count = nb_desc;
1786         txq->tx_free_thresh = tx_free_thresh;
1787         txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1788         txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1789         txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1790
1791         /* queue mapping based on firmware configuration */
1792         txq->qidx = queue_idx;
1793         txq->tx_qcidx = queue_idx * hw->stride_tx;
1794         txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1795
1796         txq->port_id = dev->data->port_id;
1797
1798         /* Saving physical and virtual addresses for the TX ring */
1799         txq->dma = (uint64_t)tz->iova;
1800         txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1801
1802         /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1803         txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1804                                          sizeof(*txq->txbufs) * nb_desc,
1805                                          RTE_CACHE_LINE_SIZE, socket_id);
1806         if (txq->txbufs == NULL) {
1807                 nfp_net_tx_queue_release(txq);
1808                 return -ENOMEM;
1809         }
1810         PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
1811                    txq->txbufs, txq->txds, txq->dma);
1812
1813         nfp_net_reset_tx_queue(txq);
1814
1815         dev->data->tx_queues[queue_idx] = txq;
1816         txq->hw = hw;
1817
1818         /*
1819          * Telling the HW about the physical address of the TX ring and number
1820          * of descriptors in log2 format
1821          */
1822         nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1823         nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1824
1825         return 0;
1826 }
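
/*
 * Usage sketch (illustrative only): the TX counterpart is reached through
 * rte_eth_tx_queue_setup(), e.g.:
 *
 *   if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL) != 0)
 *       printf("TX queue setup failed\n");
 *
 * With a NULL tx_conf the ethdev defaults apply; if the resulting
 * tx_free_thresh is zero, the code above falls back to
 * DEFAULT_TX_FREE_THRESH.
 */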
1827
1828 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1829 static inline void
1830 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1831                struct rte_mbuf *mb)
1832 {
1833         uint64_t ol_flags;
1834         struct nfp_net_hw *hw = txq->hw;
1835
1836         if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
1837                 goto clean_txd;
1838
1839         ol_flags = mb->ol_flags;
1840
1841         if (!(ol_flags & PKT_TX_TCP_SEG))
1842                 goto clean_txd;
1843
1844         txd->l3_offset = mb->l2_len;
1845         txd->l4_offset = mb->l2_len + mb->l3_len;
1846         txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
1847         txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
1848         txd->flags = PCIE_DESC_TX_LSO;
1849         return;
1850
1851 clean_txd:
1852         txd->flags = 0;
1853         txd->l3_offset = 0;
1854         txd->l4_offset = 0;
1855         txd->lso_hdrlen = 0;
1856         txd->mss = 0;
1857 }
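
/*
 * Usage sketch (illustrative only): for the TSO path above to be taken, the
 * application has to request PKT_TX_TCP_SEG and fill in the header lengths
 * and segment size on the mbuf before handing it to the TX burst function:
 *
 *   mb->l2_len = sizeof(struct rte_ether_hdr);
 *   mb->l3_len = sizeof(struct rte_ipv4_hdr);
 *   mb->l4_len = sizeof(struct rte_tcp_hdr);
 *   mb->tso_segsz = 1448;
 *   mb->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
 *
 * The values assume untagged IPv4/TCP; 1448 is just an example segment size.
 */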
1858
1859 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1860 static inline void
1861 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1862                  struct rte_mbuf *mb)
1863 {
1864         uint64_t ol_flags;
1865         struct nfp_net_hw *hw = txq->hw;
1866
1867         if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1868                 return;
1869
1870         ol_flags = mb->ol_flags;
1871
1872         /* Set IPv4 checksum offload flag; IPv6 has no header checksum */
1873         if (ol_flags & PKT_TX_IP_CKSUM)
1874                 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1875
1876         switch (ol_flags & PKT_TX_L4_MASK) {
1877         case PKT_TX_UDP_CKSUM:
1878                 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1879                 break;
1880         case PKT_TX_TCP_CKSUM:
1881                 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1882                 break;
1883         }
1884
1885         if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1886                 txd->flags |= PCIE_DESC_TX_CSUM;
1887 }
1888
1889 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1890 static inline void
1891 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1892                  struct rte_mbuf *mb)
1893 {
1894         struct nfp_net_hw *hw = rxq->hw;
1895
1896         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1897                 return;
1898
1899         /* If the packet is IPv4 and the IP checksum is bad, flag it */
1900         if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1901             !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
1902                 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1903         else
1904                 mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1905
1906         /* If neither UDP nor TCP, return */
1907         if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1908             !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1909                 return;
1910
1911         if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
1912                 mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1913         else
1914                 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1915 }
1916
1917 #define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1918 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1919
1920 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1921
1922 /*
1923  * nfp_net_set_hash - Set mbuf hash data
1924  *
1925  * The RSS hash and hash-type are pre-pended to the packet data.
1926  * Extract and decode it and set the mbuf fields.
1927  */
1928 static inline void
1929 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1930                  struct rte_mbuf *mbuf)
1931 {
1932         struct nfp_net_hw *hw = rxq->hw;
1933         uint8_t *meta_offset;
1934         uint32_t meta_info;
1935         uint32_t hash = 0;
1936         uint32_t hash_type = 0;
1937
1938         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1939                 return;
1940
1941         /* This is true for new firmware versions */
1942         if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
1943             (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
1944              NFP_DESC_META_LEN(rxd))) {
1945                 /*
1946                  * new metadata api:
1947                  * <----  32 bit  ----->
1948                  * m    field type word
1949                  * e     data field #2
1950                  * t     data field #1
1951                  * a     data field #0
1952                  * ====================
1953                  *    packet data
1954                  *
1955                  * Field type word contains up to 8 4bit field types
1956                  * A 4bit field type refers to a data field word
1957                  * A data field word can have several 4bit field types
1958                  */
1959                 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1960                 meta_offset -= NFP_DESC_META_LEN(rxd);
1961                 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1962                 meta_offset += 4;
1963                 /* The NFP PMD only supports metadata for hashing */
1964                 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1965                 case NFP_NET_META_HASH:
1966                         /* next field type is about the hash type */
1967                         meta_info >>= NFP_NET_META_FIELD_SIZE;
1968                         /* hash value is in the data field */
1969                         hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1970                         hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1971                         break;
1972                 default:
1973                         /* Unsupported metadata can be a performance issue */
1974                         return;
1975                 }
1976         } else {
1977                 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1978                         return;
1979
1980                 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1981                 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1982         }
1983
1984         mbuf->hash.rss = hash;
1985         mbuf->ol_flags |= PKT_RX_RSS_HASH;
1986
1987         switch (hash_type) {
1988         case NFP_NET_RSS_IPV4:
1989                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
1990                 break;
1991         case NFP_NET_RSS_IPV6:
1992                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
1993                 break;
1994         case NFP_NET_RSS_IPV6_EX:
1995                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
1996                 break;
1997         case NFP_NET_RSS_IPV4_TCP:
1998                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
1999                 break;
2000         case NFP_NET_RSS_IPV6_TCP:
2001                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2002                 break;
2003         case NFP_NET_RSS_IPV4_UDP:
2004                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2005                 break;
2006         case NFP_NET_RSS_IPV6_UDP:
2007                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2008                 break;
2009         default:
2010                 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
2011         }
2012 }
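
/*
 * Worked example of the metadata layout parsed above (values are
 * illustrative): with a meta_len of 8 bytes, the area right before the
 * packet holds one field-type word followed by one data word. If the lowest
 * nibble of the field-type word is NFP_NET_META_HASH, the next nibble
 * encodes the hash type (one of the NFP_NET_RSS_* values) and the data word
 * holds the 32-bit RSS hash:
 *
 *   meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
 *   type      = meta_info & NFP_NET_META_FIELD_MASK;
 *   hash_type = (meta_info >> NFP_NET_META_FIELD_SIZE) &
 *               NFP_NET_META_FIELD_MASK;
 *   hash      = rte_be_to_cpu_32(*(uint32_t *)(meta_offset + 4));
 */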
2013
2014 static inline void
2015 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
2016 {
2017         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
2018 }
2019
2022 /*
2023  * RX path design:
2024  *
2025  * There are some decisions to make:
2026  * 1) How to check the DD bit of the RX descriptors
2027  * 2) How and when to allocate new mbufs
2028  *
2029  * The current implementation checks just one single DD bit per loop. As each
2030  * descriptor is 8 bytes, it is likely a good idea to check all descriptors
2031  * in the same cache line instead. Tests with this change have not shown any
2032  * performance improvement, but it requires further investigation. For
2033  * example, depending on which descriptor comes next, fewer than 8 descriptors
2034  * may share its cache line, implying extra work which could be
2035  * counterproductive by itself. Indeed, the latest firmware changes do exactly
2036  * this: writing several descriptors with the DD bit set, to save PCIe
2037  * bandwidth and DMA operations from the NFP.
2038  *
2039  * Mbuf allocation is done when a new packet is received. Then the descriptor
2040  * is automatically linked with the new mbuf and the old one is given to the
2041  * user. The main drawback of this design is that mbuf allocation is heavier
2042  * than using the bulk allocations allowed by DPDK with rte_mempool_get_bulk.
2043  * From the cache point of view, allocating the mbuf early, as done now, does
2044  * not seem to have any benefit at all. Again, tests with this change have not
2045  * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
2046  * so the implications of this type of allocation should be studied more
2047  * deeply.
2048  */
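
/*
 * Sketch of the bulk-allocation alternative discussed above (illustrative
 * only, not implemented by this driver): the freelist refill could use
 * rte_pktmbuf_alloc_bulk(), the pktmbuf wrapper around the all-or-nothing
 * mempool bulk get:
 *
 *   struct rte_mbuf *new_mbufs[64];
 *
 *   if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, new_mbufs, 64) != 0)
 *       return;
 *
 * On failure nothing is allocated, so the current mbufs stay in place; on
 * success the 64 new mbufs would be linked to 64 freelist descriptors.
 */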
2049
2050 static uint16_t
2051 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2052 {
2053         struct nfp_net_rxq *rxq;
2054         struct nfp_net_rx_desc *rxds;
2055         struct nfp_net_rx_buff *rxb;
2056         struct nfp_net_hw *hw;
2057         struct rte_mbuf *mb;
2058         struct rte_mbuf *new_mb;
2059         uint16_t nb_hold;
2060         uint64_t dma_addr;
2061         int avail;
2062
2063         rxq = rx_queue;
2064         if (unlikely(rxq == NULL)) {
2065                 /*
2066                  * DPDK just checks that the queue is lower than the number
2067                  * of enabled queues, but the queue needs to be configured
2068                  */
2069                 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2070                 return 0;
2071         }
2072
2073         hw = rxq->hw;
2074         avail = 0;
2075         nb_hold = 0;
2076
2077         while (avail < nb_pkts) {
2078                 rxb = &rxq->rxbufs[rxq->rd_p];
2079                 if (unlikely(rxb == NULL)) {
2080                         RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2081                         break;
2082                 }
2083
2084                 rxds = &rxq->rxds[rxq->rd_p];
2085                 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2086                         break;
2087
2088                 /*
2089                  * Memory barrier to ensure that we won't do other
2090                  * reads before the DD bit.
2091                  */
2092                 rte_rmb();
2093
2094                 /*
2095                  * We got a packet. Let's alloc a new mbuf for refilling the
2096                  * free descriptor ring as soon as possible
2097                  */
2098                 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2099                 if (unlikely(new_mb == NULL)) {
2100                         RTE_LOG_DP(DEBUG, PMD,
2101                         "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2102                                 rxq->port_id, (unsigned int)rxq->qidx);
2103                         nfp_net_mbuf_alloc_failed(rxq);
2104                         break;
2105                 }
2106
2107                 nb_hold++;
2108
2109                 /*
2110                  * Grab the mbuf and refill the descriptor with the
2111                  * previously allocated mbuf
2112                  */
2113                 mb = rxb->mbuf;
2114                 rxb->mbuf = new_mb;
2115
2116                 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
2117                            rxds->rxd.data_len, rxq->mbuf_size);
2118
2119                 /* Size of this segment */
2120                 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2121                 /* Size of the whole packet. We just support 1 segment */
2122                 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2123
2124                 if (unlikely((mb->data_len + hw->rx_offset) >
2125                              rxq->mbuf_size)) {
2126                         /*
2127                          * This should not happen and the user has the
2128                          * responsibility of avoiding it. But we have
2129                          * to give some info about the error
2130                          */
2131                         RTE_LOG_DP(ERR, PMD,
2132                                 "mbuf overflow likely due to the RX offset.\n"
2133                                 "\t\tYour mbuf size should have extra space for"
2134                                 " RX offset=%u bytes.\n"
2135                                 "\t\tCurrently you just have %u bytes available"
2136                                 " but the received packet is %u bytes long",
2137                                 hw->rx_offset,
2138                                 rxq->mbuf_size - hw->rx_offset,
2139                                 mb->data_len);
2140                         return -EINVAL;
2141                 }
2142
2143                 /* Filling the received mbuf with packet info */
2144                 if (hw->rx_offset)
2145                         mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2146                 else
2147                         mb->data_off = RTE_PKTMBUF_HEADROOM +
2148                                        NFP_DESC_META_LEN(rxds);
2149
2150                 /* No scatter mode supported */
2151                 mb->nb_segs = 1;
2152                 mb->next = NULL;
2153
2154                 mb->port = rxq->port_id;
2155
2156                 /* Checking the RSS flag */
2157                 nfp_net_set_hash(rxq, rxds, mb);
2158
2159                 /* Checking the checksum flag */
2160                 nfp_net_rx_cksum(rxq, rxds, mb);
2161
2162                 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2163                     (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2164                         mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2165                         mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2166                 }
2167
2168                 /* Adding the mbuf to the mbuf array passed by the app */
2169                 rx_pkts[avail++] = mb;
2170
2171                 /* Now resetting and updating the descriptor */
2172                 rxds->vals[0] = 0;
2173                 rxds->vals[1] = 0;
2174                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2175                 rxds->fld.dd = 0;
2176                 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2177                 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2178
2179                 rxq->rd_p++;
2180                 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
2181                         rxq->rd_p = 0;
2182         }
2183
2184         if (nb_hold == 0)
2185                 return nb_hold;
2186
2187         PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received",
2188                    rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2189
2190         nb_hold += rxq->nb_rx_hold;
2191
2192         /*
2193          * FL descriptors need to be written before incrementing the
2194          * FL queue WR pointer
2195          */
2196         rte_wmb();
2197         if (nb_hold > rxq->rx_free_thresh) {
2198                 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
2199                            rxq->port_id, (unsigned int)rxq->qidx,
2200                            (unsigned)nb_hold, (unsigned)avail);
2201                 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2202                 nb_hold = 0;
2203         }
2204         rxq->nb_rx_hold = nb_hold;
2205
2206         return avail;
2207 }
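
/*
 * Usage sketch (illustrative only): nfp_net_recv_pkts() is installed as
 * rx_pkt_burst in nfp_net_init(), so applications reach it through the
 * standard burst API:
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t nb_rx, k;
 *
 *   nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *   for (k = 0; k < nb_rx; k++)
 *       rte_pktmbuf_free(pkts[k]);
 */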
2208
2209 /*
2210  * nfp_net_tx_free_bufs - Check for descriptors with a complete
2211  * status
2212  * @txq: TX queue to work with
2213  * Returns number of descriptors freed
2214  */
2215 int
2216 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2217 {
2218         uint32_t qcp_rd_p;
2219         int todo;
2220
2221         PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2222                    " status", txq->qidx);
2223
2224         /* Work out how many packets have been sent */
2225         qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2226
2227         if (qcp_rd_p == txq->rd_p) {
2228                 PMD_TX_LOG(DEBUG, "queue %u: It seems the hardware is not sending "
2229                            "packets (%u, %u)", txq->qidx,
2230                            qcp_rd_p, txq->rd_p);
2231                 return 0;
2232         }
2233
2234         if (qcp_rd_p > txq->rd_p)
2235                 todo = qcp_rd_p - txq->rd_p;
2236         else
2237                 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2238
2239         PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, todo: %d",
2240                    qcp_rd_p, txq->rd_p, todo);
2241
2242         if (todo == 0)
2243                 return todo;
2244
2245         txq->rd_p += todo;
2246         if (unlikely(txq->rd_p >= txq->tx_count))
2247                 txq->rd_p -= txq->tx_count;
2248
2249         return todo;
2250 }
2251
2252 /* Always leave some free descriptors to avoid wrapping confusion */
2253 static inline
2254 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2255 {
2256         if (txq->wr_p >= txq->rd_p)
2257                 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2258         else
2259                 return txq->rd_p - txq->wr_p - 8;
2260 }
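
/*
 * Worked example for the calculation above (illustrative numbers): with
 * tx_count = 4096, the 8 descriptors held back keep the read and write
 * pointers distinguishable on wrap-around.
 *
 *   wr_p = 100,  rd_p = 4000  ->  4000 - 100 - 8           = 3892 free
 *   wr_p = 4000, rd_p = 100   ->  4096 - (4000 - 100) - 8  = 188 free
 */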
2261
2262 /*
2263  * nfp_net_txq_full - Check if the number of free TX descriptors
2264  * is below tx_free_threshold
2265  *
2266  * @txq: TX queue to check
2267  *
2268  * This function uses the host copy of the read/write pointers
2269  */
2270 static inline
2271 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2272 {
2273         return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2274 }
2275
2276 static uint16_t
2277 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2278 {
2279         struct nfp_net_txq *txq;
2280         struct nfp_net_hw *hw;
2281         struct nfp_net_tx_desc *txds, txd;
2282         struct rte_mbuf *pkt;
2283         uint64_t dma_addr;
2284         int pkt_size, dma_size;
2285         uint16_t free_descs, issued_descs;
2286         struct rte_mbuf **lmbuf;
2287         int i;
2288
2289         txq = tx_queue;
2290         hw = txq->hw;
2291         txds = &txq->txds[txq->wr_p];
2292
2293         PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
2294                    txq->qidx, txq->wr_p, nb_pkts);
2295
2296         if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2297                 nfp_net_tx_free_bufs(txq);
2298
2299         free_descs = (uint16_t)nfp_free_tx_desc(txq);
2300         if (unlikely(free_descs == 0))
2301                 return 0;
2302
2303         pkt = *tx_pkts;
2304
2305         i = 0;
2306         issued_descs = 0;
2307         PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
2308                    txq->qidx, nb_pkts);
2309         /* Sending packets */
2310         while ((i < nb_pkts) && free_descs) {
2311                 /* Grabbing the mbuf linked to the current descriptor */
2312                 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2313                 /* Warming the cache for releasing the mbuf later on */
2314                 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2315
2316                 pkt = *(tx_pkts + i);
2317
2318                 if (unlikely((pkt->nb_segs > 1) &&
2319                              !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2320                         PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2321                         rte_panic("Multisegment packet unsupported\n");
2322                 }
2323
2324                 /* Checking if we have enough descriptors */
2325                 if (unlikely(pkt->nb_segs > free_descs))
2326                         goto xmit_end;
2327
2328                 /*
2329                  * Checksum and VLAN flags just in the first descriptor for a
2330                  * multisegment packet, but TSO info needs to be in all of them.
2331                  */
2332                 txd.data_len = pkt->pkt_len;
2333                 nfp_net_tx_tso(txq, &txd, pkt);
2334                 nfp_net_tx_cksum(txq, &txd, pkt);
2335
2336                 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2337                     (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2338                         txd.flags |= PCIE_DESC_TX_VLAN;
2339                         txd.vlan = pkt->vlan_tci;
2340                 }
2341
2342                 /*
2343                  * mbuf data_len is the data in one segment and pkt_len data
2344                  * in the whole packet. When the packet is just one segment,
2345                  * then data_len = pkt_len
2346                  */
2347                 pkt_size = pkt->pkt_len;
2348
2349                 while (pkt) {
2350                         /* Copying TSO, VLAN and cksum info */
2351                         *txds = txd;
2352
2353                         /* Releasing mbuf used by this descriptor previously*/
2354                         if (*lmbuf)
2355                                 rte_pktmbuf_free_seg(*lmbuf);
2356
2357                         /*
2358                          * Linking mbuf with descriptor for being released
2359                          * next time descriptor is used
2360                          */
2361                         *lmbuf = pkt;
2362
2363                         dma_size = pkt->data_len;
2364                         dma_addr = rte_mbuf_data_iova(pkt);
2365                         PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2366                                    "%" PRIx64 "", dma_addr);
2367
2368                         /* Filling descriptors fields */
2369                         txds->dma_len = dma_size;
2370                         txds->data_len = txd.data_len;
2371                         txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2372                         txds->dma_addr_lo = (dma_addr & 0xffffffff);
2373                         ASSERT(free_descs > 0);
2374                         free_descs--;
2375
2376                         txq->wr_p++;
2377                         if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
2378                                 txq->wr_p = 0;
2379
2380                         pkt_size -= dma_size;
2381
2382                         /*
2383                          * Set EOP on the last segment; single-segment
2384                          * packets are the common case
2385                          */
2386                         if (likely(!pkt_size))
2387                                 txds->offset_eop = PCIE_DESC_TX_EOP;
2388                         else
2389                                 txds->offset_eop = 0;
2390
2391                         pkt = pkt->next;
2392                         /* Referencing next free TX descriptor */
2393                         txds = &txq->txds[txq->wr_p];
2394                         lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2395                         issued_descs++;
2396                 }
2397                 i++;
2398         }
2399
2400 xmit_end:
2401         /* Increment write pointers. Force memory write before we let HW know */
2402         rte_wmb();
2403         nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2404
2405         return i;
2406 }
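
/*
 * Usage sketch (illustrative only): nfp_net_xmit_pkts() is installed as
 * tx_pkt_burst in nfp_net_init(), so a hypothetical sender would do:
 *
 *   uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
 *
 *   if (nb_tx < nb_pkts)
 *       rte_pktmbuf_free_bulk(&pkts[nb_tx], nb_pkts - nb_tx);
 *
 * Dropping the unsent packets is just one policy; retrying later is another.
 */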
2407
2408 static int
2409 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2410 {
2411         uint32_t new_ctrl, update;
2412         struct nfp_net_hw *hw;
2413         int ret;
2414
2415         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2416         new_ctrl = 0;
2417
2418         /* Enable vlan strip if it is not configured yet */
2419         if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2420             !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2421                 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2422
2423         /* Disable vlan strip only if it is currently configured */
2424         if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2425             (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2426                 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2427
2428         if (new_ctrl == 0)
2429                 return 0;
2430
2431         update = NFP_NET_CFG_UPDATE_GEN;
2432
2433         ret = nfp_net_reconfig(hw, new_ctrl, update);
2434         if (!ret)
2435                 hw->ctrl = new_ctrl;
2436
2437         return ret;
2438 }
2439
2440 static int
2441 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2442                     struct rte_eth_rss_reta_entry64 *reta_conf,
2443                     uint16_t reta_size)
2444 {
2445         uint32_t reta, mask;
2446         int i, j;
2447         int idx, shift;
2448         struct nfp_net_hw *hw =
2449                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2450
2451         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2452                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2453                         "(%d) doesn't match the number hardware can support "
2454                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2455                 return -EINVAL;
2456         }
2457
2458         /*
2459          * Update Redirection Table. There are 128 8bit-entries which can be
2460          * managed as 32 32bit-entries
2461          */
2462         for (i = 0; i < reta_size; i += 4) {
2463                 /* Handling 4 RSS entries per loop */
2464                 idx = i / RTE_RETA_GROUP_SIZE;
2465                 shift = i % RTE_RETA_GROUP_SIZE;
2466                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2467
2468                 if (!mask)
2469                         continue;
2470
2471                 reta = 0;
2472                 /* If all 4 entries were set, no need to read RETA register */
2473                 if (mask != 0xF)
2474                         reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2475
2476                 for (j = 0; j < 4; j++) {
2477                         if (!(mask & (0x1 << j)))
2478                                 continue;
2479                         if (mask != 0xF)
2480                                 /* Clearing the entry bits */
2481                                 reta &= ~(0xFF << (8 * j));
2482                         reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2483                 }
2484                 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2485                               reta);
2486         }
2487         return 0;
2488 }
2489
2490 /* Update Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
2491 static int
2492 nfp_net_reta_update(struct rte_eth_dev *dev,
2493                     struct rte_eth_rss_reta_entry64 *reta_conf,
2494                     uint16_t reta_size)
2495 {
2496         struct nfp_net_hw *hw =
2497                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2498         uint32_t update;
2499         int ret;
2500
2501         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2502                 return -EINVAL;
2503
2504         ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2505         if (ret != 0)
2506                 return ret;
2507
2508         update = NFP_NET_CFG_UPDATE_RSS;
2509
2510         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2511                 return -EIO;
2512
2513         return 0;
2514 }
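
/*
 * Usage sketch (illustrative only): spreading flows over the first two RX
 * queues by rewriting the 128-entry RETA through the generic API, which ends
 * up in nfp_net_reta_update() above:
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   int k;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (k = 0; k < NFP_NET_CFG_RSS_ITBL_SZ; k++) {
 *       reta_conf[k / RTE_RETA_GROUP_SIZE].mask |=
 *               1ULL << (k % RTE_RETA_GROUP_SIZE);
 *       reta_conf[k / RTE_RETA_GROUP_SIZE].reta[k % RTE_RETA_GROUP_SIZE] =
 *               k % 2;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, NFP_NET_CFG_RSS_ITBL_SZ);
 */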
2515
2516 /* Query Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
2517 static int
2518 nfp_net_reta_query(struct rte_eth_dev *dev,
2519                    struct rte_eth_rss_reta_entry64 *reta_conf,
2520                    uint16_t reta_size)
2521 {
2522         uint8_t i, j, mask;
2523         int idx, shift;
2524         uint32_t reta;
2525         struct nfp_net_hw *hw;
2526
2527         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2528
2529         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2530                 return -EINVAL;
2531
2532         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2533                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2534                         "(%d) doesn't match the number hardware can support "
2535                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2536                 return -EINVAL;
2537         }
2538
2539         /*
2540          * Reading Redirection Table. There are 128 8bit-entries which can be
2541          * manage as 32 32bit-entries
2542          */
2543         for (i = 0; i < reta_size; i += 4) {
2544                 /* Handling 4 RSS entries per loop */
2545                 idx = i / RTE_RETA_GROUP_SIZE;
2546                 shift = i % RTE_RETA_GROUP_SIZE;
2547                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2548
2549                 if (!mask)
2550                         continue;
2551
2552                 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2553                                     shift);
2554                 for (j = 0; j < 4; j++) {
2555                         if (!(mask & (0x1 << j)))
2556                                 continue;
2557                         reta_conf[idx].reta[shift + j] =
2558                                 (uint8_t)((reta >> (8 * j)) & 0xFF);
2559                 }
2560         }
2561         return 0;
2562 }
2563
2564 static int
2565 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2566                         struct rte_eth_rss_conf *rss_conf)
2567 {
2568         struct nfp_net_hw *hw;
2569         uint64_t rss_hf;
2570         uint32_t cfg_rss_ctrl = 0;
2571         uint8_t key;
2572         int i;
2573
2574         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2575
2576         /* Writing the key byte by byte */
2577         for (i = 0; i < rss_conf->rss_key_len; i++) {
2578                 memcpy(&key, &rss_conf->rss_key[i], 1);
2579                 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2580         }
2581
2582         rss_hf = rss_conf->rss_hf;
2583
2584         if (rss_hf & ETH_RSS_IPV4)
2585                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
2586
2587         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2588                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
2589
2590         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2591                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
2592
2593         if (rss_hf & ETH_RSS_IPV6)
2594                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
2595
2596         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2597                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
2598
2599         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2600                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
2601
2602         cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2603         cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2604
2605         /* configuring where to apply the RSS hash */
2606         nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2607
2608         /* Writing the key size */
2609         nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
2610
2611         return 0;
2612 }
2613
2614 static int
2615 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2616                         struct rte_eth_rss_conf *rss_conf)
2617 {
2618         uint32_t update;
2619         uint64_t rss_hf;
2620         struct nfp_net_hw *hw;
2621
2622         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2623
2624         rss_hf = rss_conf->rss_hf;
2625
2626         /* Checking if RSS is enabled */
2627         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2628                 if (rss_hf != 0) { /* Enable RSS? */
2629                         PMD_DRV_LOG(ERR, "RSS unsupported");
2630                         return -EINVAL;
2631                 }
2632                 return 0; /* Nothing to do */
2633         }
2634
2635         if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2636                 PMD_DRV_LOG(ERR, "hash key too long");
2637                 return -EINVAL;
2638         }
2639
2640         nfp_net_rss_hash_write(dev, rss_conf);
2641
2642         update = NFP_NET_CFG_UPDATE_RSS;
2643
2644         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2645                 return -EIO;
2646
2647         return 0;
2648 }
2649
2650 static int
2651 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2652                           struct rte_eth_rss_conf *rss_conf)
2653 {
2654         uint64_t rss_hf;
2655         uint32_t cfg_rss_ctrl;
2656         uint8_t key;
2657         int i;
2658         struct nfp_net_hw *hw;
2659
2660         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2661
2662         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2663                 return -EINVAL;
2664
2665         rss_hf = rss_conf->rss_hf;
2666         cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2667
2668         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2669                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
2670
2671         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2672                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2673
2674         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2675                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2676
2677         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2678                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2679
2680         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2681                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2682
2683         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2684                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;
2685
2686         /* Propagate current RSS hash functions to caller */
2687         rss_conf->rss_hf = rss_hf;
2688
2689         /* Reading the key size */
2690         rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2691
2692         /* Reading the key byte by byte */
2693         for (i = 0; i < rss_conf->rss_key_len; i++) {
2694                 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2695                 memcpy(&rss_conf->rss_key[i], &key, 1);
2696         }
2697
2698         return 0;
2699 }
2700
2701 static int
2702 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2703 {
2704         struct rte_eth_conf *dev_conf;
2705         struct rte_eth_rss_conf rss_conf;
2706         struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2707         uint16_t rx_queues = dev->data->nb_rx_queues;
2708         uint16_t queue;
2709         int i, j, ret;
2710
2711         PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
2712                 rx_queues);
2713
2714         nfp_reta_conf[0].mask = ~0x0;
2715         nfp_reta_conf[1].mask = ~0x0;
2716
2717         queue = 0;
2718         for (i = 0; i < 0x40; i += 8) {
2719                 for (j = i; j < (i + 8); j++) {
2720                         nfp_reta_conf[0].reta[j] = queue;
2721                         nfp_reta_conf[1].reta[j] = queue++;
2722                         queue %= rx_queues;
2723                 }
2724         }
2725         ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2726         if (ret != 0)
2727                 return ret;
2728
2729         dev_conf = &dev->data->dev_conf;
2730         if (!dev_conf) {
2731                 PMD_DRV_LOG(INFO, "wrong rss conf");
2732                 return -EINVAL;
2733         }
2734         rss_conf = dev_conf->rx_adv_conf.rss_conf;
2735
2736         ret = nfp_net_rss_hash_write(dev, &rss_conf);
2737
2738         return ret;
2739 }
2740
2741
2742 /* Initialise and register driver with DPDK Application */
2743 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2744         .dev_configure          = nfp_net_configure,
2745         .dev_start              = nfp_net_start,
2746         .dev_stop               = nfp_net_stop,
2747         .dev_set_link_up        = nfp_net_set_link_up,
2748         .dev_set_link_down      = nfp_net_set_link_down,
2749         .dev_close              = nfp_net_close,
2750         .promiscuous_enable     = nfp_net_promisc_enable,
2751         .promiscuous_disable    = nfp_net_promisc_disable,
2752         .link_update            = nfp_net_link_update,
2753         .stats_get              = nfp_net_stats_get,
2754         .stats_reset            = nfp_net_stats_reset,
2755         .dev_infos_get          = nfp_net_infos_get,
2756         .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2757         .mtu_set                = nfp_net_dev_mtu_set,
2758         .mac_addr_set           = nfp_set_mac_addr,
2759         .vlan_offload_set       = nfp_net_vlan_offload_set,
2760         .reta_update            = nfp_net_reta_update,
2761         .reta_query             = nfp_net_reta_query,
2762         .rss_hash_update        = nfp_net_rss_hash_update,
2763         .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
2764         .rx_queue_setup         = nfp_net_rx_queue_setup,
2765         .rx_queue_release       = nfp_net_rx_queue_release,
2766         .tx_queue_setup         = nfp_net_tx_queue_setup,
2767         .tx_queue_release       = nfp_net_tx_queue_release,
2768         .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
2769         .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
2770 };
2771
2772
2773 static int
2774 nfp_net_init(struct rte_eth_dev *eth_dev)
2775 {
2776         struct rte_pci_device *pci_dev;
2777         struct nfp_pf_dev *pf_dev;
2778         struct nfp_net_hw *hw;
2779
2780         uint64_t tx_bar_off = 0, rx_bar_off = 0;
2781         uint32_t start_q;
2782         int stride = 4;
2783         int port = 0;
2784         int err;
2785
2786         PMD_INIT_FUNC_TRACE();
2787
2788         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2789
2790         /* Use backpointer here to the PF of this eth_dev */
2791         pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
2792
2793         /* NFP can not handle DMA addresses requiring more than 40 bits */
2794         if (rte_mem_check_dma_mask(40)) {
2795                 RTE_LOG(ERR, PMD, "device %s can not be used:",
2796                                    pci_dev->device.name);
2797                 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
2798                 return -ENODEV;
2799         }
2800
2801         if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2802             (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2803                 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
2804                 if (port < 0 || port > 7) {
2805                         PMD_DRV_LOG(ERR, "Port value is wrong");
2806                         return -ENODEV;
2807                 }
2808
2809                 /* This points to the specific port private data */
2810                 PMD_INIT_LOG(DEBUG, "Working with physical port number %d",
2811                                     port);
2812
2813                 /* Use PF array of physical ports to get pointer to
2814                  * this specific port
2815                  */
2816                 hw = pf_dev->ports[port];
2817
2818         } else {
2819                 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2820         }
2821
2822         eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2823         eth_dev->rx_queue_count = nfp_net_rx_queue_count;
2824         eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2825         eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2826
2827         /* For secondary processes, the primary has done all the work */
2828         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2829                 return 0;
2830
2831         rte_eth_copy_pci_info(eth_dev, pci_dev);
2832
2833         hw->device_id = pci_dev->id.device_id;
2834         hw->vendor_id = pci_dev->id.vendor_id;
2835         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2836         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2837
2838         PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2839                      pci_dev->id.vendor_id, pci_dev->id.device_id,
2840                      pci_dev->addr.domain, pci_dev->addr.bus,
2841                      pci_dev->addr.devid, pci_dev->addr.function);
2842
2843         hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2844         if (hw->ctrl_bar == NULL) {
2845                 PMD_DRV_LOG(ERR,
2846                         "hw->ctrl_bar is NULL. BAR0 not configured");
2847                 return -ENODEV;
2848         }
2849
2850         if (hw->is_phyport) {
2851                 if (port == 0) {
2852                         hw->ctrl_bar = pf_dev->ctrl_bar;
2853                 } else {
2854                         if (!pf_dev->ctrl_bar)
2855                                 return -ENODEV;
2856                         /* Use port offset in pf ctrl_bar for this
2857                          * port's control bar
2858                          */
2859                         hw->ctrl_bar = pf_dev->ctrl_bar +
2860                                        (port * NFP_PF_CSR_SLICE_SIZE);
2861                 }
2862         }
2863
2864         PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2865
2866         hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2867         hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2868
2869         /* Work out where in the BAR the queues start. */
2870         switch (pci_dev->id.device_id) {
2871         case PCI_DEVICE_ID_NFP4000_PF_NIC:
2872         case PCI_DEVICE_ID_NFP6000_PF_NIC:
2873         case PCI_DEVICE_ID_NFP6000_VF_NIC:
2874                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2875                 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2876                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2877                 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2878                 break;
2879         default:
2880                 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
2881                 err = -ENODEV;
2882                 goto dev_err_ctrl_map;
2883         }
2884
2885         PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
2886         PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
2887
2888         if (hw->is_phyport) {
2889                 hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
2890                 hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
2891                 eth_dev->data->dev_private = hw;
2892         } else {
2893                 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2894                              tx_bar_off;
2895                 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2896                              rx_bar_off;
2897         }
2898
2899         PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2900                      hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2901
2902         nfp_net_cfg_queue_setup(hw);
2903
2904         /* Get some of the read-only fields from the config BAR */
2905         hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2906         hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2907         hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2908         hw->mtu = RTE_ETHER_MTU;
2909
2910         /* VLAN insertion is incompatible with LSOv2 */
2911         if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
2912                 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
2913
2914         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2915                 hw->rx_offset = NFP_NET_RX_OFFSET;
2916         else
2917                 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2918
2919         PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
2920                            NFD_CFG_MAJOR_VERSION_of(hw->ver),
2921                            NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
2922
2923         PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2924                      hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2925                      hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
2926                      hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
2927                      hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
2928                      hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
2929                      hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
2930                      hw->cap & NFP_NET_CFG_CTRL_TXVLAN  ? "TXVLAN "  : "",
2931                      hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2932                      hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
2933                      hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR "  : "",
2934                      hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
2935                      hw->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSOv2 "     : "",
2936                      hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "",
2937                      hw->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSSv2 "     : "");
2938
2939         hw->ctrl = 0;
2940
2941         hw->stride_rx = stride;
2942         hw->stride_tx = stride;
2943
2944         PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2945                      hw->max_rx_queues, hw->max_tx_queues);
2946
2947         /* Initializing spinlock for reconfigs */
2948         rte_spinlock_init(&hw->reconfig_lock);
2949
2950         /* Allocating memory for mac addr */
2951         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2952                                                RTE_ETHER_ADDR_LEN, 0);
2953         if (eth_dev->data->mac_addrs == NULL) {
2954                 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2955                 err = -ENOMEM;
2956                 goto dev_err_queues_map;
2957         }
2958
2959         if (hw->is_phyport) {
2960                 nfp_net_pf_read_mac(pf_dev, port);
2961                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2962         } else {
2963                 nfp_net_vf_read_mac(hw);
2964         }
2965
2966         if (!rte_is_valid_assigned_ether_addr(
2967                     (struct rte_ether_addr *)&hw->mac_addr)) {
2968                 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
2969                                    port);
2970                 /* Using random mac addresses for VFs */
2971                 rte_eth_random_addr(&hw->mac_addr[0]);
2972                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2973         }
2974
2975         /* Copying mac address to DPDK eth_dev struct */
2976         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
2977                         &eth_dev->data->mac_addrs[0]);
2978
2979         if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
2980                 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
2981
2982         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2983
2984         PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2985                      "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2986                      eth_dev->data->port_id, pci_dev->id.vendor_id,
2987                      pci_dev->id.device_id,
2988                      hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2989                      hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
2990
2991         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2992                 /* Registering LSC interrupt handler */
2993                 rte_intr_callback_register(&pci_dev->intr_handle,
2994                                            nfp_net_dev_interrupt_handler,
2995                                            (void *)eth_dev);
2996                 /* Telling the firmware about the LSC interrupt entry */
2997                 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2998                 /* Recording current stats counters values */
2999                 nfp_net_stats_reset(eth_dev);
3000         }
3001
3002         return 0;
3003
3004 dev_err_queues_map:
3005                 nfp_cpp_area_free(hw->hwqueues_area);
3006 dev_err_ctrl_map:
3007                 nfp_cpp_area_free(hw->ctrl_area);
3008
3009         return err;
3010 }
3011
3012 #define NFP_CPP_MEMIO_BOUNDARY          (1 << 20)
3013
3014 /*
3015  * Serve a write request to the NFP from host programs. The request
3016  * carries the write size and the CPP target. The bridge uses the
3017  * CPP interface handler configured during PMD setup.
3018  */
3019 static int
3020 nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
3021 {
3022         struct nfp_cpp_area *area;
3023         off_t offset, nfp_offset;
3024         uint32_t cpp_id, pos, len;
3025         uint32_t tmpbuf[16];
3026         size_t count, curlen, totlen = 0;
3027         int err = 0;
3028
3029         PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
3030                 sizeof(off_t), sizeof(size_t));
3031
3032         /* Reading the count param */
3033         err = recv(sockfd, &count, sizeof(off_t), 0);
3034         if (err != sizeof(off_t))
3035                 return -EINVAL;
3036
3037         curlen = count;
3038
3039         /* Reading the offset param */
3040         err = recv(sockfd, &offset, sizeof(off_t), 0);
3041         if (err != sizeof(off_t))
3042                 return -EINVAL;
3043
3044         /* Obtain target's CPP ID and offset in target */
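        /*
         * The 64-bit value received from the host packs both pieces: the
         * upper 24 bits carry the CPP ID fields (target, action and token,
         * shifted into their usual positions below) and the lower 40 bits
         * are the offset within that CPP target.
         */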
3045         cpp_id = (offset >> 40) << 8;
3046         nfp_offset = offset & ((1ull << 40) - 1);
3047
3048         PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
3049                 offset);
3050         PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
3051                 cpp_id, nfp_offset);
3052
3053         /* Limit the first chunk if the transfer crosses a CPP memio boundary */
3054         if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3055             (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3056                 curlen = NFP_CPP_MEMIO_BOUNDARY -
3057                         (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3058         }
3059
3060         while (count > 0) {
3061                 /* configure a CPP PCIe2CPP BAR for mapping the CPP target */
3062                 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3063                                                     nfp_offset, curlen);
3064                 if (!area) {
3065                         RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
3066                         return -EIO;
3067                 }
3068
3069                 /* mapping the target */
3070                 err = nfp_cpp_area_acquire(area);
3071                 if (err < 0) {
3072                         RTE_LOG(ERR, PMD, "area acquire failed\n");
3073                         nfp_cpp_area_free(area);
3074                         return -EIO;
3075                 }
3076
3077                 for (pos = 0; pos < curlen; pos += len) {
3078                         len = curlen - pos;
3079                         if (len > sizeof(tmpbuf))
3080                                 len = sizeof(tmpbuf);
3081
3082                         PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
3083                                            len, count);
3084                         err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
3085                         if (err != (int)len) {
3086                                 RTE_LOG(ERR, PMD,
3087                                         "%s: error when receiving, %d of %zu\n",
3088                                         __func__, err, count);
3089                                 nfp_cpp_area_release(area);
3090                                 nfp_cpp_area_free(area);
3091                                 return -EIO;
3092                         }
3093                         err = nfp_cpp_area_write(area, pos, tmpbuf, len);
3094                         if (err < 0) {
3095                                 RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n");
3096                                 nfp_cpp_area_release(area);
3097                                 nfp_cpp_area_free(area);
3098                                 return -EIO;
3099                         }
3100                 }
3101
3102                 nfp_offset += pos;
3103                 totlen += pos;
3104                 nfp_cpp_area_release(area);
3105                 nfp_cpp_area_free(area);
3106
3107                 count -= pos;
3108                 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3109                          NFP_CPP_MEMIO_BOUNDARY : count;
3110         }
3111
3112         return 0;
3113 }
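
/*
 * Illustrative sketch, not part of the driver: what the host side of a
 * write request could look like once a client is connected to the bridge
 * socket (see nfp_cpp_bridge_service_func below). All values and variable
 * names here are made up for the example.
 *
 *     uint32_t op = NFP_BRIDGE_OP_WRITE;          // opcode, defined below
 *     uint32_t cpp_id = 0x07000000;               // made-up target/action/token
 *     uint64_t nfp_offset = 0x2000000;            // made-up 40-bit offset
 *     off_t offset = ((off_t)(cpp_id >> 8) << 40) | nfp_offset;
 *     size_t count = 16;                          // bytes to write
 *     uint8_t payload[16] = { 0 };                // data to be written
 *
 *     send(fd, &op, 4, 0);                        // read by the service loop
 *     send(fd, &count, sizeof(off_t), 0);         // count, read as sizeof(off_t)
 *     send(fd, &offset, sizeof(off_t), 0);        // encoded CPP ID + offset
 *     send(fd, payload, count, 0);                // data written through the CPP area
 */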
3114
3115 /*
3116  * Serve a read request to the NFP from host programs. The request
3117  * carries the read size and the CPP target. The bridge uses the
3118  * CPP interface handler configured during PMD setup. The read data
3119  * is sent back to the requester over the same socket.
3120  */
3121 static int
3122 nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
3123 {
3124         struct nfp_cpp_area *area;
3125         off_t offset, nfp_offset;
3126         uint32_t cpp_id, pos, len;
3127         uint32_t tmpbuf[16];
3128         size_t count, curlen, totlen = 0;
3129         int err = 0;
3130
3131         PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
3132                 sizeof(off_t), sizeof(size_t));
3133
3134         /* Reading the count param */
3135         err = recv(sockfd, &count, sizeof(off_t), 0);
3136         if (err != sizeof(off_t))
3137                 return -EINVAL;
3138
3139         curlen = count;
3140
3141         /* Reading the offset param */
3142         err = recv(sockfd, &offset, sizeof(off_t), 0);
3143         if (err != sizeof(off_t))
3144                 return -EINVAL;
3145
3146         /* Obtain target's CPP ID and offset in target */
3147         cpp_id = (offset >> 40) << 8;
3148         nfp_offset = offset & ((1ull << 40) - 1);
3149
3150         PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
3151                            offset);
3152         PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
3153                            cpp_id, nfp_offset);
3154
3155         /* Limit the first chunk if the transfer crosses a CPP memio boundary */
3156         if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
3157             (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
3158                 curlen = NFP_CPP_MEMIO_BOUNDARY -
3159                         (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
3160         }
3161
3162         while (count > 0) {
3163                 area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
3164                                                     nfp_offset, curlen);
3165                 if (!area) {
3166                         RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
3167                         return -EIO;
3168                 }
3169
3170                 err = nfp_cpp_area_acquire(area);
3171                 if (err < 0) {
3172                         RTE_LOG(ERR, PMD, "area acquire failed\n");
3173                         nfp_cpp_area_free(area);
3174                         return -EIO;
3175                 }
3176
3177                 for (pos = 0; pos < curlen; pos += len) {
3178                         len = curlen - pos;
3179                         if (len > sizeof(tmpbuf))
3180                                 len = sizeof(tmpbuf);
3181
3182                         err = nfp_cpp_area_read(area, pos, tmpbuf, len);
3183                         if (err < 0) {
3184                                 RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n");
3185                                 nfp_cpp_area_release(area);
3186                                 nfp_cpp_area_free(area);
3187                                 return -EIO;
3188                         }
3189                         PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
3190                                            len, count);
3191
3192                         err = send(sockfd, tmpbuf, len, 0);
3193                         if (err != (int)len) {
3194                                 RTE_LOG(ERR, PMD,
3195                                         "%s: error when sending: %d of %zu\n",
3196                                         __func__, err, count);
3197                                 nfp_cpp_area_release(area);
3198                                 nfp_cpp_area_free(area);
3199                                 return -EIO;
3200                         }
3201                 }
3202
3203                 nfp_offset += pos;
3204                 totlen += pos;
3205                 nfp_cpp_area_release(area);
3206                 nfp_cpp_area_free(area);
3207
3208                 count -= pos;
3209                 curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
3210                         NFP_CPP_MEMIO_BOUNDARY : count;
3211         }
3212         return 0;
3213 }
3214
3215 #define NFP_IOCTL 'n'
3216 #define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
3217 /*
3218  * Serve an ioctl command from host NFP tools. This usually goes to
3219  * the kernel char driver, but that driver is not available when the
3220  * PF is bound to the PMD. Currently just one ioctl command is served
3221  * and it does not require any CPP access at all.
3222  */
3223 static int
3224 nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
3225 {
3226         uint32_t cmd, ident_size, tmp;
3227         int err;
3228
3229         /* Read the IOCTL command */
3230         err = recv(sockfd, &cmd, 4, 0);
3231         if (err != 4) {
3232                 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3233                 return -EIO;
3234         }
3235
3236         /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
3237         if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
3238                 RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd);
3239                 return -EINVAL;
3240         }
3241
3242         err = recv(sockfd, &ident_size, 4, 0);
3243         if (err != 4) {
3244                 RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
3245                 return -EIO;
3246         }
3247
3248         tmp = nfp_cpp_model(cpp);
3249
3250         PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp);
3251
3252         err = send(sockfd, &tmp, 4, 0);
3253         if (err != 4) {
3254                 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3255                 return -EIO;
3256         }
3257
3258         tmp = cpp->interface;
3259
3260         PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp);
3261
3262         err = send(sockfd, &tmp, 4, 0);
3263         if (err != 4) {
3264                 RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
3265                 return -EIO;
3266         }
3267
3268         return 0;
3269 }
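
/*
 * Illustrative sketch, not part of the driver: the identification exchange
 * as seen from a host tool already connected to the bridge socket. The fd
 * and the 4-byte message sizes simply mirror the recv()/send() calls above;
 * everything else is made up for the example.
 *
 *     uint32_t op = NFP_BRIDGE_OP_IOCTL;           // opcode, defined below
 *     uint32_t cmd = NFP_IOCTL_CPP_IDENTIFICATION;
 *     uint32_t size = 8;                           // read above, not otherwise used
 *     uint32_t model, interface;
 *
 *     send(fd, &op, 4, 0);
 *     send(fd, &cmd, 4, 0);
 *     send(fd, &size, 4, 0);
 *     recv(fd, &model, 4, 0);                      // NFP model
 *     recv(fd, &interface, 4, 0);                  // CPP interface id
 */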
3270
3271 #define NFP_BRIDGE_OP_READ      20
3272 #define NFP_BRIDGE_OP_WRITE     30
3273 #define NFP_BRIDGE_OP_IOCTL     40
3274
3275 /*
3276  * This is the code executed by a service core. The CPP bridge interface
3277  * is based on a unix socket: the requests usually handled by the kernel
3278  * char driver (read, write and ioctl) are served by the CPP bridge
3279  * instead. NFP host tools can be run through a wrapper library (e.g.
3280  * via LD_LIBRARY_PATH), completely unaware that the CPP bridge, and
3281  * not the NFP kernel char driver, is performing the CPP accesses.
3282  */
3283 static int32_t
3284 nfp_cpp_bridge_service_func(void *args)
3285 {
3286         struct sockaddr address;
3287         struct nfp_cpp *cpp = args;
3288         int sockfd, datafd, op, ret;
3289
3290         unlink("/tmp/nfp_cpp");
3291         sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
3292         if (sockfd < 0) {
3293                 RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n",
3294                         __func__);
3295                 return -EIO;
3296         }
3297
3298         memset(&address, 0, sizeof(struct sockaddr));
3299
3300         address.sa_family = AF_UNIX;
3301         strcpy(address.sa_data, "/tmp/nfp_cpp");
3302
3303         ret = bind(sockfd, (const struct sockaddr *)&address,
3304                    sizeof(struct sockaddr));
3305         if (ret < 0) {
3306                 RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
3307                                   __func__, errno);
3308                 close(sockfd);
3309                 return ret;
3310         }
3311
3312         ret = listen(sockfd, 20);
3313         if (ret < 0) {
3314                 RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
3315                                   __func__, errno);
3316                 close(sockfd);
3317                 return ret;
3318         }
3319
3320         for (;;) {
3321                 datafd = accept(sockfd, NULL, NULL);
3322                 if (datafd < 0) {
3323                         RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
3324                                           __func__, errno);
3325                         RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
3326                         close(sockfd);
3327                         return -EIO;
3328                 }
3329
3330                 while (1) {
3331                         ret = recv(datafd, &op, 4, 0);
3332                         if (ret <= 0) {
3333                                 PMD_CPP_LOG(DEBUG, "%s: socket close\n",
3334                                                    __func__);
3335                                 break;
3336                         }
3337
3338                         PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op);
3339
3340                         if (op == NFP_BRIDGE_OP_READ)
3341                                 nfp_cpp_bridge_serve_read(datafd, cpp);
3342
3343                         if (op == NFP_BRIDGE_OP_WRITE)
3344                                 nfp_cpp_bridge_serve_write(datafd, cpp);
3345
3346                         if (op == NFP_BRIDGE_OP_IOCTL)
3347                                 nfp_cpp_bridge_serve_ioctl(datafd, cpp);
3348
3349                         if (op == 0)
3350                                 break;
3351                 }
3352                 close(datafd);
3353         }
3354         close(sockfd);
3355
3356         return 0;
3357 }
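
/*
 * A minimal, self-contained client sketch, not part of the driver, showing
 * how a host tool could reach the bridge socket before issuing one of the
 * NFP_BRIDGE_OP_* requests. Error handling is reduced to the essentials and
 * the socket path simply mirrors the one hard-coded above.
 *
 *     #include <sys/socket.h>
 *     #include <sys/un.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *
 *     static int
 *     nfp_bridge_connect(void)
 *     {
 *             struct sockaddr addr;
 *             int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *             if (fd < 0)
 *                     return -1;
 *             memset(&addr, 0, sizeof(addr));
 *             addr.sa_family = AF_UNIX;
 *             strcpy(addr.sa_data, "/tmp/nfp_cpp");
 *             if (connect(fd, &addr, sizeof(addr)) < 0) {
 *                     close(fd);
 *                     return -1;
 *             }
 *             return fd;    // caller then sends a 4-byte NFP_BRIDGE_OP_* code
 *     }
 */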
3358
3359 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
3360
3361 static int
3362 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3363 {
3364         struct nfp_cpp *cpp = nsp->cpp;
3365         int fw_f;
3366         char *fw_buf;
3367         char fw_name[125];
3368         char serial[40];
3369         struct stat file_stat;
3370         off_t fsize, bytes;
3371
3372         /* Looking for firmware file in order of priority */
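        /*
         * For example (all values hypothetical), with serial
         * 00-15-4d-10-20-30, CPP interface 0x1001, PCI device 0000:04:00.0
         * and an AMDA0099 2x25G assembly, the candidates would be:
         *
         *   /lib/firmware/netronome/serial-00-15-4d-10-20-30-10-01.nffw
         *   /lib/firmware/netronome/pci-0000:04:00.0.nffw
         *   /lib/firmware/netronome/nic_AMDA0099-0001_2x25.nffw
         */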
3373
3374         /* First try to find a firmware image specific for this device */
3375         snprintf(serial, sizeof(serial),
3376                         "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3377                 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3378                 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3379                 cpp->interface & 0xff);
3380
3381         snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
3382                         serial);
3383
3384         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3385         fw_f = open(fw_name, O_RDONLY);
3386         if (fw_f >= 0)
3387                 goto read_fw;
3388
3389         /* Then try the PCI name */
3390         snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
3391                         dev->device.name);
3392
3393         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3394         fw_f = open(fw_name, O_RDONLY);
3395         if (fw_f >= 0)
3396                 goto read_fw;
3397
3398         /* Finally try the card type and media */
3399         snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
3400         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3401         fw_f = open(fw_name, O_RDONLY);
3402         if (fw_f < 0) {
3403                 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
3404                 return -ENOENT;
3405         }
3406
3407 read_fw:
3408         if (fstat(fw_f, &file_stat) < 0) {
3409                 PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
3410                 close(fw_f);
3411                 return -ENOENT;
3412         }
3413
3414         fsize = file_stat.st_size;
3415         PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
3416                             fw_name, (uint64_t)fsize);
3417
3418         fw_buf = malloc((size_t)fsize);
3419         if (!fw_buf) {
3420                 PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
3421                 close(fw_f);
3422                 return -ENOMEM;
3423         }
3424         memset(fw_buf, 0, fsize);
3425
3426         bytes = read(fw_f, fw_buf, fsize);
3427         if (bytes != fsize) {
3428                 PMD_DRV_LOG(INFO, "Reading fw to buffer failed. "
3429                                    "Just %" PRIu64 " of %" PRIu64 " bytes read",
3430                                    (uint64_t)bytes, (uint64_t)fsize);
3431                 free(fw_buf);
3432                 close(fw_f);
3433                 return -EIO;
3434         }
3435
3436         PMD_DRV_LOG(INFO, "Uploading the firmware ...");
3437         nfp_nsp_load_fw(nsp, fw_buf, bytes);
3438         PMD_DRV_LOG(INFO, "Done");
3439
3440         free(fw_buf);
3441         close(fw_f);
3442
3443         return 0;
3444 }
3445
3446 static int
3447 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3448              struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3449 {
3450         struct nfp_nsp *nsp;
3451         const char *nfp_fw_model;
3452         char card_desc[100];
3453         int err = 0;
3454
3455         nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3456
3457         if (nfp_fw_model) {
3458                 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
3459         } else {
3460                 PMD_DRV_LOG(ERR, "firmware model NOT found");
3461                 return -EIO;
3462         }
3463
3464         if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
3465                 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong number of ports: %u",
3466                        nfp_eth_table->count);
3467                 return -EIO;
3468         }
3469
3470         PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
3471                            nfp_eth_table->count);
3472
3473         PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
3474
3475         snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
3476                         nfp_fw_model, nfp_eth_table->count,
3477                         nfp_eth_table->ports[0].speed / 1000);
3478
3479         nsp = nfp_nsp_open(cpp);
3480         if (!nsp) {
3481                 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
3482                 return -EIO;
3483         }
3484
3485         nfp_nsp_device_soft_reset(nsp);
3486         err = nfp_fw_upload(dev, nsp, card_desc);
3487
3488         nfp_nsp_close(nsp);
3489         return err;
3490 }
3491
3492 static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
3493 {
3494         struct nfp_net_hw *hw;
3495         struct rte_eth_dev *eth_dev;
3496         int ret = 0;
3497         int i;
3498
3499         /* Loop through all physical ports on PF */
3500         for (i = 0; i < pf_dev->total_phyports; i++) {
3501                 const unsigned int numa_node = rte_socket_id();
3502                 char port_name[RTE_ETH_NAME_MAX_LEN];
3503
3504                 snprintf(port_name, sizeof(port_name), "%s_port%d",
3505                          pf_dev->pci_dev->device.name, i);
3506
3507                 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3508                         eth_dev = rte_eth_dev_attach_secondary(port_name);
3509                         if (!eth_dev) {
3510                                 RTE_LOG(ERR, EAL,
3511                                 "secondary process attach failed, "
3512                                 "ethdev doesn't exist\n");
3513                                 ret = -ENODEV;
3514                                 goto error;
3515                         }
3516
3517                         eth_dev->process_private = pf_dev->cpp;
3518                         goto nfp_net_init;
3519                 }
3520
3521                 /* First port has already been initialized */
3522                 if (i == 0) {
3523                         eth_dev = pf_dev->eth_dev;
3524                         goto skip_dev_alloc;
3525                 }
3526
3527                 /* Allocate an eth_dev for the remaining ports */
3528                 eth_dev = rte_eth_dev_allocate(port_name);
3529                 if (!eth_dev) {
3530                         ret = -ENODEV;
3531                         goto port_cleanup;
3532                 }
3533
3534                 /* Allocate memory for remaining ports */
3535                 eth_dev->data->dev_private =
3536                         rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
3537                                            RTE_CACHE_LINE_SIZE, numa_node);
3538                 if (!eth_dev->data->dev_private) {
3539                         ret = -ENOMEM;
3540                         rte_eth_dev_release_port(eth_dev);
3541                         goto port_cleanup;
3542                 }
3543
3544 skip_dev_alloc:
3545                 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3546
3547                 /* Add this device to the PF's array of physical ports */
3548                 pf_dev->ports[i] = hw;
3549
3550                 hw->pf_dev = pf_dev;
3551                 hw->cpp = pf_dev->cpp;
3552                 hw->eth_dev = eth_dev;
3553                 hw->idx = i;
3554                 hw->is_phyport = true;
3555
3556 nfp_net_init:
3557                 eth_dev->device = &pf_dev->pci_dev->device;
3558
3559                 /* ctrl/tx/rx BAR mappings and remaining init happens in
3560                  * nfp_net_init
3561                  */
3562                 ret = nfp_net_init(eth_dev);
3563
3564                 if (ret) {
3565                         ret = -ENODEV;
3566                         goto port_cleanup;
3567                 }
3568
3569                 rte_eth_dev_probing_finish(eth_dev);
3570
3571         } /* End loop, all ports on this PF */
3572         return 0;
3573
3574 port_cleanup:
3575         for (i = 0; i < pf_dev->total_phyports; i++) {
3576                 if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
3577                         struct rte_eth_dev *tmp_dev;
3578                         tmp_dev = pf_dev->ports[i]->eth_dev;
3579                         rte_eth_dev_release_port(tmp_dev);
3580                         pf_dev->ports[i] = NULL;
3581                 }
3582         }
3583 error:
3584         return ret;
3585 }
3586
3587 static int nfp_pf_init(struct rte_eth_dev *eth_dev)
3588 {
3589         struct rte_pci_device *pci_dev;
3590         struct nfp_net_hw *hw = NULL;
3591         struct nfp_pf_dev *pf_dev = NULL;
3592         struct nfp_cpp *cpp;
3593         struct nfp_hwinfo *hwinfo;
3594         struct nfp_rtsym_table *sym_tbl;
3595         struct nfp_eth_table *nfp_eth_table = NULL;
3596         struct rte_service_spec service;
3597         char name[RTE_ETH_NAME_MAX_LEN];
3598         int total_ports;
3599         int ret = -ENODEV;
3600         int err;
3601
3602         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3603         hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev);
3604
3605         if (!pci_dev)
3606                 return ret;
3607
3608         /*
3609          * When the device is bound to UIO it could, by mistake, be used
3610          * by two DPDK apps at the same time, and the UIO driver does not
3611          * prevent it. This could lead to serious problems when configuring
3612          * the NFP CPP interface. We avoid that here by telling the CPP
3613          * init code to use a lock file if UIO is being used.
3614          */
3615         if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
3616                 cpp = nfp_cpp_from_device_name(pci_dev, 0);
3617         else
3618                 cpp = nfp_cpp_from_device_name(pci_dev, 1);
3619
3620         if (!cpp) {
3621                 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
3622                 ret = -EIO;
3623                 goto error;
3624         }
3625
3626         hwinfo = nfp_hwinfo_read(cpp);
3627         if (!hwinfo) {
3628                 PMD_INIT_LOG(ERR, "Error reading hwinfo table");
3629                 ret = -EIO;
3630                 goto error;
3631         }
3632
3633         nfp_eth_table = nfp_eth_read_ports(cpp);
3634         if (!nfp_eth_table) {
3635                 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
3636                 ret = -EIO;
3637                 goto hwinfo_cleanup;
3638         }
3639
3640         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3641                 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
3642                         PMD_INIT_LOG(ERR, "Error when uploading firmware");
3643                         ret = -EIO;
3644                         goto eth_table_cleanup;
3645                 }
3646         }
3647
3648         /* Now the symbol table should be there */
3649         sym_tbl = nfp_rtsym_table_read(cpp);
3650         if (!sym_tbl) {
3651                 PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
3652                                 " symbol table");
3653                 ret = -EIO;
3654                 goto eth_table_cleanup;
3655         }
3656
3657         total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3658         if (total_ports != (int)nfp_eth_table->count) {
3659                 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
3660                 ret = -EIO;
3661                 goto sym_tbl_cleanup;
3662         }
3663
3664         PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);
3665
3666         if (total_ports <= 0 || total_ports > 8) {
3667                 PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
3668                 ret = -ENODEV;
3669                 goto sym_tbl_cleanup;
3670         }
3671         /* Allocate memory for the PF "device" */
3672         snprintf(name, sizeof(name), "nfp_pf%d", eth_dev->data->port_id);
3673         pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
3674         if (!pf_dev) {
3675                 ret = -ENOMEM;
3676                 goto sym_tbl_cleanup;
3677         }
3678
3679         /* Populate the newly created PF device */
3680         pf_dev->cpp = cpp;
3681         pf_dev->hwinfo = hwinfo;
3682         pf_dev->sym_tbl = sym_tbl;
3683         pf_dev->total_phyports = total_ports;
3684
3685         if (total_ports > 1)
3686                 pf_dev->multiport = true;
3687
3688         pf_dev->pci_dev = pci_dev;
3689
3690         /* The first eth_dev is part of the PF struct */
3691         pf_dev->eth_dev = eth_dev;
3692
3693         /* Map the symbol table */
3694         pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
3695                                      pf_dev->total_phyports * 32768,
3696                                      &pf_dev->ctrl_area);
3697         if (!pf_dev->ctrl_bar) {
3698                 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
3699                 ret = -EIO;
3700                 goto pf_cleanup;
3701         }
3702
3703         PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
3704
3705         /* configure access to tx/rx vNIC BARs */
3706         pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
3707                                               NFP_PCIE_QUEUE(0),
3708                                               NFP_QCP_QUEUE_AREA_SZ,
3709                                               &pf_dev->hwqueues_area);
3710         if (!pf_dev->hw_queues) {
3711                 PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
3712                 ret = -EIO;
3713                 goto ctrl_area_cleanup;
3714         }
3715
3716         PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);
3717
3718         /* Initialize and prep physical ports now
3719          * This will loop through all physical ports
3720          */
3721         ret = nfp_init_phyports(pf_dev);
3722         if (ret) {
3723                 PMD_INIT_LOG(ERR, "Could not create physical ports");
3724                 goto hwqueues_cleanup;
3725         }
3726
3727         /*
3728          * The rte_service needs to be created just once per PMD, and the
3729          * cpp handler needs to be linked to that service.
3730          * Secondary processes are used for debugging DPDK apps that
3731          * require the CPP interface for accessing NFP components, and
3732          * the cpp handler for secondary processes is already available
3733          * at this point.
3734          */
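        /*
         * Note: registering the component below only makes the service
         * known to rte_service. A sketch of what still has to happen,
         * via the standard rte_service API, for the bridge to actually
         * run (id is hw->nfp_cpp_service_id, lcore 3 is made up):
         *
         *     rte_service_component_runstate_set(id, 1);
         *     rte_service_runstate_set(id, 1);
         *     rte_service_lcore_add(3);
         *     rte_service_map_lcore_set(id, 3, 1);
         *     rte_service_lcore_start(3);
         */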
3735         memset(&service, 0, sizeof(struct rte_service_spec));
3736         snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
3737         service.callback = nfp_cpp_bridge_service_func;
3738         service.callback_userdata = (void *)cpp;
3739
3740         if (rte_service_component_register(&service,
3741                                            &hw->nfp_cpp_service_id))
3742                 RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed");
3743         else
3744                 RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
3745
3746         return 0;
3747
3748 hwqueues_cleanup:
3749         nfp_cpp_area_free(pf_dev->hwqueues_area);
3750 ctrl_area_cleanup:
3751         nfp_cpp_area_free(pf_dev->ctrl_area);
3752 pf_cleanup:
3753         rte_free(pf_dev);
3754 sym_tbl_cleanup:
3755         free(sym_tbl);
3756 eth_table_cleanup:
3757         free(nfp_eth_table);
3758 hwinfo_cleanup:
3759         free(hwinfo);
3760 error:
3761         return ret;
3762 }
3763
3764 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3765                             struct rte_pci_device *dev)
3766 {
3767         return rte_eth_dev_pci_generic_probe(dev,
3768                 sizeof(struct nfp_net_hw), nfp_pf_init);
3769 }
3770
3771 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3772         {
3773                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3774                                PCI_DEVICE_ID_NFP4000_PF_NIC)
3775         },
3776         {
3777                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3778                                PCI_DEVICE_ID_NFP6000_PF_NIC)
3779         },
3780         {
3781                 .vendor_id = 0,
3782         },
3783 };
3784
3785 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3786         {
3787                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3788                                PCI_DEVICE_ID_NFP6000_VF_NIC)
3789         },
3790         {
3791                 .vendor_id = 0,
3792         },
3793 };
3794
3795 static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
3796 {
3797         struct rte_pci_device *pci_dev;
3798         uint16_t port_id;
3799
3800         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3801
3802         if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
3803             pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
3804                 /* Free up all physical ports under PF */
3805                 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
3806                         rte_eth_dev_close(port_id);
3807                 /*
3808                  * Ports can be closed and freed but hotplugging is not
3809                  * currently supported
3810                  */
3811                 return -ENOTSUP;
3812         }
3813
3814         /* VF cleanup, just free private port data */
3815         return nfp_net_close(eth_dev);
3816 }
3817
3818 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3819         struct rte_pci_device *pci_dev)
3820 {
3821         return rte_eth_dev_pci_generic_probe(pci_dev,
3822                 sizeof(struct nfp_net_adapter), nfp_net_init);
3823 }
3824
3825 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3826 {
3827         return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
3828 }
3829
3830 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3831         .id_table = pci_id_nfp_pf_net_map,
3832         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3833         .probe = nfp_pf_pci_probe,
3834         .remove = eth_nfp_pci_remove,
3835 };
3836
3837 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3838         .id_table = pci_id_nfp_vf_net_map,
3839         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3840         .probe = eth_nfp_pci_probe,
3841         .remove = eth_nfp_pci_remove,
3842 };
3843
3844 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3845 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3846 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3847 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3848 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3849 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3850 RTE_LOG_REGISTER(nfp_logtype_init, pmd.net.nfp.init, NOTICE);
3851 RTE_LOG_REGISTER(nfp_logtype_driver, pmd.net.nfp.driver, NOTICE);
3852 /*
3853  * Local variables:
3854  * c-file-style: "Linux"
3855  * indent-tabs-mode: t
3856  * End:
3857  */