1 /*
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *  this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *  notice, this list of conditions and the following disclaimer in the
15  *  documentation and/or other materials provided with the distribution
16  *
17  * 3. Neither the name of the copyright holder nor the names of its
18  *  contributors may be used to endorse or promote products derived from this
19  *  software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 /*
35  * vim:shiftwidth=8:noexpandtab
36  *
37  * @file dpdk/pmd/nfp_net.c
38  *
39  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
40  */
41
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_ethdev_driver.h>
47 #include <rte_ethdev_pci.h>
48 #include <rte_dev.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_memzone.h>
52 #include <rte_mempool.h>
53 #include <rte_version.h>
54 #include <rte_string_fns.h>
55 #include <rte_alarm.h>
56 #include <rte_spinlock.h>
57
58 #include "nfpcore/nfp_cpp.h"
59 #include "nfpcore/nfp_nffw.h"
60 #include "nfpcore/nfp_hwinfo.h"
61 #include "nfpcore/nfp_mip.h"
62 #include "nfpcore/nfp_rtsym.h"
63 #include "nfpcore/nfp_nsp.h"
64
65 #include "nfp_net_pmd.h"
66 #include "nfp_net_logs.h"
67 #include "nfp_net_ctrl.h"
68
69 /* Prototypes */
70 static void nfp_net_close(struct rte_eth_dev *dev);
71 static int nfp_net_configure(struct rte_eth_dev *dev);
72 static void nfp_net_dev_interrupt_handler(void *param);
73 static void nfp_net_dev_interrupt_delayed_handler(void *param);
74 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
75 static void nfp_net_infos_get(struct rte_eth_dev *dev,
76                               struct rte_eth_dev_info *dev_info);
77 static int nfp_net_init(struct rte_eth_dev *eth_dev);
78 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
79 static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
80 static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
81 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
82 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
83                                        uint16_t queue_idx);
84 static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
85                                   uint16_t nb_pkts);
86 static void nfp_net_rx_queue_release(void *rxq);
87 static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
88                                   uint16_t nb_desc, unsigned int socket_id,
89                                   const struct rte_eth_rxconf *rx_conf,
90                                   struct rte_mempool *mp);
91 static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
92 static void nfp_net_tx_queue_release(void *txq);
93 static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
94                                   uint16_t nb_desc, unsigned int socket_id,
95                                   const struct rte_eth_txconf *tx_conf);
96 static int nfp_net_start(struct rte_eth_dev *dev);
97 static int nfp_net_stats_get(struct rte_eth_dev *dev,
98                               struct rte_eth_stats *stats);
99 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
100 static void nfp_net_stop(struct rte_eth_dev *dev);
101 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
102                                   uint16_t nb_pkts);
103
104 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
105 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
106                                    struct rte_eth_rss_conf *rss_conf);
107 static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
108                     struct rte_eth_rss_reta_entry64 *reta_conf,
109                     uint16_t reta_size);
110 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
111                         struct rte_eth_rss_conf *rss_conf);
112
113 /* The offset of the queue controller queues in the PCIe Target */
114 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
115
116 /* Maximum value which can be added to a queue with one transaction */
117 #define NFP_QCP_MAX_ADD 0x7f
118
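/*
 * IOVA of the start of packet data in an mbuf (buffer address plus the
 * headroom); used when posting buffers to the RX freelist.
 */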
119 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
120         (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
121
122 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
123 enum nfp_qcp_ptr {
124         NFP_QCP_READ_PTR = 0,
125         NFP_QCP_WRITE_PTR
126 };
127
128 /*
129  * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
130  * @q: Base address for queue structure
131  * @ptr: Add to the Read or Write pointer
132  * @val: Value to add to the queue pointer
133  *
134  * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
135  */
136 static inline void
137 nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
138 {
139         uint32_t off;
140
141         if (ptr == NFP_QCP_READ_PTR)
142                 off = NFP_QCP_QUEUE_ADD_RPTR;
143         else
144                 off = NFP_QCP_QUEUE_ADD_WPTR;
145
146         while (val > NFP_QCP_MAX_ADD) {
147                 nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
148                 val -= NFP_QCP_MAX_ADD;
149         }
150
151         nn_writel(rte_cpu_to_le_32(val), q + off);
152 }
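
/*
 * Usage sketch (illustrative only, not part of the original code): after
 * placing 'issued' new descriptors on a TX ring, the producer pointer would
 * be advanced with something like
 *
 *      nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued);
 *
 * where 'txq->qcp_q' and 'issued' are assumed names for the queue's QCP base
 * address and the number of freshly queued descriptors.
 */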
153
154 /*
155  * nfp_qcp_read - Read the current Read/Write pointer value for a queue
156  * @q:  Base address for queue structure
157  * @ptr: Read or Write pointer
158  */
159 static inline uint32_t
160 nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
161 {
162         uint32_t off;
163         uint32_t val;
164
165         if (ptr == NFP_QCP_READ_PTR)
166                 off = NFP_QCP_QUEUE_STS_LO;
167         else
168                 off = NFP_QCP_QUEUE_STS_HI;
169
170         val = rte_cpu_to_le_32(nn_readl(q + off));
171
172         if (ptr == NFP_QCP_READ_PTR)
173                 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
174         else
175                 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
176 }
177
178 /*
179  * Functions to read/write from/to Config BAR
180  * Performs any endian conversion necessary.
181  */
182 static inline uint8_t
183 nn_cfg_readb(struct nfp_net_hw *hw, int off)
184 {
185         return nn_readb(hw->ctrl_bar + off);
186 }
187
188 static inline void
189 nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
190 {
191         nn_writeb(val, hw->ctrl_bar + off);
192 }
193
194 static inline uint32_t
195 nn_cfg_readl(struct nfp_net_hw *hw, int off)
196 {
197         return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
198 }
199
200 static inline void
201 nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
202 {
203         nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
204 }
205
206 static inline uint64_t
207 nn_cfg_readq(struct nfp_net_hw *hw, int off)
208 {
209         return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
210 }
211
212 static inline void
213 nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
214 {
215         nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
216 }
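
/*
 * Example (sketch): reading the capability word advertised by the firmware
 * through these accessors, assuming NFP_NET_CFG_CAP as defined in
 * nfp_net_ctrl.h:
 *
 *      uint32_t cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 */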
217
218 static void
219 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
220 {
221         unsigned i;
222
223         if (rxq->rxbufs == NULL)
224                 return;
225
226         for (i = 0; i < rxq->rx_count; i++) {
227                 if (rxq->rxbufs[i].mbuf) {
228                         rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
229                         rxq->rxbufs[i].mbuf = NULL;
230                 }
231         }
232 }
233
234 static void
235 nfp_net_rx_queue_release(void *rx_queue)
236 {
237         struct nfp_net_rxq *rxq = rx_queue;
238
239         if (rxq) {
240                 nfp_net_rx_queue_release_mbufs(rxq);
241                 rte_free(rxq->rxbufs);
242                 rte_free(rxq);
243         }
244 }
245
246 static void
247 nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
248 {
249         nfp_net_rx_queue_release_mbufs(rxq);
250         rxq->rd_p = 0;
251         rxq->nb_rx_hold = 0;
252 }
253
254 static void
255 nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
256 {
257         unsigned i;
258
259         if (txq->txbufs == NULL)
260                 return;
261
262         for (i = 0; i < txq->tx_count; i++) {
263                 if (txq->txbufs[i].mbuf) {
264                         rte_pktmbuf_free(txq->txbufs[i].mbuf);
265                         txq->txbufs[i].mbuf = NULL;
266                 }
267         }
268 }
269
270 static void
271 nfp_net_tx_queue_release(void *tx_queue)
272 {
273         struct nfp_net_txq *txq = tx_queue;
274
275         if (txq) {
276                 nfp_net_tx_queue_release_mbufs(txq);
277                 rte_free(txq->txbufs);
278                 rte_free(txq);
279         }
280 }
281
282 static void
283 nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
284 {
285         nfp_net_tx_queue_release_mbufs(txq);
286         txq->wr_p = 0;
287         txq->rd_p = 0;
288 }
289
290 static int
291 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
292 {
293         int cnt;
294         uint32_t new;
295         struct timespec wait;
296
297         PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
298                     hw->qcp_cfg);
299
300         if (hw->qcp_cfg == NULL)
301                 rte_panic("Bad configuration queue pointer\n");
302
303         nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
304
305         wait.tv_sec = 0;
306         wait.tv_nsec = 1000000;
307
308         PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
309
310         /* Poll update field, waiting for NFP to ack the config */
311         for (cnt = 0; ; cnt++) {
312                 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
313                 if (new == 0)
314                         break;
315                 if (new & NFP_NET_CFG_UPDATE_ERR) {
316                         PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
317                         return -1;
318                 }
319                 if (cnt >= NFP_NET_POLL_TIMEOUT) {
320                         PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
321                                           " %dms", update, cnt);
322                         rte_panic("Exiting\n");
323                 }
324                 nanosleep(&wait, 0); /* wait 1ms */
325         }
326         PMD_DRV_LOG(DEBUG, "Ack DONE\n");
327         return 0;
328 }
329
330 /*
331  * Reconfigure the NIC
332  * @nn:    device to reconfigure
333  * @ctrl:    The value for the ctrl field in the BAR config
334  * @update:  The value for the update field in the BAR config
335  *
336  * Write the update word to the BAR and ping the reconfig queue. Then poll
337  * until the firmware has acknowledged the update by zeroing the update word.
338  */
339 static int
340 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
341 {
342         uint32_t err;
343
344         PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
345                     ctrl, update);
346
347         rte_spinlock_lock(&hw->reconfig_lock);
348
349         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
350         nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
351
352         rte_wmb();
353
354         err = __nfp_net_reconfig(hw, update);
355
356         rte_spinlock_unlock(&hw->reconfig_lock);
357
358         if (!err)
359                 return 0;
360
361         /*
362          * Reconfig errors returned here are ones the caller can handle;
363          * unrecoverable errors trigger rte_panic inside __nfp_net_reconfig
364          */
365         PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
366                      ctrl, update);
367         return -EIO;
368 }
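
/*
 * Usage sketch (illustrative): enabling RSS and asking the firmware to pick
 * up the change could look like
 *
 *      new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RSS;
 *      if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_RSS) < 0)
 *              return -EIO;
 *      hw->ctrl = new_ctrl;
 *
 * which mirrors how the start/stop paths below drive the device.
 */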
369
370 /*
371  * Configure an Ethernet device. This function must be invoked first
372  * before any other function in the Ethernet API. This function can
373  * also be re-invoked when a device is in the stopped state.
374  */
375 static int
376 nfp_net_configure(struct rte_eth_dev *dev)
377 {
378         struct rte_eth_conf *dev_conf;
379         struct rte_eth_rxmode *rxmode;
380         struct rte_eth_txmode *txmode;
381         struct nfp_net_hw *hw;
382
383         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
384
385         /*
386          * A DPDK app sends info about how many queues to use and how
387          * those queues need to be configured. The DPDK core uses that
388          * info and makes sure no more queues than those advertised by
389          * the driver are requested. This function is called after that
390          * internal process.
391          */
392
393         PMD_INIT_LOG(DEBUG, "Configure");
394
395         dev_conf = &dev->data->dev_conf;
396         rxmode = &dev_conf->rxmode;
397         txmode = &dev_conf->txmode;
398
399         /* Checking TX mode */
400         if (txmode->mq_mode) {
401                 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
402                 return -EINVAL;
403         }
404
405         /* Checking RX mode */
406         if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
407             !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
408                 PMD_INIT_LOG(INFO, "RSS not supported");
409                 return -EINVAL;
410         }
411
412         /* Checking RX offloads */
413         if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
414                 PMD_INIT_LOG(INFO, "rxmode does not support split header");
415                 return -EINVAL;
416         }
417
418         if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
419             !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
420                 PMD_INIT_LOG(INFO, "RXCSUM not supported");
421
422         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
423                 PMD_INIT_LOG(INFO, "VLAN filter not supported");
424                 return -EINVAL;
425         }
426
427         if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
428             !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
429                 PMD_INIT_LOG(INFO, "hw vlan strip not supported");
430                 return -EINVAL;
431         }
432
433         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
434                 PMD_INIT_LOG(INFO, "VLAN extended not supported");
435                 return -EINVAL;
436         }
437
438         if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
439                 PMD_INIT_LOG(INFO, "LRO not supported");
440                 return -EINVAL;
441         }
442
443         if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
444                 PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
445                 return -EINVAL;
446         }
447
448         if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
449                 PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
450                 return -EINVAL;
451         }
452
453         if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
454                 PMD_INIT_LOG(INFO, "MACSEC strip not supported");
455                 return -EINVAL;
456         }
457
463         if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
464                 PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
465
466         if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
467             !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
468                 PMD_INIT_LOG(INFO, "Scatter not supported");
469                 return -EINVAL;
470         }
471
472         if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
473                 PMD_INIT_LOG(INFO, "timestamp offload not supported");
474                 return -EINVAL;
475         }
476
477         if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
478                 PMD_INIT_LOG(INFO, "security offload not supported");
479                 return -EINVAL;
480         }
481
482         /* checking TX offloads */
483         if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
484             !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
485                 PMD_INIT_LOG(INFO, "vlan insert offload not supported");
486                 return -EINVAL;
487         }
488
489         if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
490             !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
491                 PMD_INIT_LOG(INFO, "TX checksum offload not supported");
492                 return -EINVAL;
493         }
494
495         if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
496                 PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
497                 return -EINVAL;
498         }
499
500         if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
501             !(hw->cap & NFP_NET_CFG_CTRL_LSO)) {
502                 PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
503                 return -EINVAL;
504         }
505
506         if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
507                 PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
508                 return -EINVAL;
509         }
510
511         if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
512                 PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
513                 return -EINVAL;
514         }
515
516         if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
517                 PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
518                 return -EINVAL;
519         }
520
521         if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
522             txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
523             txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
524             txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
525                 PMD_INIT_LOG(INFO, "tunneling offload not supported");
526                 return -EINVAL;
527         }
528
529         if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
530                 PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
531                 return -EINVAL;
532         }
533
534         if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
535                 PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
536                 return -EINVAL;
537         }
538
539         if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
540             !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
541                 PMD_INIT_LOG(INFO, "TX multisegs not supported");
542                 return -EINVAL;
543         }
544
545         if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
546                 PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
547                 return -EINVAL;
548         }
549
550         if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
551                 PMD_INIT_LOG(INFO, "TX security offload not supported");
552                 return -EINVAL;
553         }
554
555         return 0;
556 }
557
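/*
 * Enable in the device the TX and RX rings configured by the application,
 * by writing the corresponding ring bitmasks to the config BAR.
 */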
558 static void
559 nfp_net_enable_queues(struct rte_eth_dev *dev)
560 {
561         struct nfp_net_hw *hw;
562         uint64_t enabled_queues = 0;
563         int i;
564
565         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
566
567         /* Enabling the required TX queues in the device */
568         for (i = 0; i < dev->data->nb_tx_queues; i++)
569                 enabled_queues |= (1 << i);
570
571         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
572
573         enabled_queues = 0;
574
575         /* Enabling the required RX queues in the device */
576         for (i = 0; i < dev->data->nb_rx_queues; i++)
577                 enabled_queues |= (1 << i);
578
579         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
580 }
581
582 static void
583 nfp_net_disable_queues(struct rte_eth_dev *dev)
584 {
585         struct nfp_net_hw *hw;
586         uint32_t new_ctrl, update = 0;
587
588         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
589
590         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
591         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
592
593         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
594         update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
595                  NFP_NET_CFG_UPDATE_MSIX;
596
597         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
598                 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
599
600         /* If reconfig fails, avoid changing the hw state */
601         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
602                 return;
603
604         hw->ctrl = new_ctrl;
605 }
606
607 static int
608 nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
609 {
610         int i;
611
612         for (i = 0; i < dev->data->nb_rx_queues; i++) {
613                 if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
614                         return -1;
615         }
616         return 0;
617 }
618
619 static void
620 nfp_net_params_setup(struct nfp_net_hw *hw)
621 {
622         nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
623         nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
624 }
625
626 static void
627 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
628 {
629         hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
630 }
631
632 #define ETH_ADDR_LEN    6
633
634 static void
635 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
636 {
637         int i;
638
639         for (i = 0; i < ETH_ADDR_LEN; i++)
640                 dst[i] = src[i];
641 }
642
643 static int
644 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
645 {
646         struct nfp_eth_table *nfp_eth_table;
647
648         nfp_eth_table = nfp_eth_read_ports(hw->cpp);
        /* Bail out instead of dereferencing NULL if reading the table failed */
        if (!nfp_eth_table) {
                PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
                return -EIO;
        }
649         /*
650          * hw points to the port0 private data. We need hw now pointing
651          * to the right port.
652          */
653         hw += port;
654         nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
655                          (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
656
657         free(nfp_eth_table);
658         return 0;
659 }
660
661 static void
662 nfp_net_vf_read_mac(struct nfp_net_hw *hw)
663 {
664         uint32_t tmp;
665
666         tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
667         memcpy(&hw->mac_addr[0], &tmp, 4); /* tmp only holds 4 bytes */
668
669         tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
670         memcpy(&hw->mac_addr[4], &tmp, 2);
671 }
672
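/*
 * Write a MAC address into the config BAR: the first four bytes go to
 * NFP_NET_CFG_MACADDR (big endian) and the last two bytes to offset +6.
 */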
673 static void
674 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
675 {
676         uint32_t mac0 = *(uint32_t *)mac;
677         uint16_t mac1;
678
679         nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
680
681         mac += 4;
682         mac1 = *(uint16_t *)mac;
683         nn_writew(rte_cpu_to_be_16(mac1),
684                   hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
685 }
686
687 static int
688 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
689                            struct rte_intr_handle *intr_handle)
690 {
691         struct nfp_net_hw *hw;
692         int i;
693
694         if (!intr_handle->intr_vec) {
695                 intr_handle->intr_vec =
696                         rte_zmalloc("intr_vec",
697                                     dev->data->nb_rx_queues * sizeof(int), 0);
698                 if (!intr_handle->intr_vec) {
699                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
700                                      " intr_vec", dev->data->nb_rx_queues);
701                         return -ENOMEM;
702                 }
703         }
704
705         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
706
707         if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
708                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
709                 /* UIO just supports one queue and no LSC */
710                 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
711                 intr_handle->intr_vec[0] = 0;
712         } else {
713                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
714                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
715                         /*
716                          * The first MSI-X vector is reserved for
717                          * non-EFD interrupts
718                          */
719                         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
720                         intr_handle->intr_vec[i] = i + 1;
721                         PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
722                                             intr_handle->intr_vec[i]);
723                 }
724         }
725
726         /* Avoiding TX interrupts */
727         hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
728         return 0;
729 }
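
/*
 * From the application side this maps to the usual DPDK flow (sketch):
 * set dev_conf.intr_conf.rxq = 1 before rte_eth_dev_configure() and call
 * rte_eth_dev_rx_intr_enable(port_id, queue_id) once the port is started.
 */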
730
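/*
 * Translate the offloads requested in dev_conf into CTRL word bits for the
 * firmware. Unsupported combinations were already rejected in
 * nfp_net_configure().
 */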
731 static uint32_t
732 nfp_check_offloads(struct rte_eth_dev *dev)
733 {
734         struct nfp_net_hw *hw;
735         struct rte_eth_conf *dev_conf;
736         struct rte_eth_rxmode *rxmode;
737         struct rte_eth_txmode *txmode;
738         uint32_t ctrl = 0;
739
740         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
741
742         dev_conf = &dev->data->dev_conf;
743         rxmode = &dev_conf->rxmode;
744         txmode = &dev_conf->txmode;
745
746         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
747                 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
748                         ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
749         }
750
751         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
752                 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
753                         ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
754         }
755
756         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
757                 hw->mtu = rxmode->max_rx_pkt_len;
758
759         if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
760                 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
761
762         /* L2 broadcast */
763         if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
764                 ctrl |= NFP_NET_CFG_CTRL_L2BC;
765
766         /* L2 multicast */
767         if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
768                 ctrl |= NFP_NET_CFG_CTRL_L2MC;
769
770         /* TX checksum offload */
771         if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
772             txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
773             txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
774                 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
775
776         /* LSO offload */
777         if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO)
778                 ctrl |= NFP_NET_CFG_CTRL_LSO;
779
780         /* TX gather (multi-segment transmit) */
781         if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
782                 ctrl |= NFP_NET_CFG_CTRL_GATHER;
783
784         return ctrl;
785 }
786
787 static int
788 nfp_net_start(struct rte_eth_dev *dev)
789 {
790         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
791         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
792         uint32_t new_ctrl, update = 0;
793         struct nfp_net_hw *hw;
794         struct rte_eth_conf *dev_conf;
795         struct rte_eth_rxmode *rxmode;
796         uint32_t intr_vector;
797         int ret;
798
799         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
800
801         PMD_INIT_LOG(DEBUG, "Start");
802
803         /* Disabling queues just in case... */
804         nfp_net_disable_queues(dev);
805
806         /* Enabling the required queues in the device */
807         nfp_net_enable_queues(dev);
808
809         /* check and configure queue intr-vector mapping */
810         if (dev->data->dev_conf.intr_conf.rxq != 0) {
811                 if (hw->pf_multiport_enabled) {
812                         PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
813                                           "with NFP multiport PF");
814                         return -EINVAL;
815                 }
816                 if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
817                         /*
818                          * Better not to share LSC with RX interrupts.
819                          * Unregistering LSC interrupt handler
820                          */
821                         rte_intr_callback_unregister(&pci_dev->intr_handle,
822                                 nfp_net_dev_interrupt_handler, (void *)dev);
823
824                         if (dev->data->nb_rx_queues > 1) {
825                                 PMD_INIT_LOG(ERR, "PMD rx interrupt only "
826                                              "supports 1 queue with UIO");
827                                 return -EIO;
828                         }
829                 }
830                 intr_vector = dev->data->nb_rx_queues;
831                 if (rte_intr_efd_enable(intr_handle, intr_vector))
832                         return -1;
833
834                 nfp_configure_rx_interrupt(dev, intr_handle);
835                 update = NFP_NET_CFG_UPDATE_MSIX;
836         }
837
838         rte_intr_enable(intr_handle);
839
840         new_ctrl = nfp_check_offloads(dev);
841
842         /* Writing configuration parameters in the device */
843         nfp_net_params_setup(hw);
844
845         dev_conf = &dev->data->dev_conf;
846         rxmode = &dev_conf->rxmode;
847
848         if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
849                 nfp_net_rss_config_default(dev);
850                 update |= NFP_NET_CFG_UPDATE_RSS;
851                 new_ctrl |= NFP_NET_CFG_CTRL_RSS;
852         }
853
854         /* Enable device */
855         new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
856
857         update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
858
859         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
860                 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
861
862         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
863         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
864                 return -EIO;
865
866         /*
867          * Allocate rte mbufs for the configured rx queues.
868          * This requires the queues to be enabled beforehand.
869          */
870         if (nfp_net_rx_freelist_setup(dev) < 0) {
871                 ret = -ENOMEM;
872                 goto error;
873         }
874
875         if (hw->is_pf)
876                 /* Configure the physical port up */
877                 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
878
879         hw->ctrl = new_ctrl;
880
881         return 0;
882
883 error:
884         /*
885          * An error returned by this function should make the app exit,
886          * and then the system would release all the allocated memory,
887          * even the memory coming from hugepages.
888          *
889          * The device could be enabled at this point with some queues
890          * ready for getting packets. This is true if the call to
891          * nfp_net_rx_freelist_setup() succeeds for some queues but
892          * fails for subsequent queues.
893          *
894          * The app should exit in that case, but it is better to tell
895          * the device first.
896          */
897         nfp_net_disable_queues(dev);
898
899         return ret;
900 }
901
902 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
903 static void
904 nfp_net_stop(struct rte_eth_dev *dev)
905 {
906         int i;
907         struct nfp_net_hw *hw;
908
909         PMD_INIT_LOG(DEBUG, "Stop");
910
911         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
912
913         nfp_net_disable_queues(dev);
914
915         /* Clear queues */
916         for (i = 0; i < dev->data->nb_tx_queues; i++) {
917                 nfp_net_reset_tx_queue(
918                         (struct nfp_net_txq *)dev->data->tx_queues[i]);
919         }
920
921         for (i = 0; i < dev->data->nb_rx_queues; i++) {
922                 nfp_net_reset_rx_queue(
923                         (struct nfp_net_rxq *)dev->data->rx_queues[i]);
924         }
925
926         if (hw->is_pf)
927                 /* Configure the physical port down */
928                 nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
929 }
930
931 /* Reset and stop device. The device can not be restarted. */
932 static void
933 nfp_net_close(struct rte_eth_dev *dev)
934 {
935         struct nfp_net_hw *hw;
936         struct rte_pci_device *pci_dev;
937         int i;
938
939         PMD_INIT_LOG(DEBUG, "Close");
940
941         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
943
944         /*
945          * We assume that the DPDK application is stopping all the
946          * threads/queues before calling the device close function.
947          */
948
949         nfp_net_disable_queues(dev);
950
951         /* Clear queues */
952         for (i = 0; i < dev->data->nb_tx_queues; i++) {
953                 nfp_net_reset_tx_queue(
954                         (struct nfp_net_txq *)dev->data->tx_queues[i]);
955         }
956
957         for (i = 0; i < dev->data->nb_rx_queues; i++) {
958                 nfp_net_reset_rx_queue(
959                         (struct nfp_net_rxq *)dev->data->rx_queues[i]);
960         }
961
962         rte_intr_disable(&pci_dev->intr_handle);
963         nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
964
965         /* unregister callback func from eal lib */
966         rte_intr_callback_unregister(&pci_dev->intr_handle,
967                                      nfp_net_dev_interrupt_handler,
968                                      (void *)dev);
969
970         /*
971          * The ixgbe PMD driver disables the pcie master on the
972          * device. The i40e does not...
973          */
974 }
975
976 static void
977 nfp_net_promisc_enable(struct rte_eth_dev *dev)
978 {
979         uint32_t new_ctrl, update = 0;
980         struct nfp_net_hw *hw;
981
982         PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
983
984         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
985
986         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
987                 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
988                 return;
989         }
990
991         if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
992                 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
993                 return;
994         }
995
996         new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
997         update = NFP_NET_CFG_UPDATE_GEN;
998
999         /*
1000          * DPDK sets promiscuous mode on just after this call assuming
1001          * it can not fail ...
1002          */
1003         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
1004                 return;
1005
1006         hw->ctrl = new_ctrl;
1007 }
1008
1009 static void
1010 nfp_net_promisc_disable(struct rte_eth_dev *dev)
1011 {
1012         uint32_t new_ctrl, update = 0;
1013         struct nfp_net_hw *hw;
1014
1015         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1016
1017         if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
1018                 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
1019                 return;
1020         }
1021
1022         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
1023         update = NFP_NET_CFG_UPDATE_GEN;
1024
1025         /*
1026          * DPDK sets promiscuous mode off just before this call
1027          * assuming it can not fail ...
1028          */
1029         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
1030                 return;
1031
1032         hw->ctrl = new_ctrl;
1033 }
1034
1035 /*
1036  * return 0 means link status changed, -1 means not changed
1037  *
1038  * Wait to complete is needed as it can take up to 9 seconds to get the Link
1039  * status.
1040  */
1041 static int
1042 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1043 {
1044         struct nfp_net_hw *hw;
1045         struct rte_eth_link link;
1046         uint32_t nn_link_status;
1047         int ret;
1048
1049         static const uint32_t ls_to_ethtool[] = {
1050                 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
1051                 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
1052                 [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
1053                 [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
1054                 [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
1055                 [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
1056                 [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
1057                 [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
1058         };
1059
1060         PMD_DRV_LOG(DEBUG, "Link update\n");
1061
1062         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063
1064         nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
1065
1066         memset(&link, 0, sizeof(struct rte_eth_link));
1067
1068         if (nn_link_status & NFP_NET_CFG_STS_LINK)
1069                 link.link_status = ETH_LINK_UP;
1070
1071         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1072
1073         nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
1074                          NFP_NET_CFG_STS_LINK_RATE_MASK;
1075
1076         if (nn_link_status >= RTE_DIM(ls_to_ethtool))
1077                 link.link_speed = ETH_SPEED_NUM_NONE;
1078         else
1079                 link.link_speed = ls_to_ethtool[nn_link_status];
1080
1081         ret = rte_eth_linkstatus_set(dev, &link);
1082         if (ret == 0) {
1083                 if (link.link_status)
1084                         PMD_DRV_LOG(INFO, "NIC Link is Up\n");
1085                 else
1086                         PMD_DRV_LOG(INFO, "NIC Link is Down\n");
1087         }
1088         return ret;
1089 }
1090
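/*
 * Stats reported to the application are deltas against the values captured
 * in hw->eth_stats_base by nfp_net_stats_reset().
 */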
1091 static int
1092 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1093 {
1094         int i;
1095         struct nfp_net_hw *hw;
1096         struct rte_eth_stats nfp_dev_stats;
1097
1098         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1099
1100         /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
1101
1102         memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
1103
1104         /* reading per RX ring stats */
1105         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1106                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1107                         break;
1108
1109                 nfp_dev_stats.q_ipackets[i] =
1110                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1111
1112                 nfp_dev_stats.q_ipackets[i] -=
1113                         hw->eth_stats_base.q_ipackets[i];
1114
1115                 nfp_dev_stats.q_ibytes[i] =
1116                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1117
1118                 nfp_dev_stats.q_ibytes[i] -=
1119                         hw->eth_stats_base.q_ibytes[i];
1120         }
1121
1122         /* reading per TX ring stats */
1123         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1124                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1125                         break;
1126
1127                 nfp_dev_stats.q_opackets[i] =
1128                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1129
1130                 nfp_dev_stats.q_opackets[i] -=
1131                         hw->eth_stats_base.q_opackets[i];
1132
1133                 nfp_dev_stats.q_obytes[i] =
1134                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1135
1136                 nfp_dev_stats.q_obytes[i] -=
1137                         hw->eth_stats_base.q_obytes[i];
1138         }
1139
1140         nfp_dev_stats.ipackets =
1141                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1142
1143         nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
1144
1145         nfp_dev_stats.ibytes =
1146                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1147
1148         nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
1149
1150         nfp_dev_stats.opackets =
1151                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1152
1153         nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
1154
1155         nfp_dev_stats.obytes =
1156                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1157
1158         nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
1159
1160         /* reading general device stats */
1161         nfp_dev_stats.ierrors =
1162                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1163
1164         nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
1165
1166         nfp_dev_stats.oerrors =
1167                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1168
1169         nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
1170
1171         /* RX ring mbuf allocation failures */
1172         nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1173
1174         nfp_dev_stats.imissed =
1175                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1176
1177         nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
1178
1179         if (stats) {
1180                 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
1181                 return 0;
1182         }
1183         return -EINVAL;
1184 }
1185
1186 static void
1187 nfp_net_stats_reset(struct rte_eth_dev *dev)
1188 {
1189         int i;
1190         struct nfp_net_hw *hw;
1191
1192         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1193
1194         /*
1195          * hw->eth_stats_base records the per-counter starting point.
1196          * Let's update it now
1197          */
1198
1199         /* reading per RX ring stats */
1200         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1201                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1202                         break;
1203
1204                 hw->eth_stats_base.q_ipackets[i] =
1205                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
1206
1207                 hw->eth_stats_base.q_ibytes[i] =
1208                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
1209         }
1210
1211         /* reading per TX ring stats */
1212         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1213                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1214                         break;
1215
1216                 hw->eth_stats_base.q_opackets[i] =
1217                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
1218
1219                 hw->eth_stats_base.q_obytes[i] =
1220                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1221         }
1222
1223         hw->eth_stats_base.ipackets =
1224                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
1225
1226         hw->eth_stats_base.ibytes =
1227                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
1228
1229         hw->eth_stats_base.opackets =
1230                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
1231
1232         hw->eth_stats_base.obytes =
1233                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
1234
1235         /* reading general device stats */
1236         hw->eth_stats_base.ierrors =
1237                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
1238
1239         hw->eth_stats_base.oerrors =
1240                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
1241
1242         /* RX ring mbuf allocation failures */
1243         dev->data->rx_mbuf_alloc_failed = 0;
1244
1245         hw->eth_stats_base.imissed =
1246                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
1247 }
1248
1249 static void
1250 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1251 {
1252         struct nfp_net_hw *hw;
1253
1254         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1255
1256         dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1257         dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1258         dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1259         dev_info->max_rx_pktlen = hw->max_mtu;
1260         /* Next should change when PF support is implemented */
1261         dev_info->max_mac_addrs = 1;
1262
1263         if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
1264                 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1265
1266         if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
1267                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
1268                                              DEV_RX_OFFLOAD_UDP_CKSUM |
1269                                              DEV_RX_OFFLOAD_TCP_CKSUM;
1270
1271         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1272
1273         if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
1274                 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1275
1276         if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
1277                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
1278                                              DEV_TX_OFFLOAD_UDP_CKSUM |
1279                                              DEV_TX_OFFLOAD_TCP_CKSUM;
1280
1281         if (hw->cap & NFP_NET_CFG_CTRL_LSO)
1282                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
1283
1284         if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
1285                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
1286
1287         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1288                 .rx_thresh = {
1289                         .pthresh = DEFAULT_RX_PTHRESH,
1290                         .hthresh = DEFAULT_RX_HTHRESH,
1291                         .wthresh = DEFAULT_RX_WTHRESH,
1292                 },
1293                 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1294                 .rx_drop_en = 0,
1295         };
1296
1297         dev_info->default_txconf = (struct rte_eth_txconf) {
1298                 .tx_thresh = {
1299                         .pthresh = DEFAULT_TX_PTHRESH,
1300                         .hthresh = DEFAULT_TX_HTHRESH,
1301                         .wthresh = DEFAULT_TX_WTHRESH,
1302                 },
1303                 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1304                 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1305         };
1306
1307         dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
1308                                            ETH_RSS_NONFRAG_IPV4_UDP |
1309                                            ETH_RSS_NONFRAG_IPV6_TCP |
1310                                            ETH_RSS_NONFRAG_IPV6_UDP;
1311
1312         dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1313         dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1314
1315         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1316                                ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
1317                                ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
1318 }
1319
1320 static const uint32_t *
1321 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1322 {
1323         static const uint32_t ptypes[] = {
1324                 /* refers to nfp_net_set_hash() */
1325                 RTE_PTYPE_INNER_L3_IPV4,
1326                 RTE_PTYPE_INNER_L3_IPV6,
1327                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1328                 RTE_PTYPE_INNER_L4_MASK,
1329                 RTE_PTYPE_UNKNOWN
1330         };
1331
1332         if (dev->rx_pkt_burst == nfp_net_recv_pkts)
1333                 return ptypes;
1334         return NULL;
1335 }
1336
1337 static uint32_t
1338 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
1339 {
1340         struct nfp_net_rxq *rxq;
1341         struct nfp_net_rx_desc *rxds;
1342         uint32_t idx;
1343         uint32_t count;
1344
1345         rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
1346
1347         idx = rxq->rd_p;
1348
1349         count = 0;
1350
1351         /*
1352          * Other PMDs just check the DD bit every 4 descriptors and
1353          * count all four if the first one has the DD bit set. That is
1354          * not accurate but it is good for performance. Ideally it
1355          * should be done on descriptor chunks belonging to the same
1356          * cache line.
1357          */
1358
1359         while (count < rxq->rx_count) {
1360                 rxds = &rxq->rxds[idx];
1361                 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
1362                         break;
1363
1364                 count++;
1365                 idx++;
1366
1367                 /* Wrapping? */
1368                 if ((idx) == rxq->rx_count)
1369                         idx = 0;
1370         }
1371
1372         return count;
1373 }
1374
1375 static int
1376 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1377 {
1378         struct rte_pci_device *pci_dev;
1379         struct nfp_net_hw *hw;
1380         int base = 0;
1381
1382         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1383         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1384
1385         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1386                 base = 1;
1387
1388         /* Make sure all updates are written before un-masking */
1389         rte_wmb();
1390         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
1391                       NFP_NET_CFG_ICR_UNMASKED);
1392         return 0;
1393 }
1394
1395 static int
1396 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1397 {
1398         struct rte_pci_device *pci_dev;
1399         struct nfp_net_hw *hw;
1400         int base = 0;
1401
1402         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1403         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1404
1405         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
1406                 base = 1;
1407
1408         /* Make sure all updates are written before un-masking */
1409         rte_wmb();
1410         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
1411         return 0;
1412 }
1413
1414 static void
1415 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1416 {
1417         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1418         struct rte_eth_link link;
1419
1420         rte_eth_linkstatus_get(dev, &link);
1421         if (link.link_status)
1422                 RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
1423                         dev->data->port_id, link.link_speed,
1424                         link.link_duplex == ETH_LINK_FULL_DUPLEX
1425                         ? "full-duplex" : "half-duplex");
1426         else
1427                 RTE_LOG(INFO, PMD, "Port %d: Link Down\n",
1428                         dev->data->port_id);
1429
1430         RTE_LOG(INFO, PMD, "PCI Address: %04x:%02x:%02x.%x\n",
1431                 pci_dev->addr.domain, pci_dev->addr.bus,
1432                 pci_dev->addr.devid, pci_dev->addr.function);
1433 }
1434
1435 /* Interrupt configuration and handling */
1436
1437 /*
1438  * nfp_net_irq_unmask - Unmask an interrupt
1439  *
1440  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1441  * clear the ICR for the entry.
1442  */
1443 static void
1444 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1445 {
1446         struct nfp_net_hw *hw;
1447         struct rte_pci_device *pci_dev;
1448
1449         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1450         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1451
1452         if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
1453                 /* If MSI-X auto-masking is used, clear the entry */
1454                 rte_wmb();
1455                 rte_intr_enable(&pci_dev->intr_handle);
1456         } else {
1457                 /* Make sure all updates are written before un-masking */
1458                 rte_wmb();
1459                 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1460                               NFP_NET_CFG_ICR_UNMASKED);
1461         }
1462 }
1463
1464 static void
1465 nfp_net_dev_interrupt_handler(void *param)
1466 {
1467         int64_t timeout;
1468         struct rte_eth_link link;
1469         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1470
1471         PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!\n");
1472
1473         rte_eth_linkstatus_get(dev, &link);
1474
1475         nfp_net_link_update(dev, 0);
1476
1477         if (!link.link_status) {
1478                 /* link likely coming up; handle it 1 sec later, waiting
1479                  * for it to stabilize */
1480                 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1481         } else {
1482                 /* link likely going down; handle it 4 sec later, waiting
1483                  * for it to stabilize */
1484                 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1485         }
1486
1487         if (rte_eal_alarm_set(timeout * 1000,
1488                               nfp_net_dev_interrupt_delayed_handler,
1489                               (void *)dev) < 0) {
1490                 RTE_LOG(ERR, PMD, "Error setting alarm");
1491                 /* Unmasking */
1492                 nfp_net_irq_unmask(dev);
1493         }
1494 }
1495
1496 /*
1497  * Interrupt handler to be registered as an alarm callback for delayed
1498  * handling of a specific interrupt, waiting for a stable NIC state. As the
1499  * NFP interrupt state is not stable right after the link goes down, it is
1500  * necessary to wait 4 seconds to get a stable status.
1501  *
1502  * @param handle   Pointer to interrupt handle.
1503  * @param param    The address of parameter (struct rte_eth_dev *)
1504  *
1505  * @return  void
1506  */
1507 static void
1508 nfp_net_dev_interrupt_delayed_handler(void *param)
1509 {
1510         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1511
1512         nfp_net_link_update(dev, 0);
1513         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1514
1515         nfp_net_dev_link_status_print(dev);
1516
1517         /* Unmasking */
1518         nfp_net_irq_unmask(dev);
1519 }
1520
1521 static int
1522 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1523 {
1524         struct nfp_net_hw *hw;
1525
1526         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1527
1528         /* check that mtu is within the allowed range */
1529         if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
1530                 return -EINVAL;
1531
1532         /* mtu setting is forbidden if port is started */
1533         if (dev->data->dev_started) {
1534                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1535                             dev->data->port_id);
1536                 return -EBUSY;
1537         }
1538
1539         /* switch to jumbo mode if needed */
1540         if ((uint32_t)mtu > ETHER_MAX_LEN)
1541                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1542         else
1543                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1544
1545         /* update max frame size */
1546         dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
1547
1548         /* writing to configuration space */
1549         nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
1550
1551         hw->mtu = mtu;
1552
1553         return 0;
1554 }
1555
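/*
 * Illustrative sketch (not built with the driver): changing the MTU from an
 * application. As enforced above, the port must be stopped first; the port
 * id handling and the 9000 byte value are hypothetical.
 */
#if 0
static int
example_set_jumbo_mtu(uint16_t port_id)
{
        int ret;

        rte_eth_dev_stop(port_id);

        /* Ends up in nfp_net_dev_mtu_set() through the mtu_set dev op */
        ret = rte_eth_dev_set_mtu(port_id, 9000);
        if (ret < 0)
                return ret;

        return rte_eth_dev_start(port_id);
}
#endif
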
1556 static int
1557 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
1558                        uint16_t queue_idx, uint16_t nb_desc,
1559                        unsigned int socket_id,
1560                        const struct rte_eth_rxconf *rx_conf,
1561                        struct rte_mempool *mp)
1562 {
1563         const struct rte_memzone *tz;
1564         struct nfp_net_rxq *rxq;
1565         struct nfp_net_hw *hw;
1566         struct rte_eth_conf *dev_conf;
1567         struct rte_eth_rxmode *rxmode;
1568
1569         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1570
1571         PMD_INIT_FUNC_TRACE();
1572
1573         /* Validating number of descriptors */
1574         if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
1575             (nb_desc > NFP_NET_MAX_RX_DESC) ||
1576             (nb_desc < NFP_NET_MIN_RX_DESC)) {
1577                 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1578                 return -EINVAL;
1579         }
1580
1581         dev_conf = &dev->data->dev_conf;
1582         rxmode = &dev_conf->rxmode;
1583
1584         if (rx_conf->offloads != rxmode->offloads) {
1585                 RTE_LOG(ERR, PMD, "queue %u rx offloads not as port offloads\n",
1586                                   queue_idx);
1587                 RTE_LOG(ERR, PMD, "\tport: %" PRIx64 "\n", rxmode->offloads);
1588                 RTE_LOG(ERR, PMD, "\tqueue: %" PRIx64 "\n", rx_conf->offloads);
1589                 return -EINVAL;
1590         }
1591
1592         /*
1593          * Free memory prior to re-allocation if needed. This is the case after
1594          * calling nfp_net_stop
1595          */
1596         if (dev->data->rx_queues[queue_idx]) {
1597                 nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
1598                 dev->data->rx_queues[queue_idx] = NULL;
1599         }
1600
1601         /* Allocating rx queue data structure */
1602         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
1603                                  RTE_CACHE_LINE_SIZE, socket_id);
1604         if (rxq == NULL)
1605                 return -ENOMEM;
1606
1607         /* Hw queues mapping based on firmware configuration */
1608         rxq->qidx = queue_idx;
1609         rxq->fl_qcidx = queue_idx * hw->stride_rx;
1610         rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
1611         rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
1612         rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
1613
1614         /*
1615          * Tracking mbuf size for detecting a potential mbuf overflow due to
1616          * RX offset
1617          */
1618         rxq->mem_pool = mp;
1619         rxq->mbuf_size = rxq->mem_pool->elt_size;
1620         rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
1621         hw->flbufsz = rxq->mbuf_size;
1622
1623         rxq->rx_count = nb_desc;
1624         rxq->port_id = dev->data->port_id;
1625         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1626         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
1627                                   : ETHER_CRC_LEN);
1628         rxq->drop_en = rx_conf->rx_drop_en;
1629
1630         /*
1631          * Allocate RX ring hardware descriptors. A memzone large enough to
1632          * handle the maximum ring size is allocated in order to allow for
1633          * resizing in later calls to the queue setup function.
1634          */
1635         tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1636                                    sizeof(struct nfp_net_rx_desc) *
1637                                    NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
1638                                    socket_id);
1639
1640         if (tz == NULL) {
1641                 RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
1642                 nfp_net_rx_queue_release(rxq);
1643                 return -ENOMEM;
1644         }
1645
1646         /* Saving physical and virtual addresses for the RX ring */
1647         rxq->dma = (uint64_t)tz->iova;
1648         rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
1649
1650         /* mbuf pointers array for referencing mbufs linked to RX descriptors */
1651         rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
1652                                          sizeof(*rxq->rxbufs) * nb_desc,
1653                                          RTE_CACHE_LINE_SIZE, socket_id);
1654         if (rxq->rxbufs == NULL) {
1655                 nfp_net_rx_queue_release(rxq);
1656                 return -ENOMEM;
1657         }
1658
1659         PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1660                    rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
1661
1662         nfp_net_reset_rx_queue(rxq);
1663
1664         dev->data->rx_queues[queue_idx] = rxq;
1665         rxq->hw = hw;
1666
1667         /*
1668          * Telling the HW about the physical address of the RX ring and number
1669          * of descriptors in log2 format
1670          */
1671         nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
1672         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1673
1674         return 0;
1675 }
1676
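/*
 * Illustrative sketch (not built with the driver): creating an mbuf pool
 * whose data room leaves space for the RX offset tracked above, so the mbuf
 * overflow check in the RX path never triggers. The pool name, counts and
 * the 128 byte offset allowance are hypothetical values.
 */
#if 0
static struct rte_mempool *
example_create_rx_pool(uint16_t max_frame_len, int socket_id)
{
        /* headroom + room for the NFP RX offset + largest expected frame */
        uint16_t data_room = RTE_PKTMBUF_HEADROOM + 128 + max_frame_len;

        return rte_pktmbuf_pool_create("nfp_rx_pool", 8192, 256, 0,
                                       data_room, socket_id);
}
#endif
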
1677 static int
1678 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
1679 {
1680         struct nfp_net_rx_buff *rxe = rxq->rxbufs;
1681         uint64_t dma_addr;
1682         unsigned i;
1683
1684         PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
1685                    rxq->rx_count);
1686
1687         for (i = 0; i < rxq->rx_count; i++) {
1688                 struct nfp_net_rx_desc *rxd;
1689                 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
1690
1691                 if (mbuf == NULL) {
1692                         RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
1693                                 (unsigned)rxq->qidx);
1694                         return -ENOMEM;
1695                 }
1696
1697                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
1698
1699                 rxd = &rxq->rxds[i];
1700                 rxd->fld.dd = 0;
1701                 rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
1702                 rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
1703                 rxe[i].mbuf = mbuf;
1704                 PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
1705         }
1706
1707         /* Make sure all writes are flushed before telling the hardware */
1708         rte_wmb();
1709
1710         /* Not advertising the whole ring as the firmware gets confused if so */
1711         PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
1712                    rxq->rx_count - 1);
1713
1714         nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
1715
1716         return 0;
1717 }
1718
1719 static int
1720 nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1721                        uint16_t nb_desc, unsigned int socket_id,
1722                        const struct rte_eth_txconf *tx_conf)
1723 {
1724         const struct rte_memzone *tz;
1725         struct nfp_net_txq *txq;
1726         uint16_t tx_free_thresh;
1727         struct nfp_net_hw *hw;
1728         struct rte_eth_conf *dev_conf;
1729         struct rte_eth_txmode *txmode;
1730
1731         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1732
1733         PMD_INIT_FUNC_TRACE();
1734
1735         /* Validating number of descriptors */
1736         if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
1737             (nb_desc > NFP_NET_MAX_TX_DESC) ||
1738             (nb_desc < NFP_NET_MIN_TX_DESC)) {
1739                 RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
1740                 return -EINVAL;
1741         }
1742
1743         dev_conf = &dev->data->dev_conf;
1744         txmode = &dev_conf->txmode;
1745
1746         if (tx_conf->offloads != txmode->offloads) {
1747                 RTE_LOG(ERR, PMD, "queue %u tx offloads not as port offloads",
1748                                   queue_idx);
1749                 return -EINVAL;
1750         }
1751
1752         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1753                                     tx_conf->tx_free_thresh :
1754                                     DEFAULT_TX_FREE_THRESH);
1755
1756         if (tx_free_thresh > (nb_desc)) {
1757                 RTE_LOG(ERR, PMD,
1758                         "tx_free_thresh must be less than the number of TX "
1759                         "descriptors. (tx_free_thresh=%u port=%d "
1760                         "queue=%d)\n", (unsigned int)tx_free_thresh,
1761                         dev->data->port_id, (int)queue_idx);
1762                 return -(EINVAL);
1763         }
1764
1765         /*
1766          * Free memory prior to re-allocation if needed. This is the case after
1767          * calling nfp_net_stop
1768          */
1769         if (dev->data->tx_queues[queue_idx]) {
1770                 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
1771                            queue_idx);
1772                 nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
1773                 dev->data->tx_queues[queue_idx] = NULL;
1774         }
1775
1776         /* Allocating tx queue data structure */
1777         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
1778                                  RTE_CACHE_LINE_SIZE, socket_id);
1779         if (txq == NULL) {
1780                 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1781                 return -ENOMEM;
1782         }
1783
1784         /*
1785          * Allocate TX ring hardware descriptors. A memzone large enough to
1786          * handle the maximum ring size is allocated in order to allow for
1787          * resizing in later calls to the queue setup function.
1788          */
1789         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1790                                    sizeof(struct nfp_net_tx_desc) *
1791                                    NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
1792                                    socket_id);
1793         if (tz == NULL) {
1794                 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
1795                 nfp_net_tx_queue_release(txq);
1796                 return -ENOMEM;
1797         }
1798
1799         txq->tx_count = nb_desc;
1800         txq->tx_free_thresh = tx_free_thresh;
1801         txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
1802         txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
1803         txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
1804
1805         /* queue mapping based on firmware configuration */
1806         txq->qidx = queue_idx;
1807         txq->tx_qcidx = queue_idx * hw->stride_tx;
1808         txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
1809
1810         txq->port_id = dev->data->port_id;
1811
1812         /* Saving physical and virtual addresses for the TX ring */
1813         txq->dma = (uint64_t)tz->iova;
1814         txq->txds = (struct nfp_net_tx_desc *)tz->addr;
1815
1816         /* mbuf pointers array for referencing mbufs linked to TX descriptors */
1817         txq->txbufs = rte_zmalloc_socket("txq->txbufs",
1818                                          sizeof(*txq->txbufs) * nb_desc,
1819                                          RTE_CACHE_LINE_SIZE, socket_id);
1820         if (txq->txbufs == NULL) {
1821                 nfp_net_tx_queue_release(txq);
1822                 return -ENOMEM;
1823         }
1824         PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
1825                    txq->txbufs, txq->txds, (unsigned long int)txq->dma);
1826
1827         nfp_net_reset_tx_queue(txq);
1828
1829         dev->data->tx_queues[queue_idx] = txq;
1830         txq->hw = hw;
1831
1832         /*
1833          * Telling the HW about the physical address of the TX ring and number
1834          * of descriptors in log2 format
1835          */
1836         nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
1837         nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
1838
1839         return 0;
1840 }
1841
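/*
 * Illustrative sketch (not built with the driver): how an application sets
 * up a TX queue and overrides tx_free_thresh, which the setup function above
 * validates against nb_desc. Port id, descriptor count and threshold are
 * hypothetical values.
 */
#if 0
static int
example_setup_txq(uint16_t port_id, uint16_t queue_id, int socket_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;

        rte_eth_dev_info_get(port_id, &dev_info);
        txconf = dev_info.default_txconf;
        txconf.tx_free_thresh = 64;     /* free completed mbufs in batches */

        return rte_eth_tx_queue_setup(port_id, queue_id, 1024, socket_id,
                                      &txconf);
}
#endif
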
1842 /* nfp_net_tx_tso - Set TX descriptor for TSO */
1843 static inline void
1844 nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1845                struct rte_mbuf *mb)
1846 {
1847         uint64_t ol_flags;
1848         struct nfp_net_hw *hw = txq->hw;
1849
1850         if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
1851                 goto clean_txd;
1852
1853         ol_flags = mb->ol_flags;
1854
1855         if (!(ol_flags & PKT_TX_TCP_SEG))
1856                 goto clean_txd;
1857
1858         txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
1859         txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
1860         txd->flags = PCIE_DESC_TX_LSO;
1861         return;
1862
1863 clean_txd:
1864         txd->flags = 0;
1865         txd->l4_offset = 0;
1866         txd->lso = 0;
1867 }
1868
1869 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
1870 static inline void
1871 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
1872                  struct rte_mbuf *mb)
1873 {
1874         uint64_t ol_flags;
1875         struct nfp_net_hw *hw = txq->hw;
1876
1877         if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
1878                 return;
1879
1880         ol_flags = mb->ol_flags;
1881
1882         /* IPv6 header has no checksum, so only IPv4 needs the flag */
1883         if (ol_flags & PKT_TX_IP_CKSUM)
1884                 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
1885
1886         switch (ol_flags & PKT_TX_L4_MASK) {
1887         case PKT_TX_UDP_CKSUM:
1888                 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
1889                 break;
1890         case PKT_TX_TCP_CKSUM:
1891                 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
1892                 break;
1893         }
1894
1895         if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1896                 txd->flags |= PCIE_DESC_TX_CSUM;
1897 }
1898
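/*
 * Illustrative sketch (not built with the driver): the mbuf fields an
 * application fills in so that nfp_net_tx_tso() and nfp_net_tx_cksum() above
 * set the LSO and checksum bits in the TX descriptor. The header lengths and
 * MSS are hypothetical values for a plain TCP/IPv4 frame.
 */
#if 0
static void
example_request_tx_offloads(struct rte_mbuf *m)
{
        m->l2_len = 14;                 /* Ethernet header */
        m->l3_len = 20;                 /* IPv4 header without options */
        m->l4_len = 20;                 /* TCP header without options */
        m->tso_segsz = 1400;            /* MSS used when LSO kicks in */
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
}
#endif
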
1899 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
1900 static inline void
1901 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1902                  struct rte_mbuf *mb)
1903 {
1904         struct nfp_net_hw *hw = rxq->hw;
1905
1906         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
1907                 return;
1908
1909         /* If IPv4 and IP checksum error, fail */
1910         if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
1911             !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
1912                 mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1913
1914         /* If neither UDP nor TCP, return */
1915         if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1916             !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
1917                 return;
1918
1919         if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
1920             !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
1921                 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1922
1923         if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
1924             !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
1925                 mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1926 }
1927
1928 #define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
1929 #define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
1930
1931 #define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
1932
1933 /*
1934  * nfp_net_set_hash - Set mbuf hash data
1935  *
1936  * The RSS hash and hash-type are pre-pended to the packet data.
1937  * Extract and decode it and set the mbuf fields.
1938  */
1939 static inline void
1940 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
1941                  struct rte_mbuf *mbuf)
1942 {
1943         struct nfp_net_hw *hw = rxq->hw;
1944         uint8_t *meta_offset;
1945         uint32_t meta_info;
1946         uint32_t hash = 0;
1947         uint32_t hash_type = 0;
1948
1949         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1950                 return;
1951
1952         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
1953                 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1954                         return;
1955
1956                 hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
1957                 hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
1958
1959         } else if (NFP_DESC_META_LEN(rxd)) {
1960                 /*
1961                  * new metadata api:
1962                  * <----  32 bit  ----->
1963                  * m    field type word
1964                  * e     data field #2
1965                  * t     data field #1
1966                  * a     data field #0
1967                  * ====================
1968                  *    packet data
1969                  *
1970                  * Field type word contains up to 8 4bit field types
1971                  * A 4bit field type refers to a data field word
1972                  * A data field word can have several 4bit field types
1973                  */
1974                 meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
1975                 meta_offset -= NFP_DESC_META_LEN(rxd);
1976                 meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1977                 meta_offset += 4;
1978                 /* NFP PMD just supports metadata for hashing */
1979                 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1980                 case NFP_NET_META_HASH:
1981                         /* next field type is about the hash type */
1982                         meta_info >>= NFP_NET_META_FIELD_SIZE;
1983                         /* hash value is in the data field */
1984                         hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
1985                         hash_type = meta_info & NFP_NET_META_FIELD_MASK;
1986                         break;
1987                 default:
1988                         /* Unsupported metadata can be a performance issue */
1989                         return;
1990                 }
1991         } else {
1992                 return;
1993         }
1994
1995         mbuf->hash.rss = hash;
1996         mbuf->ol_flags |= PKT_RX_RSS_HASH;
1997
1998         switch (hash_type) {
1999         case NFP_NET_RSS_IPV4:
2000                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
2001                 break;
2002         case NFP_NET_RSS_IPV6:
2003                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
2004                 break;
2005         case NFP_NET_RSS_IPV6_EX:
2006                 mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
2007                 break;
2008         default:
2009                 mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
2010         }
2011 }
2012
2013 static inline void
2014 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
2015 {
2016         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
2017 }
2018
2021 /*
2022  * RX path design:
2023  *
2024  * There are some decisions to take:
2025  * 1) How to check the DD bit of RX descriptors
2026  * 2) How and when to allocate new mbufs
2027  *
2028  * The current implementation checks just one single DD bit each loop. As each
2029  * descriptor is 8 bytes, it is likely a good idea to check descriptors in
2030  * a single cache line instead. Tests with this change have not shown any
2031  * performance improvement but it requires further investigation. For example,
2032  * depending on which descriptor is next, the number of descriptors could be
2033  * less than 8 for just checking those in the same cache line. This implies
2034  * extra work which could be counterproductive by itself. Indeed, the latest
2035  * firmware changes are already doing this: writing several descriptors with
2036  * the DD bit to save PCIe bandwidth and DMA operations from the NFP.
2037  *
2038  * Mbuf allocation is done when a new packet is received. Then the descriptor
2039  * is automatically linked with the new mbuf and the old one is given to the
2040  * user. The main drawback with this design is that mbuf allocation is heavier
2041  * than using the bulk allocations allowed by DPDK with rte_mempool_get_bulk.
2042  * From the cache point of view, allocating the mbuf early on as we do now does
2043  * not seem to have any benefit at all. Again, tests with this change have not
2044  * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
2045  * so the implications of this type of allocation should be studied more
2046  * deeply. A sketch of that bulk-allocation alternative follows this comment.
2047  */
2048
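/*
 * Illustrative sketch (not built with the driver) of the bulk-allocation
 * alternative discussed above: taking a whole burst of mbufs from the pool
 * with one all-or-nothing rte_mempool_get_bulk() call instead of calling
 * rte_pktmbuf_alloc() once per received packet. The helper name and burst
 * handling are hypothetical.
 */
#if 0
static int
example_bulk_refill(struct nfp_net_rxq *rxq, struct rte_mbuf **mbufs,
                    unsigned int count)
{
        unsigned int i;

        /* All or nothing: on failure no object is taken from the pool */
        if (rte_mempool_get_bulk(rxq->mem_pool, (void **)mbufs, count) < 0)
                return -ENOMEM;

        /* Raw objects from the pool must be reset before use as mbufs */
        for (i = 0; i < count; i++)
                rte_pktmbuf_reset(mbufs[i]);

        return 0;
}
#endif
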
2049 static uint16_t
2050 nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2051 {
2052         struct nfp_net_rxq *rxq;
2053         struct nfp_net_rx_desc *rxds;
2054         struct nfp_net_rx_buff *rxb;
2055         struct nfp_net_hw *hw;
2056         struct rte_mbuf *mb;
2057         struct rte_mbuf *new_mb;
2058         uint16_t nb_hold;
2059         uint64_t dma_addr;
2060         int avail;
2061
2062         rxq = rx_queue;
2063         if (unlikely(rxq == NULL)) {
2064                 /*
2065                  * DPDK just checks the queue is lower than max queues
2066                  * enabled. But the queue needs to be configured
2067                  */
2068                 RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
2069                 return -EINVAL;
2070         }
2071
2072         hw = rxq->hw;
2073         avail = 0;
2074         nb_hold = 0;
2075
2076         while (avail < nb_pkts) {
2077                 rxb = &rxq->rxbufs[rxq->rd_p];
2078                 if (unlikely(rxb == NULL)) {
2079                         RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
2080                         break;
2081                 }
2082
2083                 rxds = &rxq->rxds[rxq->rd_p];
2084                 if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
2085                         break;
2086
2087                 /*
2088                  * Memory barrier to ensure that we won't do other
2089                  * reads before the DD bit.
2090                  */
2091                 rte_rmb();
2092
2093                 /*
2094                  * We got a packet. Let's alloc a new mbuf for refilling the
2095                  * free descriptor ring as soon as possible
2096                  */
2097                 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
2098                 if (unlikely(new_mb == NULL)) {
2099                         RTE_LOG_DP(DEBUG, PMD,
2100                         "RX mbuf alloc failed port_id=%u queue_id=%u\n",
2101                                 rxq->port_id, (unsigned int)rxq->qidx);
2102                         nfp_net_mbuf_alloc_failed(rxq);
2103                         break;
2104                 }
2105
2106                 nb_hold++;
2107
2108                 /*
2109                  * Grab the mbuf and refill the descriptor with the
2110                  * previously allocated mbuf
2111                  */
2112                 mb = rxb->mbuf;
2113                 rxb->mbuf = new_mb;
2114
2115                 PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
2116                            rxds->rxd.data_len, rxq->mbuf_size);
2117
2118                 /* Size of this segment */
2119                 mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2120                 /* Size of the whole packet. We just support 1 segment */
2121                 mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
2122
2123                 if (unlikely((mb->data_len + hw->rx_offset) >
2124                              rxq->mbuf_size)) {
2125                         /*
2126                          * This should not happen and the user has the
2127                          * responsibility of avoiding it. But we have
2128                          * to give some info about the error
2129                          */
2130                         RTE_LOG_DP(ERR, PMD,
2131                                 "mbuf overflow likely due to the RX offset.\n"
2132                                 "\t\tYour mbuf size should have extra space for"
2133                                 " RX offset=%u bytes.\n"
2134                                 "\t\tCurrently you just have %u bytes available"
2135                                 " but the received packet is %u bytes long",
2136                                 hw->rx_offset,
2137                                 rxq->mbuf_size - hw->rx_offset,
2138                                 mb->data_len);
2139                         return -EINVAL;
2140                 }
2141
2142                 /* Filling the received mbuf with packet info */
2143                 if (hw->rx_offset)
2144                         mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
2145                 else
2146                         mb->data_off = RTE_PKTMBUF_HEADROOM +
2147                                        NFP_DESC_META_LEN(rxds);
2148
2149                 /* No scatter mode supported */
2150                 mb->nb_segs = 1;
2151                 mb->next = NULL;
2152
2153                 mb->port = rxq->port_id;
2154
2155                 /* Checking the RSS flag */
2156                 nfp_net_set_hash(rxq, rxds, mb);
2157
2158                 /* Checking the checksum flag */
2159                 nfp_net_rx_cksum(rxq, rxds, mb);
2160
2161                 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
2162                     (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
2163                         mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
2164                         mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2165                 }
2166
2167                 /* Adding the mbuf to the mbuf array passed by the app */
2168                 rx_pkts[avail++] = mb;
2169
2170                 /* Now resetting and updating the descriptor */
2171                 rxds->vals[0] = 0;
2172                 rxds->vals[1] = 0;
2173                 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
2174                 rxds->fld.dd = 0;
2175                 rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
2176                 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
2177
2178                 rxq->rd_p++;
2179                 if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
2180                         rxq->rd_p = 0;
2181         }
2182
2183         if (nb_hold == 0)
2184                 return nb_hold;
2185
2186         PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received\n",
2187                    rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
2188
2189         nb_hold += rxq->nb_rx_hold;
2190
2191         /*
2192          * FL descriptors need to be written before incrementing the
2193          * FL queue WR pointer
2194          */
2195         rte_wmb();
2196         if (nb_hold > rxq->rx_free_thresh) {
2197                 PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
2198                            rxq->port_id, (unsigned int)rxq->qidx,
2199                            (unsigned)nb_hold, (unsigned)avail);
2200                 nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
2201                 nb_hold = 0;
2202         }
2203         rxq->nb_rx_hold = nb_hold;
2204
2205         return avail;
2206 }
2207
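/*
 * Illustrative sketch (not built with the driver): consuming the mbuf
 * metadata that nfp_net_recv_pkts() fills in above (RSS hash, checksum
 * status and stripped VLAN tag). The 32 packet burst size is hypothetical.
 */
#if 0
static void
example_rx_burst(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t i, nb_rx;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
        for (i = 0; i < nb_rx; i++) {
                struct rte_mbuf *m = pkts[i];

                if (m->ol_flags & PKT_RX_RSS_HASH)
                        printf("rss hash: 0x%x\n", m->hash.rss);
                if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
                        printf("bad checksum\n");
                if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
                        printf("vlan tci: %u\n", m->vlan_tci);

                rte_pktmbuf_free(m);
        }
}
#endif
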
2208 /*
2209  * nfp_net_tx_free_bufs - Check for descriptors with a complete
2210  * status
2211  * @txq: TX queue to work with
2212  * Returns number of descriptors freed
2213  */
2214 int
2215 nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
2216 {
2217         uint32_t qcp_rd_p;
2218         int todo;
2219
2220         PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
2221                    " status\n", txq->qidx);
2222
2223         /* Work out how many packets have been sent */
2224         qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
2225
2226         if (qcp_rd_p == txq->rd_p) {
2227                 PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
2228                            "packets (%u, %u)\n", txq->qidx,
2229                            qcp_rd_p, txq->rd_p);
2230                 return 0;
2231         }
2232
2233         if (qcp_rd_p > txq->rd_p)
2234                 todo = qcp_rd_p - txq->rd_p;
2235         else
2236                 todo = qcp_rd_p + txq->tx_count - txq->rd_p;
2237
2238         PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
2239                    qcp_rd_p, txq->rd_p, txq->rd_p);
2240
2241         if (todo == 0)
2242                 return todo;
2243
2244         txq->rd_p += todo;
2245         if (unlikely(txq->rd_p >= txq->tx_count))
2246                 txq->rd_p -= txq->tx_count;
2247
2248         return todo;
2249 }
2250
2251 /* Always leave some free descriptors to avoid wrapping confusion */
2252 static inline
2253 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
2254 {
2255         if (txq->wr_p >= txq->rd_p)
2256                 return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
2257         else
2258                 return txq->rd_p - txq->wr_p - 8;
2259 }
2260
2261 /*
2262  * nfp_net_txq_full - Check if the number of free TX descriptors
2263  * is below tx_free_threshold
2264  *
2265  * @txq: TX queue to check
2266  *
2267  * This function uses the host copy of the read/write pointers.
2268  */
2269 static inline
2270 uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
2271 {
2272         return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
2273 }
2274
2275 static uint16_t
2276 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2277 {
2278         struct nfp_net_txq *txq;
2279         struct nfp_net_hw *hw;
2280         struct nfp_net_tx_desc *txds, txd;
2281         struct rte_mbuf *pkt;
2282         uint64_t dma_addr;
2283         int pkt_size, dma_size;
2284         uint16_t free_descs, issued_descs;
2285         struct rte_mbuf **lmbuf;
2286         int i;
2287
2288         txq = tx_queue;
2289         hw = txq->hw;
2290         txds = &txq->txds[txq->wr_p];
2291
2292         PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
2293                    txq->qidx, txq->wr_p, nb_pkts);
2294
2295         if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2296                 nfp_net_tx_free_bufs(txq);
2297
2298         free_descs = (uint16_t)nfp_free_tx_desc(txq);
2299         if (unlikely(free_descs == 0))
2300                 return 0;
2301
2302         pkt = *tx_pkts;
2303
2304         i = 0;
2305         issued_descs = 0;
2306         PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
2307                    txq->qidx, nb_pkts);
2308         /* Sending packets */
2309         while ((i < nb_pkts) && free_descs) {
2310                 /* Grabbing the mbuf linked to the current descriptor */
2311                 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2312                 /* Warming the cache for releasing the mbuf later on */
2313                 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2314
2315                 pkt = *(tx_pkts + i);
2316
2317                 if (unlikely((pkt->nb_segs > 1) &&
2318                              !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2319                         PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2320                         rte_panic("Multisegment packet unsupported\n");
2321                 }
2322
2323                 /* Checking if we have enough descriptors */
2324                 if (unlikely(pkt->nb_segs > free_descs))
2325                         goto xmit_end;
2326
2327                 /*
2328                  * Checksum and VLAN flags are set just in the first descriptor
2329                  * of a multisegment packet, but TSO info needs to be in all of them.
2330                  */
2331                 txd.data_len = pkt->pkt_len;
2332                 nfp_net_tx_tso(txq, &txd, pkt);
2333                 nfp_net_tx_cksum(txq, &txd, pkt);
2334
2335                 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2336                     (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2337                         txd.flags |= PCIE_DESC_TX_VLAN;
2338                         txd.vlan = pkt->vlan_tci;
2339                 }
2340
2341                 /*
2342                  * mbuf data_len is the data in one segment and pkt_len data
2343                  * in the whole packet. When the packet is just one segment,
2344                  * then data_len = pkt_len
2345                  */
2346                 pkt_size = pkt->pkt_len;
2347
2348                 while (pkt) {
2349                         /* Copying TSO, VLAN and cksum info */
2350                         *txds = txd;
2351
2352                         /* Releasing mbuf used by this descriptor previously */
2353                         if (*lmbuf)
2354                                 rte_pktmbuf_free_seg(*lmbuf);
2355
2356                         /*
2357                          * Linking mbuf with descriptor for being released
2358                          * next time descriptor is used
2359                          */
2360                         *lmbuf = pkt;
2361
2362                         dma_size = pkt->data_len;
2363                         dma_addr = rte_mbuf_data_iova(pkt);
2364                         PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2365                                    "%" PRIx64 "\n", dma_addr);
2366
2367                         /* Filling descriptors fields */
2368                         txds->dma_len = dma_size;
2369                         txds->data_len = txd.data_len;
2370                         txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2371                         txds->dma_addr_lo = (dma_addr & 0xffffffff);
2372                         ASSERT(free_descs > 0);
2373                         free_descs--;
2374
2375                         txq->wr_p++;
2376                         if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
2377                                 txq->wr_p = 0;
2378
2379                         pkt_size -= dma_size;
2380                         if (!pkt_size)
2381                                 /* End of packet */
2382                                 txds->offset_eop |= PCIE_DESC_TX_EOP;
2383                         else
2384                                 txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
2385
2386                         pkt = pkt->next;
2387                         /* Referencing next free TX descriptor */
2388                         txds = &txq->txds[txq->wr_p];
2389                         lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2390                         issued_descs++;
2391                 }
2392                 i++;
2393         }
2394
2395 xmit_end:
2396         /* Increment write pointers. Force memory write before we let HW know */
2397         rte_wmb();
2398         nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
2399
2400         return i;
2401 }
2402
2403 static int
2404 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2405 {
2406         uint32_t new_ctrl, update;
2407         struct nfp_net_hw *hw;
2408         int ret;
2409
2410         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2411         new_ctrl = 0;
2412
2413         if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2414             (mask & ETH_VLAN_EXTEND_OFFLOAD))
2415                 RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
2416                         " ETH_VLAN_EXTEND_OFFLOAD");
2417
2418         /* Enable vlan strip if it is not configured yet */
2419         if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2420             !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2421                 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2422
2423         /* Disable vlan strip only if it is configured */
2424         if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2425             (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2426                 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2427
2428         if (new_ctrl == 0)
2429                 return 0;
2430
2431         update = NFP_NET_CFG_UPDATE_GEN;
2432
2433         ret = nfp_net_reconfig(hw, new_ctrl, update);
2434         if (!ret)
2435                 hw->ctrl = new_ctrl;
2436
2437         return ret;
2438 }
2439
2440 static int
2441 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2442                     struct rte_eth_rss_reta_entry64 *reta_conf,
2443                     uint16_t reta_size)
2444 {
2445         uint32_t reta, mask;
2446         int i, j;
2447         int idx, shift;
2448         struct nfp_net_hw *hw =
2449                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2450
2451         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2452                 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
2453                         "(%d) doesn't match the number hardware can supported "
2454                         "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2455                 return -EINVAL;
2456         }
2457
2458         /*
2459          * Update Redirection Table. There are 128 8bit-entries which can be
2460          * managed as 32 32bit-entries
2461          */
2462         for (i = 0; i < reta_size; i += 4) {
2463                 /* Handling 4 RSS entries per loop */
2464                 idx = i / RTE_RETA_GROUP_SIZE;
2465                 shift = i % RTE_RETA_GROUP_SIZE;
2466                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2467
2468                 if (!mask)
2469                         continue;
2470
2471                 reta = 0;
2472                 /* If all 4 entries were set, no need to read the RETA register */
2473                 if (mask != 0xF)
2474                         reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2475
2476                 for (j = 0; j < 4; j++) {
2477                         if (!(mask & (0x1 << j)))
2478                                 continue;
2479                         if (mask != 0xF)
2480                                 /* Clearing the entry bits */
2481                                 reta &= ~(0xFF << (8 * j));
2482                         reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2483                 }
2484                 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
2485                               reta);
2486         }
2487         return 0;
2488 }
2489
2490 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2491 static int
2492 nfp_net_reta_update(struct rte_eth_dev *dev,
2493                     struct rte_eth_rss_reta_entry64 *reta_conf,
2494                     uint16_t reta_size)
2495 {
2496         struct nfp_net_hw *hw =
2497                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2498         uint32_t update;
2499         int ret;
2500
2501         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2502                 return -EINVAL;
2503
2504         ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2505         if (ret != 0)
2506                 return ret;
2507
2508         update = NFP_NET_CFG_UPDATE_RSS;
2509
2510         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2511                 return -EIO;
2512
2513         return 0;
2514 }
2515
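/*
 * Illustrative sketch (not built with the driver): how an application fills
 * the rte_eth_rss_reta_entry64 array consumed by the reta_update callback
 * above, spreading the 128 NFP RETA entries across its RX queues. The queue
 * count handling is hypothetical.
 */
#if 0
static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        int i, idx, shift;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < 128; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
}
#endif
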
2516  /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2517 static int
2518 nfp_net_reta_query(struct rte_eth_dev *dev,
2519                    struct rte_eth_rss_reta_entry64 *reta_conf,
2520                    uint16_t reta_size)
2521 {
2522         uint8_t i, j, mask;
2523         int idx, shift;
2524         uint32_t reta;
2525         struct nfp_net_hw *hw;
2526
2527         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2528
2529         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2530                 return -EINVAL;
2531
2532         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2533                 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
2534                         "(%d) doesn't match the number hardware can supported "
2535                         "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2536                 return -EINVAL;
2537         }
2538
2539         /*
2540          * Reading Redirection Table. There are 128 8bit-entries which can be
2541          * managed as 32 32bit-entries
2542          */
2543         for (i = 0; i < reta_size; i += 4) {
2544                 /* Handling 4 RSS entries per loop */
2545                 idx = i / RTE_RETA_GROUP_SIZE;
2546                 shift = i % RTE_RETA_GROUP_SIZE;
2547                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2548
2549                 if (!mask)
2550                         continue;
2551
2552                 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2553                                     shift);
2554                 for (j = 0; j < 4; j++) {
2555                         if (!(mask & (0x1 << j)))
2556                                 continue;
2557                         reta_conf[idx].reta[shift + j] =
2558                                 (uint8_t)((reta >> (8 * j)) & 0xFF);
2559                 }
2560         }
2561         return 0;
2562 }
2563
2564 static int
2565 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2566                         struct rte_eth_rss_conf *rss_conf)
2567 {
2568         struct nfp_net_hw *hw;
2569         uint64_t rss_hf;
2570         uint32_t cfg_rss_ctrl = 0;
2571         uint8_t key;
2572         int i;
2573
2574         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2575
2576         /* Writing the key byte by byte */
2577         for (i = 0; i < rss_conf->rss_key_len; i++) {
2578                 memcpy(&key, &rss_conf->rss_key[i], 1);
2579                 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2580         }
2581
2582         rss_hf = rss_conf->rss_hf;
2583
2584         if (rss_hf & ETH_RSS_IPV4)
2585                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
2586                                 NFP_NET_CFG_RSS_IPV4_TCP |
2587                                 NFP_NET_CFG_RSS_IPV4_UDP;
2588
2589         if (rss_hf & ETH_RSS_IPV6)
2590                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
2591                                 NFP_NET_CFG_RSS_IPV6_TCP |
2592                                 NFP_NET_CFG_RSS_IPV6_UDP;
2593
2594         cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2595         cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2596
2597         /* configuring where to apply the RSS hash */
2598         nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
2599
2600         /* Writing the key size */
2601         nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
2602
2603         return 0;
2604 }
2605
2606 static int
2607 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2608                         struct rte_eth_rss_conf *rss_conf)
2609 {
2610         uint32_t update;
2611         uint64_t rss_hf;
2612         struct nfp_net_hw *hw;
2613
2614         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2615
2616         rss_hf = rss_conf->rss_hf;
2617
2618         /* Checking if RSS is enabled */
2619         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2620                 if (rss_hf != 0) { /* Enable RSS? */
2621                         RTE_LOG(ERR, PMD, "RSS unsupported\n");
2622                         return -EINVAL;
2623                 }
2624                 return 0; /* Nothing to do */
2625         }
2626
2627         if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2628                 RTE_LOG(ERR, PMD, "hash key too long\n");
2629                 return -EINVAL;
2630         }
2631
2632         nfp_net_rss_hash_write(dev, rss_conf);
2633
2634         update = NFP_NET_CFG_UPDATE_RSS;
2635
2636         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2637                 return -EIO;
2638
2639         return 0;
2640 }
2641
2642 static int
2643 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2644                           struct rte_eth_rss_conf *rss_conf)
2645 {
2646         uint64_t rss_hf;
2647         uint32_t cfg_rss_ctrl;
2648         uint8_t key;
2649         int i;
2650         struct nfp_net_hw *hw;
2651
2652         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2653
2654         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2655                 return -EINVAL;
2656
2657         rss_hf = rss_conf->rss_hf;
2658         cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2659
2660         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2661                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
2662
2663         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2664                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2665
2666         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2667                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2668
2669         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2670                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2671
2672         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2673                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2674
2675         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2676                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
2677
2678         /* Reading the key size */
2679         rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2680
2681         /* Reading the key byte by byte */
2682         for (i = 0; i < rss_conf->rss_key_len; i++) {
2683                 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2684                 memcpy(&rss_conf->rss_key[i], &key, 1);
2685         }
2686
2687         return 0;
2688 }
2689
2690 static int
2691 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2692 {
2693         struct rte_eth_conf *dev_conf;
2694         struct rte_eth_rss_conf rss_conf;
2695         struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2696         uint16_t rx_queues = dev->data->nb_rx_queues;
2697         uint16_t queue;
2698         int i, j, ret;
2699
2700         RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
2701                 rx_queues);
2702
2703         nfp_reta_conf[0].mask = ~0x0;
2704         nfp_reta_conf[1].mask = ~0x0;
2705
2706         queue = 0;
2707         for (i = 0; i < 0x40; i += 8) {
2708                 for (j = i; j < (i + 8); j++) {
2709                         nfp_reta_conf[0].reta[j] = queue;
2710                         nfp_reta_conf[1].reta[j] = queue++;
2711                         queue %= rx_queues;
2712                 }
2713         }
2714         ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2715         if (ret != 0)
2716                 return ret;
2717
2718         dev_conf = &dev->data->dev_conf;
2719         if (!dev_conf) {
2720                 RTE_LOG(INFO, PMD, "wrong rss conf");
2721                 return -EINVAL;
2722         }
2723         rss_conf = dev_conf->rx_adv_conf.rss_conf;
2724
2725         ret = nfp_net_rss_hash_write(dev, &rss_conf);
2726
2727         return ret;
2728 }
2729
2730
2731 /* Initialise and register driver with DPDK Application */
2732 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2733         .dev_configure          = nfp_net_configure,
2734         .dev_start              = nfp_net_start,
2735         .dev_stop               = nfp_net_stop,
2736         .dev_close              = nfp_net_close,
2737         .promiscuous_enable     = nfp_net_promisc_enable,
2738         .promiscuous_disable    = nfp_net_promisc_disable,
2739         .link_update            = nfp_net_link_update,
2740         .stats_get              = nfp_net_stats_get,
2741         .stats_reset            = nfp_net_stats_reset,
2742         .dev_infos_get          = nfp_net_infos_get,
2743         .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2744         .mtu_set                = nfp_net_dev_mtu_set,
2745         .vlan_offload_set       = nfp_net_vlan_offload_set,
2746         .reta_update            = nfp_net_reta_update,
2747         .reta_query             = nfp_net_reta_query,
2748         .rss_hash_update        = nfp_net_rss_hash_update,
2749         .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
2750         .rx_queue_setup         = nfp_net_rx_queue_setup,
2751         .rx_queue_release       = nfp_net_rx_queue_release,
2752         .rx_queue_count         = nfp_net_rx_queue_count,
2753         .tx_queue_setup         = nfp_net_tx_queue_setup,
2754         .tx_queue_release       = nfp_net_tx_queue_release,
2755         .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
2756         .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
2757 };
2758
2759 /*
2760  * Every eth_dev created gets its own private data, but before nfp_net_init
2761  * that private data references the private data of all PF ports. This is due
2762  * to how the vNIC bars are mapped based on the first port, so all ports need
2763  * info about port 0 private data. Inside nfp_net_init the private data pointer
2764  * is changed to the right address for each port once the bars have been mapped.
2765  *
2766  * This function helps to find out which port, and therefore which offset
2767  * inside the private data array, to use.
2768  */
2769 static int
2770 get_pf_port_number(char *name)
2771 {
2772         char *pf_str = name;
2773         int size = 0;
2774
2775         while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
2776                 pf_str++;
2777
2778         if (size == 30)
2779                 /*
2780                  * This should not happen at all and it would mean a major
2781                  * implementation fault.
2782                  */
2783                 rte_panic("nfp_net: problem with pf device name\n");
2784
2785         /* Expecting _portX with X within [0,7] */
2786         pf_str += 5;
2787
2788         return (int)strtol(pf_str, NULL, 10);
2789 }
2790
2791 static int
2792 nfp_net_init(struct rte_eth_dev *eth_dev)
2793 {
2794         struct rte_pci_device *pci_dev;
2795         struct nfp_net_hw *hw, *hwport0;
2796
2797         uint64_t tx_bar_off = 0, rx_bar_off = 0;
2798         uint32_t start_q;
2799         int stride = 4;
2800         int port = 0;
2801         int err;
2802
2803         PMD_INIT_FUNC_TRACE();
2804
2805         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2806
2807         if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2808             (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2809                 port = get_pf_port_number(eth_dev->data->name);
2810                 if (port < 0 || port > 7) {
2811                         RTE_LOG(ERR, PMD, "Port value is wrong\n");
2812                         return -ENODEV;
2813                 }
2814
2815                 PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
2816
2817                 /* This points to port 0 private data */
2818                 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2819
2820                 /* This points to the specific port private data */
2821                 hw = &hwport0[port];
2822         } else {
2823                 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2824                 hwport0 = 0;
2825         }
2826
2827         eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2828         eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2829         eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2830
2831         /* For secondary processes, the primary has done all the work */
2832         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2833                 return 0;
2834
2835         rte_eth_copy_pci_info(eth_dev, pci_dev);
2836
2837         hw->device_id = pci_dev->id.device_id;
2838         hw->vendor_id = pci_dev->id.vendor_id;
2839         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2840         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2841
2842         PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2843                      pci_dev->id.vendor_id, pci_dev->id.device_id,
2844                      pci_dev->addr.domain, pci_dev->addr.bus,
2845                      pci_dev->addr.devid, pci_dev->addr.function);
2846
2847         hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2848         if (hw->ctrl_bar == NULL) {
2849                 RTE_LOG(ERR, PMD,
2850                         "hw->ctrl_bar is NULL. BAR0 not configured\n");
2851                 return -ENODEV;
2852         }
2853
2854         if (hw->is_pf && port == 0) {
2855                 hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
2856                                              hw->total_ports * 32768,
2857                                              &hw->ctrl_area);
2858                 if (!hw->ctrl_bar) {
2859                         printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar\n");
2860                         return -EIO;
2861                 }
2862
2863                 PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2864         }
2865
2866         if (port > 0) {
2867                 if (!hwport0->ctrl_bar)
2868                         return -ENODEV;
2869
2870                 /* address based on port0 offset */
2871                 hw->ctrl_bar = hwport0->ctrl_bar +
2872                                (port * NFP_PF_CSR_SLICE_SIZE);
2873         }
2874
2875         PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
2876
2877         hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2878         hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2879
2880         /* Work out where in the BAR the queues start. */
2881         switch (pci_dev->id.device_id) {
2882         case PCI_DEVICE_ID_NFP4000_PF_NIC:
2883         case PCI_DEVICE_ID_NFP6000_PF_NIC:
2884         case PCI_DEVICE_ID_NFP6000_VF_NIC:
2885                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2886                 tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
2887                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2888                 rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
2889                 break;
2890         default:
2891                 RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
2892                 err = -ENODEV;
2893                 goto dev_err_ctrl_map;
2894         }
2895
2896         PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
2897         PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
2898
2899         if (hw->is_pf && port == 0) {
2900                 /* configure access to tx/rx vNIC BARs */
2901                 hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
2902                                                       NFP_PCIE_QUEUE(0),
2903                                                       NFP_QCP_QUEUE_AREA_SZ,
2904                                                       &hw->hwqueues_area);
2905
2906                 if (!hwport0->hw_queues) {
2907                         printf("nfp_rtsym_map fails for net.qc\n");
2908                         err = -EIO;
2909                         goto dev_err_ctrl_map;
2910                 }
2911
2912                 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p\n",
2913                                     hwport0->hw_queues);
2914         }
2915
2916         if (hw->is_pf) {
2917                 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2918                 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2919                 eth_dev->data->dev_private = hw;
2920         } else {
2921                 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2922                              tx_bar_off;
2923                 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2924                              rx_bar_off;
2925         }
2926
2927         PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2928                      hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2929
2930         nfp_net_cfg_queue_setup(hw);
2931
2932         /* Get some of the read-only fields from the config BAR */
2933         hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2934         hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2935         hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2936         hw->mtu = ETHER_MTU;
2937
2938         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2939                 hw->rx_offset = NFP_NET_RX_OFFSET;
2940         else
2941                 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
2942
2943         PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
2944                      hw->ver, hw->max_mtu);
2945         PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2946                      hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2947                      hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
2948                      hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
2949                      hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
2950                      hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
2951                      hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
2952                      hw->cap & NFP_NET_CFG_CTRL_TXVLAN  ? "TXVLAN "  : "",
2953                      hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2954                      hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
2955                      hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
2956                      hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "");
2957
2958         hw->ctrl = 0;
2959
2960         hw->stride_rx = stride;
2961         hw->stride_tx = stride;
2962
2963         PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2964                      hw->max_rx_queues, hw->max_tx_queues);
2965
2966         /* Initializing spinlock for reconfigs */
2967         rte_spinlock_init(&hw->reconfig_lock);
2968
2969         /* Allocating memory for mac addr */
2970         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2971         if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2973                 err = -ENOMEM;
2974                 goto dev_err_queues_map;
2975         }
2976
2977         if (hw->is_pf) {
2978                 nfp_net_pf_read_mac(hwport0, port);
2979                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2980         } else {
2981                 nfp_net_vf_read_mac(hw);
2982         }
2983
2984         if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
2985                 PMD_INIT_LOG(INFO, "Using random mac address for port %d\n",
2986                                    port);
2987                 /* Using random mac addresses for VFs */
2988                 eth_random_addr(&hw->mac_addr[0]);
2989                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2990         }
2991
2992         /* Copying mac address to DPDK eth_dev struct */
2993         ether_addr_copy((struct ether_addr *)hw->mac_addr,
2994                         &eth_dev->data->mac_addrs[0]);
2995
2996         PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2997                      "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2998                      eth_dev->data->port_id, pci_dev->id.vendor_id,
2999                      pci_dev->id.device_id,
3000                      hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
3001                      hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
3002
3003         /* Registering LSC interrupt handler */
3004         rte_intr_callback_register(&pci_dev->intr_handle,
3005                                    nfp_net_dev_interrupt_handler,
3006                                    (void *)eth_dev);
3007
3008         /* Telling the firmware about the LSC interrupt entry */
3009         nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
3010
3011         /* Recording current stats counters values */
3012         nfp_net_stats_reset(eth_dev);
3013
3014         return 0;
3015
3016 dev_err_queues_map:
3017                 nfp_cpp_area_free(hw->hwqueues_area);
3018 dev_err_ctrl_map:
3019                 nfp_cpp_area_free(hw->ctrl_area);
3020
3021         return err;
3022 }
3023
3024 static int
3025 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
3026                   struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
3027                   int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
3028 {
3029         struct rte_eth_dev *eth_dev;
3030         struct nfp_net_hw *hw;
3031         char *port_name;
3032         int ret;
3033
3034         port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
3035         if (!port_name)
3036                 return -ENOMEM;
3037
3038         if (ports > 1)
3039                 sprintf(port_name, "%s_port%d", dev->device.name, port);
3040         else
3041                 sprintf(port_name, "%s", dev->device.name);
3042
        eth_dev = rte_eth_dev_allocate(port_name);
        if (!eth_dev) {
                rte_free(port_name);
                return -ENOMEM;
        }
3046
3047         if (port == 0) {
3048                 *priv = rte_zmalloc(port_name,
3049                                     sizeof(struct nfp_net_adapter) * ports,
3050                                     RTE_CACHE_LINE_SIZE);
                if (!*priv) {
                        rte_eth_dev_release_port(eth_dev);
                        rte_free(port_name);
                        return -ENOMEM;
                }
3055         }
3056
3057         eth_dev->data->dev_private = *priv;
3058
3059         /*
3060          * dev_private pointing to port0 dev_private because we need
3061          * to configure vNIC bars based on port0 at nfp_net_init.
3062          * Then dev_private is adjusted per port.
3063          */
3064         hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
3065         hw->cpp = cpp;
3066         hw->hwinfo = hwinfo;
3067         hw->sym_tbl = sym_tbl;
3068         hw->pf_port_idx = phys_port;
3069         hw->is_pf = 1;
3070         if (ports > 1)
3071                 hw->pf_multiport_enabled = 1;
3072
3073         hw->total_ports = ports;
3074
3075         eth_dev->device = &dev->device;
3076         rte_eth_copy_pci_info(eth_dev, dev);
3077
3078         ret = nfp_net_init(eth_dev);
3079
3080         if (ret)
3081                 rte_eth_dev_release_port(eth_dev);
3082
3083         rte_free(port_name);
3084
3085         return ret;
3086 }
3087
3088 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
3089
3090 static int
3091 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3092 {
3093         struct nfp_cpp *cpp = nsp->cpp;
3094         int fw_f;
3095         char *fw_buf;
3096         char fw_name[100];
3097         char serial[100];
3098         struct stat file_stat;
3099         off_t fsize, bytes;
3100
3101         /* Looking for firmware file in order of priority */
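        /*
         * 1) a per-device image named after the card serial number
         * 2) an image named after the PCI device name
         * 3) a generic image for this card type and media configuration
         */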
3102
3103         /* First try to find a firmware image specific for this device */
3104         sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3105                 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3106                 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3107                 cpp->interface & 0xff);
3108
3109         sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
3110
3111         RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3112         fw_f = open(fw_name, O_RDONLY);
        if (fw_f >= 0)
3114                 goto read_fw;
3115
3116         /* Then try the PCI name */
3117         sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
3118
3119         RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3120         fw_f = open(fw_name, O_RDONLY);
        if (fw_f >= 0)
3122                 goto read_fw;
3123
3124         /* Finally try the card type and media */
3125         sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
3126         RTE_LOG(DEBUG, PMD, "Trying with fw file: %s\n", fw_name);
3127         fw_f = open(fw_name, O_RDONLY);
3128         if (fw_f < 0) {
3129                 RTE_LOG(INFO, PMD, "Firmware file %s not found.", fw_name);
3130                 return -ENOENT;
3131         }
3132
3133 read_fw:
3134         if (fstat(fw_f, &file_stat) < 0) {
3135                 RTE_LOG(INFO, PMD, "Firmware file %s size is unknown", fw_name);
3136                 close(fw_f);
3137                 return -ENOENT;
3138         }
3139
3140         fsize = file_stat.st_size;
3141         RTE_LOG(INFO, PMD, "Firmware file found at %s with size: %" PRIu64 "\n",
3142                             fw_name, (uint64_t)fsize);
3143
3144         fw_buf = malloc((size_t)fsize);
3145         if (!fw_buf) {
3146                 RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
3147                 close(fw_f);
3148                 return -ENOMEM;
3149         }
3150         memset(fw_buf, 0, fsize);
3151
3152         bytes = read(fw_f, fw_buf, fsize);
3153         if (bytes != fsize) {
3154                 RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n"
3155                                    "Just %" PRIu64 " of %" PRIu64 " bytes read",
3156                                    (uint64_t)bytes, (uint64_t)fsize);
3157                 free(fw_buf);
3158                 close(fw_f);
3159                 return -EIO;
3160         }
3161
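        /*
         * Hand the image over to the NFP Service Processor (NSP), which
         * performs the actual firmware load on the card.
         */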
3162         RTE_LOG(INFO, PMD, "Uploading the firmware ...");
3163         nfp_nsp_load_fw(nsp, fw_buf, bytes);
3164         RTE_LOG(INFO, PMD, "Done");
3165
3166         free(fw_buf);
3167         close(fw_f);
3168
3169         return 0;
3170 }
3171
3172 static int
3173 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3174              struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3175 {
3176         struct nfp_nsp *nsp;
3177         const char *nfp_fw_model;
3178         char card_desc[100];
3179         int err = 0;
3180
3181         nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3182
3183         if (nfp_fw_model) {
3184                 RTE_LOG(INFO, PMD, "firmware model found: %s\n", nfp_fw_model);
3185         } else {
3186                 RTE_LOG(ERR, PMD, "firmware model NOT found\n");
3187                 return -EIO;
3188         }
3189
3190         if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
                RTE_LOG(ERR, PMD, "NFP ethernet table reports invalid port count: %u\n",
3192                        nfp_eth_table->count);
3193                 return -EIO;
3194         }
3195
3196         RTE_LOG(INFO, PMD, "NFP ethernet port table reports %u ports\n",
3197                            nfp_eth_table->count);
3198
3199         RTE_LOG(INFO, PMD, "Port speed: %u\n", nfp_eth_table->ports[0].speed);
3200
3201         sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
3202                 nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
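        /*
         * e.g. an assembly "AMDA0099-0001" with two 25G ports resolves to
         * "nic_AMDA0099-0001_2x25.nffw".
         */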
3203
3204         nsp = nfp_nsp_open(cpp);
3205         if (!nsp) {
3206                 RTE_LOG(ERR, PMD, "NFP error when obtaining NSP handle\n");
3207                 return -EIO;
3208         }
3209
3210         nfp_nsp_device_soft_reset(nsp);
3211         err = nfp_fw_upload(dev, nsp, card_desc);
3212
3213         nfp_nsp_close(nsp);
3214         return err;
3215 }
3216
3217 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3218                             struct rte_pci_device *dev)
3219 {
3220         struct nfp_cpp *cpp;
3221         struct nfp_hwinfo *hwinfo;
3222         struct nfp_rtsym_table *sym_tbl;
3223         struct nfp_eth_table *nfp_eth_table = NULL;
3224         int total_ports;
        void *priv = NULL;
3226         int ret = -ENODEV;
3227         int err;
3228         int i;
3229
3230         if (!dev)
3231                 return ret;
3232
3233         cpp = nfp_cpp_from_device_name(dev->device.name);
3234         if (!cpp) {
                RTE_LOG(ERR, PMD, "A CPP handle cannot be obtained");
3236                 ret = -EIO;
3237                 goto error;
3238         }
3239
3240         hwinfo = nfp_hwinfo_read(cpp);
3241         if (!hwinfo) {
3242                 RTE_LOG(ERR, PMD, "Error reading hwinfo table");
3243                 return -EIO;
3244         }
3245
3246         nfp_eth_table = nfp_eth_read_ports(cpp);
3247         if (!nfp_eth_table) {
3248                 RTE_LOG(ERR, PMD, "Error reading NFP ethernet table\n");
3249                 return -EIO;
3250         }
3251
3252         if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
3253                 RTE_LOG(INFO, PMD, "Error when uploading firmware\n");
3254                 ret = -EIO;
3255                 goto error;
3256         }
3257
3258         /* Now the symbol table should be there */
3259         sym_tbl = nfp_rtsym_table_read(cpp);
3260         if (!sym_tbl) {
3261                 RTE_LOG(ERR, PMD, "Something is wrong with the firmware"
3262                                 " symbol table");
3263                 ret = -EIO;
3264                 goto error;
3265         }
3266
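        /*
         * The firmware exports the number of PF vNICs as the run-time symbol
         * nfd_cfg_pf0_num_ports; it must agree with the physical port count
         * reported by the NSP ethernet table read earlier.
         */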
3267         total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3268         if (total_ports != (int)nfp_eth_table->count) {
3269                 RTE_LOG(ERR, PMD, "Inconsistent number of ports\n");
3270                 ret = -EIO;
3271                 goto error;
3272         }
3273         PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
3274
3275         if (total_ports <= 0 || total_ports > 8) {
3276                 RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
3277                 ret = -ENODEV;
3278                 goto error;
3279         }
3280
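        /*
         * Create one ethdev per physical port. The adapter private data is
         * allocated by the first port and handed to the others through priv.
         */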
3281         for (i = 0; i < total_ports; i++) {
3282                 ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
3283                                         nfp_eth_table->ports[i].index,
3284                                         sym_tbl, &priv);
3285                 if (ret)
3286                         break;
3287         }
3288
3289 error:
3290         free(nfp_eth_table);
3291         return ret;
3292 }
3293
3294 int nfp_logtype_init;
3295 int nfp_logtype_driver;
3296
3297 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3298         {
3299                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3300                                PCI_DEVICE_ID_NFP4000_PF_NIC)
3301         },
3302         {
3303                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3304                                PCI_DEVICE_ID_NFP6000_PF_NIC)
3305         },
3306         {
3307                 .vendor_id = 0,
3308         },
3309 };
3310
3311 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3312         {
3313                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3314                                PCI_DEVICE_ID_NFP6000_VF_NIC)
3315         },
3316         {
3317                 .vendor_id = 0,
3318         },
3319 };
3320
3321 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3322         struct rte_pci_device *pci_dev)
3323 {
3324         return rte_eth_dev_pci_generic_probe(pci_dev,
3325                 sizeof(struct nfp_net_adapter), nfp_net_init);
3326 }
3327
3328 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3329 {
3330         struct rte_eth_dev *eth_dev;
3331         struct nfp_net_hw *hw, *hwport0;
3332         int port = 0;
3333
        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        /* Nothing to clean up if the ethdev has already been released */
        if (!eth_dev)
                return 0;
3335         if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
3336             (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
3337                 port = get_pf_port_number(eth_dev->data->name);
3338                 /*
                 * Hotplug is not possible with a multiport PF, although the
                 * data structures can still be freed through the first port.
3341                  */
3342                 if (port != 0)
3343                         return -ENOTSUP;
3344                 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3345                 hw = &hwport0[port];
3346                 nfp_cpp_area_free(hw->ctrl_area);
3347                 nfp_cpp_area_free(hw->hwqueues_area);
3348                 free(hw->hwinfo);
3349                 free(hw->sym_tbl);
3350                 nfp_cpp_free(hw->cpp);
3351         } else {
3352                 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3353         }
3354         /* hotplug is not possible with multiport PF */
3355         if (hw->pf_multiport_enabled)
3356                 return -ENOTSUP;
3357         return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3358 }
3359
3360 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3361         .id_table = pci_id_nfp_pf_net_map,
3362         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3363         .probe = nfp_pf_pci_probe,
3364         .remove = eth_nfp_pci_remove,
3365 };
3366
3367 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3368         .id_table = pci_id_nfp_vf_net_map,
3369         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3370         .probe = eth_nfp_pci_probe,
3371         .remove = eth_nfp_pci_remove,
3372 };
3373
3374 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3375 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3376 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3377 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3378 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3379 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3380
3381 RTE_INIT(nfp_init_log);
3382 static void
3383 nfp_init_log(void)
3384 {
3385         nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
3386         if (nfp_logtype_init >= 0)
3387                 rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
3388         nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
3389         if (nfp_logtype_driver >= 0)
3390                 rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
3391 }
3392 /*
3393  * Local variables:
3394  * c-file-style: "Linux"
3395  * indent-tabs-mode: t
3396  * End:
3397  */