drivers/net/nfp/nfp_common.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7
8 /*
9  * vim:shiftwidth=8:noexpandtab
10  *
11  * @file dpdk/pmd/nfp_common.c
12  *
13  * Netronome vNIC DPDK Poll-Mode Driver: Common files
14  */
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_dev.h>
23 #include <rte_ether.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_mempool.h>
27 #include <rte_version.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_spinlock.h>
31 #include <rte_service_component.h>
32
33 #include "nfpcore/nfp_cpp.h"
34 #include "nfpcore/nfp_nffw.h"
35 #include "nfpcore/nfp_hwinfo.h"
36 #include "nfpcore/nfp_mip.h"
37 #include "nfpcore/nfp_rtsym.h"
38 #include "nfpcore/nfp_nsp.h"
39
40 #include "nfp_common.h"
41 #include "nfp_rxtx.h"
42 #include "nfp_logs.h"
43 #include "nfp_ctrl.h"
44 #include "nfp_cpp_bridge.h"
45
46 #include <sys/types.h>
47 #include <sys/socket.h>
48 #include <sys/un.h>
49 #include <unistd.h>
50 #include <stdio.h>
51 #include <sys/ioctl.h>
52 #include <errno.h>
53
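/*
 * __nfp_net_reconfig() - Kick the configuration queue and poll until the
 * firmware clears the update word or reports an error. The caller must hold
 * hw->reconfig_lock and must have written the CTRL/UPDATE words beforehand.
 */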
54 static int
55 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
56 {
57         int cnt;
58         uint32_t new;
59         struct timespec wait;
60
61         PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
62                     hw->qcp_cfg);
63
64         if (hw->qcp_cfg == NULL)
65                 rte_panic("Bad configuration queue pointer\n");
66
67         nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
68
69         wait.tv_sec = 0;
70         wait.tv_nsec = 1000000;
71
72         PMD_DRV_LOG(DEBUG, "Polling for update ack...");
73
74         /* Poll update field, waiting for NFP to ack the config */
75         for (cnt = 0; ; cnt++) {
76                 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
77                 if (new == 0)
78                         break;
79                 if (new & NFP_NET_CFG_UPDATE_ERR) {
80                         PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
81                         return -1;
82                 }
83                 if (cnt >= NFP_NET_POLL_TIMEOUT) {
84                         PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
85                                           " %dms", update, cnt);
86                         rte_panic("Exiting\n");
87                 }
88                 nanosleep(&wait, 0); /* wait for 1ms */
89         }
90         PMD_DRV_LOG(DEBUG, "Ack DONE");
91         return 0;
92 }
93
94 /*
95  * Reconfigure the NIC
96  * @nn:    device to reconfigure
97  * @ctrl:    The value for the ctrl field in the BAR config
98  * @update:  The value for the update field in the BAR config
99  *
100  * Write the update word to the BAR and ping the reconfig queue. Then poll
101  * until the firmware has acknowledged the update by zeroing the update word.
102  */
103 int
104 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
105 {
106         uint32_t err;
107
108         PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
109                     ctrl, update);
110
111         rte_spinlock_lock(&hw->reconfig_lock);
112
113         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
114         nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
115
116         rte_wmb();
117
118         err = __nfp_net_reconfig(hw, update);
119
120         rte_spinlock_unlock(&hw->reconfig_lock);
121
122         if (!err)
123                 return 0;
124
125         /*
126          * Reconfig errors returned here are ones the caller can handle;
127          * fatal errors trigger rte_panic inside __nfp_net_reconfig.
128          */
129         PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
130                      ctrl, update);
131         return -EIO;
132 }
133
134 /*
135  * Configure an Ethernet device. This function must be invoked first
136  * before any other function in the Ethernet API. This function can
137  * also be re-invoked when a device is in the stopped state.
138  */
139 int
140 nfp_net_configure(struct rte_eth_dev *dev)
141 {
142         struct rte_eth_conf *dev_conf;
143         struct rte_eth_rxmode *rxmode;
144         struct rte_eth_txmode *txmode;
145         struct nfp_net_hw *hw;
146
147         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
148
149         /*
150          * A DPDK app specifies how many queues to use and how those
151          * queues should be configured. The DPDK core ensures that no
152          * more queues than those advertised by the driver are
153          * requested. This function is called after that internal
154          * check has been done.
155          */
156
157         PMD_INIT_LOG(DEBUG, "Configure");
158
159         dev_conf = &dev->data->dev_conf;
160         rxmode = &dev_conf->rxmode;
161         txmode = &dev_conf->txmode;
162
163         if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
164                 rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
165
166         /* Checking TX mode */
167         if (txmode->mq_mode) {
168                 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
169                 return -EINVAL;
170         }
171
172         /* Checking RX mode */
173         if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
174             !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
175                 PMD_INIT_LOG(INFO, "RSS not supported");
176                 return -EINVAL;
177         }
178
179         /* Checking MTU set */
180         if (rxmode->mtu > hw->flbufsz) {
181                 PMD_INIT_LOG(INFO, "MTU (%u) larger than current mbufsize (%u) not supported",
182                                     rxmode->mtu, hw->flbufsz);
183                 return -ERANGE;
184         }
185
186         return 0;
187 }
188
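/*
 * nfp_net_enable_queues() - Build enable bitmasks for all configured TX and
 * RX queues and write them to the TXRS/RXRS enable registers.
 */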
189 void
190 nfp_net_enable_queues(struct rte_eth_dev *dev)
191 {
192         struct nfp_net_hw *hw;
193         uint64_t enabled_queues = 0;
194         int i;
195
196         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
197
198         /* Enabling the required TX queues in the device */
199         for (i = 0; i < dev->data->nb_tx_queues; i++)
200                 enabled_queues |= (1ULL << i);
201
202         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
203
204         enabled_queues = 0;
205
206         /* Enabling the required RX queues in the device */
207         for (i = 0; i < dev->data->nb_rx_queues; i++)
208                 enabled_queues |= (1ULL << i);
209
210         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
211 }
212
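/*
 * nfp_net_disable_queues() - Clear the TX/RX ring enable masks and ask the
 * firmware to disable the device; hw->ctrl is only updated on success.
 */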
213 void
214 nfp_net_disable_queues(struct rte_eth_dev *dev)
215 {
216         struct nfp_net_hw *hw;
217         uint32_t new_ctrl, update = 0;
218
219         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
220
221         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
222         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
223
224         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
225         update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
226                  NFP_NET_CFG_UPDATE_MSIX;
227
228         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
229                 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
230
231         /* If reconfig fails, avoid changing the hw state */
232         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
233                 return;
234
235         hw->ctrl = new_ctrl;
236 }
237
238 void
239 nfp_net_params_setup(struct nfp_net_hw *hw)
240 {
241         nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
242         nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
243 }
244
245 void
246 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
247 {
248         hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
249 }
250
251 #define ETH_ADDR_LEN    6
252
253 void
254 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
255 {
256         int i;
257
258         for (i = 0; i < ETH_ADDR_LEN; i++)
259                 dst[i] = src[i];
260 }
261
262 void
263 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
264 {
265         uint32_t mac0 = *(uint32_t *)mac;
266         uint16_t mac1;
267
268         nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
269
270         mac += 4;
271         mac1 = *(uint16_t *)mac;
272         nn_writew(rte_cpu_to_be_16(mac1),
273                   hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
274 }
275
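/*
 * nfp_set_mac_addr() - Write a new MAC address to the port BAR and signal
 * the firmware. Rejected while the port is enabled unless the firmware
 * advertises live address change (NFP_NET_CFG_CTRL_LIVE_ADDR).
 */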
276 int
277 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
278 {
279         struct nfp_net_hw *hw;
280         uint32_t update, ctrl;
281
282         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
283         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
284             !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
285                 PMD_INIT_LOG(INFO, "MAC address cannot be changed when"
286                                   " the port is enabled");
287                 return -EBUSY;
288         }
289
290         /* Writing new MAC to the specific port BAR address */
291         nfp_net_write_mac(hw, (uint8_t *)mac_addr);
292
293         /* Signal the NIC about the change */
294         update = NFP_NET_CFG_UPDATE_MACADDR;
295         ctrl = hw->ctrl;
296         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
297             (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
298                 ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
299         if (nfp_net_reconfig(hw, ctrl, update) < 0) {
300                 PMD_INIT_LOG(INFO, "MAC address update failed");
301                 return -EIO;
302         }
303         return 0;
304 }
305
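/*
 * nfp_configure_rx_interrupt() - Map RX queues to interrupt vectors. With
 * UIO only one queue/vector is supported; with VFIO vector 0 is kept for
 * non-queue interrupts and queue i uses vector i + 1.
 */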
306 int
307 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
308                            struct rte_intr_handle *intr_handle)
309 {
310         struct nfp_net_hw *hw;
311         int i;
312
313         if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
314                                     dev->data->nb_rx_queues)) {
315                 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
316                              " intr_vec", dev->data->nb_rx_queues);
317                 return -ENOMEM;
318         }
319
320         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
321
322         if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
323                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
324                 /* UIO supports only one queue and no LSC */
325                 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
326                 if (rte_intr_vec_list_index_set(intr_handle, 0, 0))
327                         return -1;
328         } else {
329                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
330                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
331                         /*
332                          * The first MSI-X vector is reserved for
333                          * non-EFD interrupts
334                          */
335                         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
336                         if (rte_intr_vec_list_index_set(intr_handle, i,
337                                                                i + 1))
338                                 return -1;
339                         PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
340                                 rte_intr_vec_list_index_get(intr_handle,
341                                                                    i));
342                 }
343         }
344
345         /* Avoiding TX interrupts */
346         hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
347         return 0;
348 }
349
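/*
 * nfp_check_offloads() - Translate the requested RX/TX offloads into the
 * NFP_NET_CFG_CTRL_* bits supported by the firmware capabilities, and
 * record the current MTU in hw->mtu.
 */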
350 uint32_t
351 nfp_check_offloads(struct rte_eth_dev *dev)
352 {
353         struct nfp_net_hw *hw;
354         struct rte_eth_conf *dev_conf;
355         struct rte_eth_rxmode *rxmode;
356         struct rte_eth_txmode *txmode;
357         uint32_t ctrl = 0;
358
359         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
360
361         dev_conf = &dev->data->dev_conf;
362         rxmode = &dev_conf->rxmode;
363         txmode = &dev_conf->txmode;
364
365         if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
366                 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
367                         ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
368         }
369
370         if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
371                 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
372                         ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
373         }
374
375         hw->mtu = dev->data->mtu;
376
377         if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
378                 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
379
380         /* L2 broadcast */
381         if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
382                 ctrl |= NFP_NET_CFG_CTRL_L2BC;
383
384         /* L2 multicast */
385         if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
386                 ctrl |= NFP_NET_CFG_CTRL_L2MC;
387
388         /* TX checksum offload */
389         if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
390             txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
391             txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
392                 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
393
394         /* LSO offload */
395         if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
396                 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
397                         ctrl |= NFP_NET_CFG_CTRL_LSO;
398                 else
399                         ctrl |= NFP_NET_CFG_CTRL_LSO2;
400         }
401
402         /* TX gather (multi-segment transmit) */
403         if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
404                 ctrl |= NFP_NET_CFG_CTRL_GATHER;
405
406         return ctrl;
407 }
408
409 int
410 nfp_net_promisc_enable(struct rte_eth_dev *dev)
411 {
412         uint32_t new_ctrl, update = 0;
413         struct nfp_net_hw *hw;
414         int ret;
415
416         PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
417
418         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
419
420         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
421                 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
422                 return -ENOTSUP;
423         }
424
425         if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
426                 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
427                 return 0;
428         }
429
430         new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
431         update = NFP_NET_CFG_UPDATE_GEN;
432
433         /*
434          * DPDK sets promiscuous mode on just after this call assuming
435          * it can not fail ...
436          */
437         ret = nfp_net_reconfig(hw, new_ctrl, update);
438         if (ret < 0)
439                 return ret;
440
441         hw->ctrl = new_ctrl;
442
443         return 0;
444 }
445
446 int
447 nfp_net_promisc_disable(struct rte_eth_dev *dev)
448 {
449         uint32_t new_ctrl, update = 0;
450         struct nfp_net_hw *hw;
451         int ret;
452
453         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
454
455         if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
456                 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
457                 return 0;
458         }
459
460         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
461         update = NFP_NET_CFG_UPDATE_GEN;
462
463         /*
464          * DPDK sets promiscuous mode off just before this call
465          * assuming it can not fail ...
466          */
467         ret = nfp_net_reconfig(hw, new_ctrl, update);
468         if (ret < 0)
469                 return ret;
470
471         hw->ctrl = new_ctrl;
472
473         return 0;
474 }
475
476 /*
477  * return 0 means link status changed, -1 means not changed
478  *
479  * Wait to complete is needed as it can take up to 9 seconds to get the Link
480  * status.
481  */
482 int
483 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
484 {
485         struct nfp_net_hw *hw;
486         struct rte_eth_link link;
487         uint32_t nn_link_status;
488         int ret;
489
490         static const uint32_t ls_to_ethtool[] = {
491                 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
492                 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
493                 [NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
494                 [NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
495                 [NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
496                 [NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
497                 [NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
498                 [NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
499         };
500
501         PMD_DRV_LOG(DEBUG, "Link update");
502
503         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
504
505         nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
506
507         memset(&link, 0, sizeof(struct rte_eth_link));
508
509         if (nn_link_status & NFP_NET_CFG_STS_LINK)
510                 link.link_status = RTE_ETH_LINK_UP;
511
512         link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
513
514         nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
515                          NFP_NET_CFG_STS_LINK_RATE_MASK;
516
517         if (nn_link_status >= RTE_DIM(ls_to_ethtool))
518                 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
519         else
520                 link.link_speed = ls_to_ethtool[nn_link_status];
521
522         ret = rte_eth_linkstatus_set(dev, &link);
523         if (ret == 0) {
524                 if (link.link_status)
525                         PMD_DRV_LOG(INFO, "NIC Link is Up");
526                 else
527                         PMD_DRV_LOG(INFO, "NIC Link is Down");
528         }
529         return ret;
530 }
531
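/*
 * nfp_net_stats_get() - Read per-queue and global counters from the config
 * BAR and report them relative to the baseline captured in
 * hw->eth_stats_base.
 */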
532 int
533 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
534 {
535         int i;
536         struct nfp_net_hw *hw;
537         struct rte_eth_stats nfp_dev_stats;
538
539         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
540
541         /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
542
543         memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
544
545         /* reading per RX ring stats */
546         for (i = 0; i < dev->data->nb_rx_queues; i++) {
547                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
548                         break;
549
550                 nfp_dev_stats.q_ipackets[i] =
551                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
552
553                 nfp_dev_stats.q_ipackets[i] -=
554                         hw->eth_stats_base.q_ipackets[i];
555
556                 nfp_dev_stats.q_ibytes[i] =
557                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
558
559                 nfp_dev_stats.q_ibytes[i] -=
560                         hw->eth_stats_base.q_ibytes[i];
561         }
562
563         /* reading per TX ring stats */
564         for (i = 0; i < dev->data->nb_tx_queues; i++) {
565                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
566                         break;
567
568                 nfp_dev_stats.q_opackets[i] =
569                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
570
571                 nfp_dev_stats.q_opackets[i] -=
572                         hw->eth_stats_base.q_opackets[i];
573
574                 nfp_dev_stats.q_obytes[i] =
575                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
576
577                 nfp_dev_stats.q_obytes[i] -=
578                         hw->eth_stats_base.q_obytes[i];
579         }
580
581         nfp_dev_stats.ipackets =
582                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
583
584         nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
585
586         nfp_dev_stats.ibytes =
587                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
588
589         nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
590
591         nfp_dev_stats.opackets =
592                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
593
594         nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
595
596         nfp_dev_stats.obytes =
597                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
598
599         nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
600
601         /* reading general device stats */
602         nfp_dev_stats.ierrors =
603                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
604
605         nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
606
607         nfp_dev_stats.oerrors =
608                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
609
610         nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
611
612         /* RX ring mbuf allocation failures */
613         nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
614
615         nfp_dev_stats.imissed =
616                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
617
618         nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
619
620         if (stats) {
621                 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
622                 return 0;
623         }
624         return -EINVAL;
625 }
626
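/*
 * nfp_net_stats_reset() - Snapshot the current hardware counters into
 * hw->eth_stats_base so that subsequent stats_get() calls start from zero.
 */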
627 int
628 nfp_net_stats_reset(struct rte_eth_dev *dev)
629 {
630         int i;
631         struct nfp_net_hw *hw;
632
633         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
634
635         /*
636          * hw->eth_stats_base records the per-counter starting point.
637          * Let's update it now.
638          */
639
640         /* reading per RX ring stats */
641         for (i = 0; i < dev->data->nb_rx_queues; i++) {
642                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
643                         break;
644
645                 hw->eth_stats_base.q_ipackets[i] =
646                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
647
648                 hw->eth_stats_base.q_ibytes[i] =
649                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
650         }
651
652         /* reading per TX ring stats */
653         for (i = 0; i < dev->data->nb_tx_queues; i++) {
654                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
655                         break;
656
657                 hw->eth_stats_base.q_opackets[i] =
658                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
659
660                 hw->eth_stats_base.q_obytes[i] =
661                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
662         }
663
664         hw->eth_stats_base.ipackets =
665                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
666
667         hw->eth_stats_base.ibytes =
668                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
669
670         hw->eth_stats_base.opackets =
671                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
672
673         hw->eth_stats_base.obytes =
674                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
675
676         /* reading general device stats */
677         hw->eth_stats_base.ierrors =
678                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
679
680         hw->eth_stats_base.oerrors =
681                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
682
683         /* RX ring mbuf allocation failures */
684         dev->data->rx_mbuf_alloc_failed = 0;
685
686         hw->eth_stats_base.imissed =
687                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
688
689         return 0;
690 }
691
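/*
 * nfp_net_infos_get() - Report device limits, offload capabilities, default
 * ring thresholds and supported link speeds to the ethdev layer.
 */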
692 int
693 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
694 {
695         struct nfp_net_hw *hw;
696
697         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
698
699         dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
700         dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
701         dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
702         /*
703          * The maximum rx packet length (max_rx_pktlen) is set to the
704          * maximum supported frame size that the NFP can handle. This
705          * includes layer 2 headers, CRC and other metadata that can
706          * optionally be used.
707          * The maximum layer 3 MTU (max_mtu) is read from hardware,
708          * which was set by the firmware loaded onto the card.
709          */
710         dev_info->max_rx_pktlen = NFP_FRAME_SIZE_MAX;
711         dev_info->max_mtu = hw->max_mtu;
712         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
713         /* Next should change when PF support is implemented */
714         dev_info->max_mac_addrs = 1;
715
716         if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
717                 dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
718
719         if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
720                 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
721                                              RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
722                                              RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
723
724         if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
725                 dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
726
727         if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
728                 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
729                                              RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
730                                              RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
731
732         if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
733                 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
734
735         if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
736                 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
737
738         dev_info->default_rxconf = (struct rte_eth_rxconf) {
739                 .rx_thresh = {
740                         .pthresh = DEFAULT_RX_PTHRESH,
741                         .hthresh = DEFAULT_RX_HTHRESH,
742                         .wthresh = DEFAULT_RX_WTHRESH,
743                 },
744                 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
745                 .rx_drop_en = 0,
746         };
747
748         dev_info->default_txconf = (struct rte_eth_txconf) {
749                 .tx_thresh = {
750                         .pthresh = DEFAULT_TX_PTHRESH,
751                         .hthresh = DEFAULT_TX_HTHRESH,
752                         .wthresh = DEFAULT_TX_WTHRESH,
753                 },
754                 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
755                 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
756         };
757
758         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
759                 .nb_max = NFP_NET_MAX_RX_DESC,
760                 .nb_min = NFP_NET_MIN_RX_DESC,
761                 .nb_align = NFP_ALIGN_RING_DESC,
762         };
763
764         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
765                 .nb_max = NFP_NET_MAX_TX_DESC,
766                 .nb_min = NFP_NET_MIN_TX_DESC,
767                 .nb_align = NFP_ALIGN_RING_DESC,
768                 .nb_seg_max = NFP_TX_MAX_SEG,
769                 .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
770         };
771
772         if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
773                 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
774
775                 dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
776                                                    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
777                                                    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
778                                                    RTE_ETH_RSS_IPV6 |
779                                                    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
780                                                    RTE_ETH_RSS_NONFRAG_IPV6_UDP;
781
782                 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
783                 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
784         }
785
786         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
787                                RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
788                                RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
789
790         return 0;
791 }
792
793 const uint32_t *
794 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
795 {
796         static const uint32_t ptypes[] = {
797                 /* refers to nfp_net_set_hash() */
798                 RTE_PTYPE_INNER_L3_IPV4,
799                 RTE_PTYPE_INNER_L3_IPV6,
800                 RTE_PTYPE_INNER_L3_IPV6_EXT,
801                 RTE_PTYPE_INNER_L4_MASK,
802                 RTE_PTYPE_UNKNOWN
803         };
804
805         if (dev->rx_pkt_burst == nfp_net_recv_pkts)
806                 return ptypes;
807         return NULL;
808 }
809
810 int
811 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
812 {
813         struct rte_pci_device *pci_dev;
814         struct nfp_net_hw *hw;
815         int base = 0;
816
817         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
818         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
819
820         if (rte_intr_type_get(pci_dev->intr_handle) !=
821                                                         RTE_INTR_HANDLE_UIO)
822                 base = 1;
823
824         /* Make sure all updates are written before un-masking */
825         rte_wmb();
826         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
827                       NFP_NET_CFG_ICR_UNMASKED);
828         return 0;
829 }
830
831 int
832 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
833 {
834         struct rte_pci_device *pci_dev;
835         struct nfp_net_hw *hw;
836         int base = 0;
837
838         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
839         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
840
841         if (rte_intr_type_get(pci_dev->intr_handle) !=
842                                                         RTE_INTR_HANDLE_UIO)
843                 base = 1;
844
845         /* Make sure all updates are written before masking */
846         rte_wmb();
847         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
848         return 0;
849 }
850
851 static void
852 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
853 {
854         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
855         struct rte_eth_link link;
856
857         rte_eth_linkstatus_get(dev, &link);
858         if (link.link_status)
859                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
860                             dev->data->port_id, link.link_speed,
861                             link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
862                             ? "full-duplex" : "half-duplex");
863         else
864                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
865                             dev->data->port_id);
866
867         PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
868                     pci_dev->addr.domain, pci_dev->addr.bus,
869                     pci_dev->addr.devid, pci_dev->addr.function);
870 }
871
872 /* Interrupt configuration and handling */
873
874 /*
875  * nfp_net_irq_unmask - Unmask an interrupt
876  *
877  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
878  * clear the ICR for the entry.
879  */
880 static void
881 nfp_net_irq_unmask(struct rte_eth_dev *dev)
882 {
883         struct nfp_net_hw *hw;
884         struct rte_pci_device *pci_dev;
885
886         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
887         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
888
889         if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
890                 /* If MSI-X auto-masking is used, clear the entry */
891                 rte_wmb();
892                 rte_intr_ack(pci_dev->intr_handle);
893         } else {
894                 /* Make sure all updates are written before un-masking */
895                 rte_wmb();
896                 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
897                               NFP_NET_CFG_ICR_UNMASKED);
898         }
899 }
900
901 /*
902  * Interrupt handler registered as an alarm callback for delayed handling
903  * of a specific interrupt, waiting for the NIC state to become stable.
904  * As the NFP interrupt state is not stable right after the link goes
905  * down, up to 4 seconds of waiting are needed before a stable status
906  * can be read.
907  *
908  * @param param    The address of parameter (struct rte_eth_dev *)
909  *
910  * @return  void
911  */
912 void
913 nfp_net_dev_interrupt_delayed_handler(void *param)
914 {
915         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
916
917         nfp_net_link_update(dev, 0);
918         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
919
920         nfp_net_dev_link_status_print(dev);
921
922         /* Unmasking */
923         nfp_net_irq_unmask(dev);
924 }
925
926 void
927 nfp_net_dev_interrupt_handler(void *param)
928 {
929         int64_t timeout;
930         struct rte_eth_link link;
931         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
932
933         PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!!!");
934
935         rte_eth_linkstatus_get(dev, &link);
936
937         nfp_net_link_update(dev, 0);
938
939         /* Link was down, so it is likely coming up */
940         if (!link.link_status) {
941                 /* Handle it 1 sec later, waiting for it to be stable */
942                 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
943         /* Link was up, so it is likely going down */
944         } else {
945                 /* Handle it 4 sec later, waiting for it to be stable */
946                 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
947         }
948
949         if (rte_eal_alarm_set(timeout * 1000,
950                               nfp_net_dev_interrupt_delayed_handler,
951                               (void *)dev) < 0) {
952                 PMD_INIT_LOG(ERR, "Error setting alarm");
953                 /* Unmasking */
954                 nfp_net_irq_unmask(dev);
955         }
956 }
957
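/*
 * nfp_net_dev_mtu_set() - Update the device MTU. Only allowed while the
 * port is stopped and while the MTU fits in the configured mbuf size.
 */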
958 int
959 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
960 {
961         struct nfp_net_hw *hw;
962
963         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
964
965         /* MTU setting is forbidden if the port is started */
966         if (dev->data->dev_started) {
967                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
968                             dev->data->port_id);
969                 return -EBUSY;
970         }
971
972         /* MTU larger than current mbufsize not supported */
973         if (mtu > hw->flbufsz) {
974                 PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
975                             mtu, hw->flbufsz);
976                 return -ERANGE;
977         }
978
979         /* writing to configuration space */
980         nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
981
982         hw->mtu = mtu;
983
984         return 0;
985 }
986
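/*
 * nfp_net_vlan_offload_set() - Enable or disable VLAN stripping according
 * to the current rxmode offloads and reconfigure the firmware if needed.
 */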
987 int
988 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
989 {
990         uint32_t new_ctrl, update;
991         struct nfp_net_hw *hw;
992         struct rte_eth_conf *dev_conf;
993         int ret;
994
995         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
996         dev_conf = &dev->data->dev_conf;
997         new_ctrl = hw->ctrl;
998
999         /*
1000          * VLAN stripping setting:
1001          * enable or disable VLAN stripping based on the requested offload
1002          */
1003         if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1004                 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1005                         new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
1006                 else
1007                         new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
1008         }
1009
1010         if (new_ctrl == hw->ctrl)
1011                 return 0;
1012
1013         update = NFP_NET_CFG_UPDATE_GEN;
1014
1015         ret = nfp_net_reconfig(hw, new_ctrl, update);
1016         if (!ret)
1017                 hw->ctrl = new_ctrl;
1018
1019         return ret;
1020 }
1021
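/*
 * nfp_net_rss_reta_write() - Write the RSS redirection table to the config
 * BAR, 4 entries (one 32-bit word) at a time, honouring the per-group mask.
 */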
1022 static int
1023 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1024                     struct rte_eth_rss_reta_entry64 *reta_conf,
1025                     uint16_t reta_size)
1026 {
1027         uint32_t reta, mask;
1028         int i, j;
1029         int idx, shift;
1030         struct nfp_net_hw *hw =
1031                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1032
1033         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1034                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1035                         "(%d) doesn't match the number the hardware can support "
1036                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1037                 return -EINVAL;
1038         }
1039
1040         /*
1041          * Update Redirection Table. There are 128 8-bit entries which can
1042          * be managed as 32 32-bit entries.
1043          */
1044         for (i = 0; i < reta_size; i += 4) {
1045                 /* Handling 4 RSS entries per loop */
1046                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1047                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1048                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1049
1050                 if (!mask)
1051                         continue;
1052
1053                 reta = 0;
1054                 /* If all 4 entries were set, no need to read the RETA register */
1055                 if (mask != 0xF)
1056                         reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1057
1058                 for (j = 0; j < 4; j++) {
1059                         if (!(mask & (0x1 << j)))
1060                                 continue;
1061                         if (mask != 0xF)
1062                                 /* Clearing the entry bits */
1063                                 reta &= ~(0xFF << (8 * j));
1064                         reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1065                 }
1066                 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
1067                               reta);
1068         }
1069         return 0;
1070 }
1071
1072 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
1073 int
1074 nfp_net_reta_update(struct rte_eth_dev *dev,
1075                     struct rte_eth_rss_reta_entry64 *reta_conf,
1076                     uint16_t reta_size)
1077 {
1078         struct nfp_net_hw *hw =
1079                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1080         uint32_t update;
1081         int ret;
1082
1083         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1084                 return -EINVAL;
1085
1086         ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1087         if (ret != 0)
1088                 return ret;
1089
1090         update = NFP_NET_CFG_UPDATE_RSS;
1091
1092         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1093                 return -EIO;
1094
1095         return 0;
1096 }
1097
1098 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
1099 int
1100 nfp_net_reta_query(struct rte_eth_dev *dev,
1101                    struct rte_eth_rss_reta_entry64 *reta_conf,
1102                    uint16_t reta_size)
1103 {
1104         uint8_t i, j, mask;
1105         int idx, shift;
1106         uint32_t reta;
1107         struct nfp_net_hw *hw;
1108
1109         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1110
1111         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1112                 return -EINVAL;
1113
1114         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1115                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1116                         "(%d) doesn't match the number the hardware can support "
1117                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1118                 return -EINVAL;
1119         }
1120
1121         /*
1122          * Reading Redirection Table. There are 128 8-bit entries which can
1123          * be managed as 32 32-bit entries.
1124          */
1125         for (i = 0; i < reta_size; i += 4) {
1126                 /* Handling 4 RSS entries per loop */
1127                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1128                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1129                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1130
1131                 if (!mask)
1132                         continue;
1133
1134                 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
1135                                     shift);
1136                 for (j = 0; j < 4; j++) {
1137                         if (!(mask & (0x1 << j)))
1138                                 continue;
1139                         reta_conf[idx].reta[shift + j] =
1140                                 (uint8_t)((reta >> (8 * j)) & 0xFF);
1141                 }
1142         }
1143         return 0;
1144 }
1145
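/*
 * nfp_net_rss_hash_write() - Program the RSS key and the set of packet
 * types the hash is computed over (cfg_rss_ctrl).
 */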
1146 static int
1147 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1148                         struct rte_eth_rss_conf *rss_conf)
1149 {
1150         struct nfp_net_hw *hw;
1151         uint64_t rss_hf;
1152         uint32_t cfg_rss_ctrl = 0;
1153         uint8_t key;
1154         int i;
1155
1156         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1157
1158         /* Writing the key byte by byte */
1159         for (i = 0; i < rss_conf->rss_key_len; i++) {
1160                 memcpy(&key, &rss_conf->rss_key[i], 1);
1161                 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1162         }
1163
1164         rss_hf = rss_conf->rss_hf;
1165
1166         if (rss_hf & RTE_ETH_RSS_IPV4)
1167                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1168
1169         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1170                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1171
1172         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1173                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1174
1175         if (rss_hf & RTE_ETH_RSS_IPV6)
1176                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1177
1178         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1179                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1180
1181         if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1182                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1183
1184         cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1185         cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1186
1187         /* configuring where to apply the RSS hash */
1188         nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1189
1190         /* Writing the key size */
1191         nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1192
1193         return 0;
1194 }
1195
1196 int
1197 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1198                         struct rte_eth_rss_conf *rss_conf)
1199 {
1200         uint32_t update;
1201         uint64_t rss_hf;
1202         struct nfp_net_hw *hw;
1203
1204         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205
1206         rss_hf = rss_conf->rss_hf;
1207
1208         /* Checking if RSS is enabled */
1209         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
1210                 if (rss_hf != 0) { /* Enable RSS? */
1211                         PMD_DRV_LOG(ERR, "RSS unsupported");
1212                         return -EINVAL;
1213                 }
1214                 return 0; /* Nothing to do */
1215         }
1216
1217         if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1218                 PMD_DRV_LOG(ERR, "hash key too long");
1219                 return -EINVAL;
1220         }
1221
1222         nfp_net_rss_hash_write(dev, rss_conf);
1223
1224         update = NFP_NET_CFG_UPDATE_RSS;
1225
1226         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1227                 return -EIO;
1228
1229         return 0;
1230 }
1231
1232 int
1233 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1234                           struct rte_eth_rss_conf *rss_conf)
1235 {
1236         uint64_t rss_hf;
1237         uint32_t cfg_rss_ctrl;
1238         uint8_t key;
1239         int i;
1240         struct nfp_net_hw *hw;
1241
1242         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1243
1244         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1245                 return -EINVAL;
1246
1247         rss_hf = rss_conf->rss_hf;
1248         cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1249
1250         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
1251                 rss_hf |= RTE_ETH_RSS_IPV4;
1252
1253         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
1254                 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1255
1256         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
1257                 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1258
1259         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
1260                 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1261
1262         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
1263                 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1264
1265         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
1266                 rss_hf |= RTE_ETH_RSS_IPV6;
1267
1268         /* Propagate current RSS hash functions to caller */
1269         rss_conf->rss_hf = rss_hf;
1270
1271         /* Reading the key size */
1272         rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1273
1274         /* Reading the key byte by byte */
1275         for (i = 0; i < rss_conf->rss_key_len; i++) {
1276                 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1277                 memcpy(&rss_conf->rss_key[i], &key, 1);
1278         }
1279
1280         return 0;
1281 }
1282
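/*
 * nfp_net_rss_config_default() - Spread all RX queues evenly over the
 * redirection table and program the RSS hash from the device configuration.
 */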
1283 int
1284 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1285 {
1286         struct rte_eth_conf *dev_conf;
1287         struct rte_eth_rss_conf rss_conf;
1288         struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1289         uint16_t rx_queues = dev->data->nb_rx_queues;
1290         uint16_t queue;
1291         int i, j, ret;
1292
1293         PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
1294                 rx_queues);
1295
1296         nfp_reta_conf[0].mask = ~0x0;
1297         nfp_reta_conf[1].mask = ~0x0;
1298
1299         queue = 0;
1300         for (i = 0; i < 0x40; i += 8) {
1301                 for (j = i; j < (i + 8); j++) {
1302                         nfp_reta_conf[0].reta[j] = queue;
1303                         nfp_reta_conf[1].reta[j] = queue++;
1304                         queue %= rx_queues;
1305                 }
1306         }
1307         ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1308         if (ret != 0)
1309                 return ret;
1310
1311         dev_conf = &dev->data->dev_conf;
1312         if (!dev_conf) {
1313                 PMD_DRV_LOG(INFO, "wrong rss conf");
1314                 return -EINVAL;
1315         }
1316         rss_conf = dev_conf->rx_adv_conf.rss_conf;
1317
1318         ret = nfp_net_rss_hash_write(dev, &rss_conf);
1319
1320         return ret;
1321 }
1322
1323 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
1324 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
1325 /*
1326  * Local variables:
1327  * c-file-style: "Linux"
1328  * indent-tabs-mode: t
1329  * End:
1330  */