1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7
8 /*
9  * vim:shiftwidth=8:noexpandtab
10  *
11  * @file dpdk/pmd/nfp_common.c
12  *
13  * Netronome vNIC DPDK Poll-Mode Driver: Common files
14  */
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_dev.h>
23 #include <rte_ether.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_mempool.h>
27 #include <rte_version.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_spinlock.h>
31 #include <rte_service_component.h>
32
33 #include "nfpcore/nfp_cpp.h"
34 #include "nfpcore/nfp_nffw.h"
35 #include "nfpcore/nfp_hwinfo.h"
36 #include "nfpcore/nfp_mip.h"
37 #include "nfpcore/nfp_rtsym.h"
38 #include "nfpcore/nfp_nsp.h"
39
40 #include "nfp_common.h"
41 #include "nfp_rxtx.h"
42 #include "nfp_logs.h"
43 #include "nfp_ctrl.h"
44 #include "nfp_cpp_bridge.h"
45
46 #include <sys/types.h>
47 #include <sys/socket.h>
48 #include <sys/un.h>
49 #include <unistd.h>
50 #include <stdio.h>
51 #include <sys/ioctl.h>
52 #include <errno.h>
53
54 static int
55 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
56 {
57         int cnt;
58         uint32_t new;
59         struct timespec wait;
60
61         PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
62                     hw->qcp_cfg);
63
64         if (hw->qcp_cfg == NULL)
65                 rte_panic("Bad configuration queue pointer\n");
66
67         nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
68
69         wait.tv_sec = 0;
70         wait.tv_nsec = 1000000;
71
72         PMD_DRV_LOG(DEBUG, "Polling for update ack...");
73
74         /* Poll update field, waiting for NFP to ack the config */
75         for (cnt = 0; ; cnt++) {
76                 new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
77                 if (new == 0)
78                         break;
79                 if (new & NFP_NET_CFG_UPDATE_ERR) {
80                         PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
81                         return -1;
82                 }
83                 if (cnt >= NFP_NET_POLL_TIMEOUT) {
84                         PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
85                                           " %dms", update, cnt);
86                         rte_panic("Exiting\n");
87                 }
88                 nanosleep(&wait, 0); /* wait for 1ms */
89         }
90         PMD_DRV_LOG(DEBUG, "Ack DONE");
91         return 0;
92 }
93
94 /*
95  * Reconfigure the NIC
96  * @nn:    device to reconfigure
97  * @ctrl:    The value for the ctrl field in the BAR config
98  * @update:  The value for the update field in the BAR config
99  *
100  * Write the update word to the BAR and ping the reconfig queue. Then poll
101  * until the firmware has acknowledged the update by zeroing the update word.
102  */
103 int
104 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
105 {
106         uint32_t err;
107
108         PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
109                     ctrl, update);
110
111         rte_spinlock_lock(&hw->reconfig_lock);
112
113         nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
114         nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
115
116         rte_wmb();
117
118         err = __nfp_net_reconfig(hw, update);
119
120         rte_spinlock_unlock(&hw->reconfig_lock);
121
122         if (!err)
123                 return 0;
124
125         /*
126          * Reconfig errors returned here are recoverable by the caller.
127          * Unrecoverable errors trigger rte_panic inside __nfp_net_reconfig.
128          */
129         PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
130                      ctrl, update);
131         return -EIO;
132 }
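
/*
 * Illustrative sketch (not part of the upstream file): the reconfig pattern
 * used throughout this driver. A feature bit is toggled in a local copy of
 * hw->ctrl, the matching NFP_NET_CFG_UPDATE_* flag is passed along, and
 * hw->ctrl is only cached once the firmware has acknowledged the update:
 *
 *     uint32_t new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *     uint32_t update = NFP_NET_CFG_UPDATE_GEN;
 *
 *     if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 *             return -EIO;       // hw state left untouched on failure
 *     hw->ctrl = new_ctrl;       // cache only after the firmware ack
 */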
133
134 /*
135  * Configure an Ethernet device. This function must be invoked first
136  * before any other function in the Ethernet API. This function can
137  * also be re-invoked when a device is in the stopped state.
138  */
139 int
140 nfp_net_configure(struct rte_eth_dev *dev)
141 {
142         struct rte_eth_conf *dev_conf;
143         struct rte_eth_rxmode *rxmode;
144         struct rte_eth_txmode *txmode;
145         struct nfp_net_hw *hw;
146
147         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
148
149         /*
150          * A DPDK app passes how many queues to use and how those
151          * queues need to be configured. The ethdev layer uses this to
152          * make sure no more queues than those advertised by the
153          * driver are requested. This function is called after that
154          * internal check.
155          */
156
157         PMD_INIT_LOG(DEBUG, "Configure");
158
159         dev_conf = &dev->data->dev_conf;
160         rxmode = &dev_conf->rxmode;
161         txmode = &dev_conf->txmode;
162
163         if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
164                 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
165
166         /* Checking TX mode */
167         if (txmode->mq_mode) {
168                 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
169                 return -EINVAL;
170         }
171
172         /* Checking RX mode */
173         if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
174             !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
175                 PMD_INIT_LOG(INFO, "RSS not supported");
176                 return -EINVAL;
177         }
178
179         return 0;
180 }
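
/*
 * Illustrative sketch (not part of the upstream file), assuming a standard
 * ethdev application: the configuration validated above is what the app
 * passes to rte_eth_dev_configure(), e.g. requesting RSS distribution:
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *             .rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *     };
 *
 *     // nb_rxq/nb_txq must not exceed what nfp_net_infos_get() advertises
 *     if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */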
181
182 void
183 nfp_net_enable_queues(struct rte_eth_dev *dev)
184 {
185         struct nfp_net_hw *hw;
186         uint64_t enabled_queues = 0;
187         int i;
188
189         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190
191         /* Enabling the required TX queues in the device */
192         for (i = 0; i < dev->data->nb_tx_queues; i++)
193                 enabled_queues |= (1 << i);
194
195         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
196
197         enabled_queues = 0;
198
199         /* Enabling the required RX queues in the device */
200         for (i = 0; i < dev->data->nb_rx_queues; i++)
201                 enabled_queues |= (1 << i);
202
203         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
204 }
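
/*
 * Worked example (illustrative): with nb_tx_queues = 4 the loop above builds
 * enabled_queues = 0xf, i.e. one bit per ring written to TXRS_ENABLE.
 */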
205
206 void
207 nfp_net_disable_queues(struct rte_eth_dev *dev)
208 {
209         struct nfp_net_hw *hw;
210         uint32_t new_ctrl, update = 0;
211
212         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
213
214         nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
215         nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
216
217         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
218         update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
219                  NFP_NET_CFG_UPDATE_MSIX;
220
221         if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
222                 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
223
224         /* If the reconfig fails, leave the hw state unchanged */
225         if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
226                 return;
227
228         hw->ctrl = new_ctrl;
229 }
230
231 void
232 nfp_net_params_setup(struct nfp_net_hw *hw)
233 {
234         nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
235         nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
236 }
237
238 void
239 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
240 {
241         hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
242 }
243
244 #define ETH_ADDR_LEN    6
245
246 void
247 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
248 {
249         int i;
250
251         for (i = 0; i < ETH_ADDR_LEN; i++)
252                 dst[i] = src[i];
253 }
254
255 void
256 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
257 {
258         uint32_t mac0 = *(uint32_t *)mac;
259         uint16_t mac1;
260
261         nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
262
263         mac += 4;
264         mac1 = *(uint16_t *)mac;
265         nn_writew(rte_cpu_to_be_16(mac1),
266                   hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
267 }
268
269 int
270 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
271 {
272         struct nfp_net_hw *hw;
273         uint32_t update, ctrl;
274
275         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
276         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
277             !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
278                 PMD_INIT_LOG(INFO, "MAC address cannot be changed while"
279                                   " the port is enabled");
280                 return -EBUSY;
281         }
286
287         /* Writing new MAC to the specific port BAR address */
288         nfp_net_write_mac(hw, (uint8_t *)mac_addr);
289
290         /* Signal the NIC about the change */
291         update = NFP_NET_CFG_UPDATE_MACADDR;
292         ctrl = hw->ctrl;
293         if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
294             (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
295                 ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
296         if (nfp_net_reconfig(hw, ctrl, update) < 0) {
297                 PMD_INIT_LOG(INFO, "MAC address update failed");
298                 return -EIO;
299         }
300         return 0;
301 }
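
/*
 * Illustrative sketch (not part of the upstream file): this callback is
 * reached through the generic ethdev API. The MAC value below is only an
 * example:
 *
 *     struct rte_ether_addr addr = {
 *             .addr_bytes = { 0x00, 0x15, 0x4d, 0x00, 0x00, 0x01 }
 *     };
 *
 *     // Fails with -EBUSY when the port is up and the firmware lacks
 *     // the LIVE_ADDR capability (see the check above).
 *     ret = rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */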
302
303 int
304 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
305                            struct rte_intr_handle *intr_handle)
306 {
307         struct nfp_net_hw *hw;
308         int i;
309
310         if (!intr_handle->intr_vec) {
311                 intr_handle->intr_vec =
312                         rte_zmalloc("intr_vec",
313                                     dev->data->nb_rx_queues * sizeof(int), 0);
314                 if (!intr_handle->intr_vec) {
315                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
316                                      " intr_vec", dev->data->nb_rx_queues);
317                         return -ENOMEM;
318                 }
319         }
320
321         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
322
323         if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
324                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
325                 /* UIO just supports one queue and no LSC */
326                 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
327                 intr_handle->intr_vec[0] = 0;
328         } else {
329                 PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
330                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
331                         /*
332                          * The first msix vector is reserved for non
333                          * efd interrupts
334                         */
335                         nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
336                         intr_handle->intr_vec[i] = i + 1;
337                         PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
338                                             intr_handle->intr_vec[i]);
339                 }
340         }
341
342         /* Avoiding TX interrupts */
343         hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
344         return 0;
345 }
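
/*
 * Illustrative sketch (not part of the upstream file), assuming the standard
 * ethdev Rx-interrupt flow: the application asks for Rx interrupts at
 * configure time and arms them per queue before sleeping:
 *
 *     struct rte_eth_conf conf = { .intr_conf = { .rxq = 1 } };
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *     ...
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);    // arm, then wait
 *     // on wake-up: rte_eth_dev_rx_intr_disable() and poll the queue
 */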
346
347 uint32_t
348 nfp_check_offloads(struct rte_eth_dev *dev)
349 {
350         struct nfp_net_hw *hw;
351         struct rte_eth_conf *dev_conf;
352         struct rte_eth_rxmode *rxmode;
353         struct rte_eth_txmode *txmode;
354         uint32_t ctrl = 0;
355
356         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
357
358         dev_conf = &dev->data->dev_conf;
359         rxmode = &dev_conf->rxmode;
360         txmode = &dev_conf->txmode;
361
362         if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
363                 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
364                         ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
365         }
366
367         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
368                 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
369                         ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
370         }
371
372         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
373                 hw->mtu = rxmode->max_rx_pkt_len;
374
375         if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
376                 ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
377
378         /* L2 broadcast */
379         if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
380                 ctrl |= NFP_NET_CFG_CTRL_L2BC;
381
382         /* L2 multicast */
383         if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
384                 ctrl |= NFP_NET_CFG_CTRL_L2MC;
385
386         /* TX checksum offload */
387         if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
388             txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
389             txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
390                 ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
391
392         /* LSO offload */
393         if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
394                 if (hw->cap & NFP_NET_CFG_CTRL_LSO)
395                         ctrl |= NFP_NET_CFG_CTRL_LSO;
396                 else
397                         ctrl |= NFP_NET_CFG_CTRL_LSO2;
398         }
399
400         /* TX gather (multi-segment frames) */
401         if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
402                 ctrl |= NFP_NET_CFG_CTRL_GATHER;
403
404         return ctrl;
405 }
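
/*
 * Illustrative sketch (not part of the upstream file): the ctrl word built
 * above mirrors the offloads the application requested, e.g.:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
 *                            DEV_RX_OFFLOAD_VLAN_STRIP;
 *     conf.txmode.offloads = DEV_TX_OFFLOAD_TCP_CKSUM |
 *                            DEV_TX_OFFLOAD_MULTI_SEGS;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */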
406
407 int
408 nfp_net_promisc_enable(struct rte_eth_dev *dev)
409 {
410         uint32_t new_ctrl, update = 0;
411         struct nfp_net_hw *hw;
412         int ret;
413
414         PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
415
416         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
417
418         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
419                 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
420                 return -ENOTSUP;
421         }
422
423         if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
424                 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
425                 return 0;
426         }
427
428         new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
429         update = NFP_NET_CFG_UPDATE_GEN;
430
431         /*
432          * The ethdev layer marks promiscuous mode as enabled just after
433          * this call, assuming it cannot fail ...
434          */
435         ret = nfp_net_reconfig(hw, new_ctrl, update);
436         if (ret < 0)
437                 return ret;
438
439         hw->ctrl = new_ctrl;
440
441         return 0;
442 }
443
444 int
445 nfp_net_promisc_disable(struct rte_eth_dev *dev)
446 {
447         uint32_t new_ctrl, update = 0;
448         struct nfp_net_hw *hw;
449         int ret;
450
451         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
452
453         if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
454                 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
455                 return 0;
456         }
457
458         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
459         update = NFP_NET_CFG_UPDATE_GEN;
460
461         /*
462          * The ethdev layer marks promiscuous mode as disabled just before
463          * this call, assuming it cannot fail ...
464          */
465         ret = nfp_net_reconfig(hw, new_ctrl, update);
466         if (ret < 0)
467                 return ret;
468
469         hw->ctrl = new_ctrl;
470
471         return 0;
472 }
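
/*
 * Illustrative sketch (not part of the upstream file): both callbacks above
 * are reached through the generic ethdev API:
 *
 *     if (rte_eth_promiscuous_enable(port_id) != 0)
 *             printf("promiscuous mode not supported on port %u\n", port_id);
 *     ...
 *     rte_eth_promiscuous_disable(port_id);
 */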
473
474 /*
475  * return 0 means link status changed, -1 means not changed
476  *
477  * Waiting for completion is needed as it can take up to 9 seconds to get
478  * the link status.
479  */
480 int
481 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
482 {
483         struct nfp_net_hw *hw;
484         struct rte_eth_link link;
485         uint32_t nn_link_status;
486         int ret;
487
488         static const uint32_t ls_to_ethtool[] = {
489                 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
490                 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
491                 [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
492                 [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
493                 [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
494                 [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
495                 [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
496                 [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
497         };
498
499         PMD_DRV_LOG(DEBUG, "Link update");
500
501         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
502
503         nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
504
505         memset(&link, 0, sizeof(struct rte_eth_link));
506
507         if (nn_link_status & NFP_NET_CFG_STS_LINK)
508                 link.link_status = ETH_LINK_UP;
509
510         link.link_duplex = ETH_LINK_FULL_DUPLEX;
511
512         nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
513                          NFP_NET_CFG_STS_LINK_RATE_MASK;
514
515         if (nn_link_status >= RTE_DIM(ls_to_ethtool))
516                 link.link_speed = ETH_SPEED_NUM_NONE;
517         else
518                 link.link_speed = ls_to_ethtool[nn_link_status];
519
520         ret = rte_eth_linkstatus_set(dev, &link);
521         if (ret == 0) {
522                 if (link.link_status)
523                         PMD_DRV_LOG(INFO, "NIC Link is Up");
524                 else
525                         PMD_DRV_LOG(INFO, "NIC Link is Down");
526         }
527         return ret;
528 }
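
/*
 * Illustrative sketch (not part of the upstream file): applications read the
 * status cached by this callback through the ethdev API:
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */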
529
530 int
531 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
532 {
533         int i;
534         struct nfp_net_hw *hw;
535         struct rte_eth_stats nfp_dev_stats;
536
537         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
538
539         /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
540
541         memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
542
543         /* reading per RX ring stats */
544         for (i = 0; i < dev->data->nb_rx_queues; i++) {
545                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
546                         break;
547
548                 nfp_dev_stats.q_ipackets[i] =
549                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
550
551                 nfp_dev_stats.q_ipackets[i] -=
552                         hw->eth_stats_base.q_ipackets[i];
553
554                 nfp_dev_stats.q_ibytes[i] =
555                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
556
557                 nfp_dev_stats.q_ibytes[i] -=
558                         hw->eth_stats_base.q_ibytes[i];
559         }
560
561         /* reading per TX ring stats */
562         for (i = 0; i < dev->data->nb_tx_queues; i++) {
563                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
564                         break;
565
566                 nfp_dev_stats.q_opackets[i] =
567                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
568
569                 nfp_dev_stats.q_opackets[i] -=
570                         hw->eth_stats_base.q_opackets[i];
571
572                 nfp_dev_stats.q_obytes[i] =
573                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
574
575                 nfp_dev_stats.q_obytes[i] -=
576                         hw->eth_stats_base.q_obytes[i];
577         }
578
579         nfp_dev_stats.ipackets =
580                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
581
582         nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
583
584         nfp_dev_stats.ibytes =
585                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
586
587         nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
588
589         nfp_dev_stats.opackets =
590                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
591
592         nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
593
594         nfp_dev_stats.obytes =
595                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
596
597         nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
598
599         /* reading general device stats */
600         nfp_dev_stats.ierrors =
601                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
602
603         nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
604
605         nfp_dev_stats.oerrors =
606                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
607
608         nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
609
610         /* RX ring mbuf allocation failures */
611         nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
612
613         nfp_dev_stats.imissed =
614                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
615
616         nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
617
618         if (stats) {
619                 memcpy(stats, &nfp_dev_stats, sizeof(*stats));
620                 return 0;
621         }
622         return -EINVAL;
623 }
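
/*
 * Illustrative sketch (not part of the upstream file): hw->eth_stats_base
 * holds the counter values captured by nfp_net_stats_reset(), so the
 * subtractions above turn the NIC's free-running counters into since-reset
 * values. From the application side:
 *
 *     struct rte_eth_stats stats;
 *
 *     rte_eth_stats_reset(port_id);      // snapshot becomes the new base
 *     ... run traffic ...
 *     rte_eth_stats_get(port_id, &stats);
 *     printf("rx %" PRIu64 " pkts, %" PRIu64 " bytes\n",
 *            stats.ipackets, stats.ibytes);
 */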
624
625 int
626 nfp_net_stats_reset(struct rte_eth_dev *dev)
627 {
628         int i;
629         struct nfp_net_hw *hw;
630
631         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
632
633         /*
634          * hw->eth_stats_base records the per-counter starting point.
635          * Let's update it now
636          */
637
638         /* reading per RX ring stats */
639         for (i = 0; i < dev->data->nb_rx_queues; i++) {
640                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
641                         break;
642
643                 hw->eth_stats_base.q_ipackets[i] =
644                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
645
646                 hw->eth_stats_base.q_ibytes[i] =
647                         nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
648         }
649
650         /* reading per TX ring stats */
651         for (i = 0; i < dev->data->nb_tx_queues; i++) {
652                 if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
653                         break;
654
655                 hw->eth_stats_base.q_opackets[i] =
656                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
657
658                 hw->eth_stats_base.q_obytes[i] =
659                         nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
660         }
661
662         hw->eth_stats_base.ipackets =
663                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
664
665         hw->eth_stats_base.ibytes =
666                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
667
668         hw->eth_stats_base.opackets =
669                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
670
671         hw->eth_stats_base.obytes =
672                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
673
674         /* reading general device stats */
675         hw->eth_stats_base.ierrors =
676                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
677
678         hw->eth_stats_base.oerrors =
679                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
680
681         /* RX ring mbuf allocation failures */
682         dev->data->rx_mbuf_alloc_failed = 0;
683
684         hw->eth_stats_base.imissed =
685                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
686
687         return 0;
688 }
689
690 int
691 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
692 {
693         struct nfp_net_hw *hw;
694
695         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
696
697         dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
698         dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
699         dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
700         dev_info->max_rx_pktlen = hw->max_mtu;
701         /* Next should change when PF support is implemented */
702         dev_info->max_mac_addrs = 1;
703
704         if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
705                 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
706
707         if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
708                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
709                                              DEV_RX_OFFLOAD_UDP_CKSUM |
710                                              DEV_RX_OFFLOAD_TCP_CKSUM;
711
712         if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
713                 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
714
715         if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
716                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
717                                              DEV_TX_OFFLOAD_UDP_CKSUM |
718                                              DEV_TX_OFFLOAD_TCP_CKSUM;
719
720         if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
721                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
722
723         if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
724                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
725
726         dev_info->default_rxconf = (struct rte_eth_rxconf) {
727                 .rx_thresh = {
728                         .pthresh = DEFAULT_RX_PTHRESH,
729                         .hthresh = DEFAULT_RX_HTHRESH,
730                         .wthresh = DEFAULT_RX_WTHRESH,
731                 },
732                 .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
733                 .rx_drop_en = 0,
734         };
735
736         dev_info->default_txconf = (struct rte_eth_txconf) {
737                 .tx_thresh = {
738                         .pthresh = DEFAULT_TX_PTHRESH,
739                         .hthresh = DEFAULT_TX_HTHRESH,
740                         .wthresh = DEFAULT_TX_WTHRESH,
741                 },
742                 .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
743                 .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
744         };
745
746         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
747                 .nb_max = NFP_NET_MAX_RX_DESC,
748                 .nb_min = NFP_NET_MIN_RX_DESC,
749                 .nb_align = NFP_ALIGN_RING_DESC,
750         };
751
752         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
753                 .nb_max = NFP_NET_MAX_TX_DESC,
754                 .nb_min = NFP_NET_MIN_TX_DESC,
755                 .nb_align = NFP_ALIGN_RING_DESC,
756                 .nb_seg_max = NFP_TX_MAX_SEG,
757                 .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
758         };
759
760         /* All NFP devices support jumbo frames */
761         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
762
763         if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
764                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
765
766                 dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
767                                                    ETH_RSS_NONFRAG_IPV4_TCP |
768                                                    ETH_RSS_NONFRAG_IPV4_UDP |
769                                                    ETH_RSS_IPV6 |
770                                                    ETH_RSS_NONFRAG_IPV6_TCP |
771                                                    ETH_RSS_NONFRAG_IPV6_UDP;
772
773                 dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
774                 dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
775         }
776
777         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
778                                ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
779                                ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
780
781         return 0;
782 }
783
784 const uint32_t *
785 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
786 {
787         static const uint32_t ptypes[] = {
788                 /* refers to nfp_net_set_hash() */
789                 RTE_PTYPE_INNER_L3_IPV4,
790                 RTE_PTYPE_INNER_L3_IPV6,
791                 RTE_PTYPE_INNER_L3_IPV6_EXT,
792                 RTE_PTYPE_INNER_L4_MASK,
793                 RTE_PTYPE_UNKNOWN
794         };
795
796         if (dev->rx_pkt_burst == nfp_net_recv_pkts)
797                 return ptypes;
798         return NULL;
799 }
800
801 int
802 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
803 {
804         struct rte_pci_device *pci_dev;
805         struct nfp_net_hw *hw;
806         int base = 0;
807
808         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
809         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
810
811         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
812                 base = 1;
813
814         /* Make sure all updates are written before un-masking */
815         rte_wmb();
816         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
817                       NFP_NET_CFG_ICR_UNMASKED);
818         return 0;
819 }
820
821 int
822 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
823 {
824         struct rte_pci_device *pci_dev;
825         struct nfp_net_hw *hw;
826         int base = 0;
827
828         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
829         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
830
831         if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
832                 base = 1;
833
834         /* Make sure all updates are written before un-masking */
835         rte_wmb();
836         nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
837         return 0;
838 }
839
840 static void
841 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
842 {
843         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
844         struct rte_eth_link link;
845
846         rte_eth_linkstatus_get(dev, &link);
847         if (link.link_status)
848                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
849                             dev->data->port_id, link.link_speed,
850                             link.link_duplex == ETH_LINK_FULL_DUPLEX
851                             ? "full-duplex" : "half-duplex");
852         else
853                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
854                             dev->data->port_id);
855
856         PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
857                     pci_dev->addr.domain, pci_dev->addr.bus,
858                     pci_dev->addr.devid, pci_dev->addr.function);
859 }
860
861 /* Interrupt configuration and handling */
862
863 /*
864  * nfp_net_irq_unmask - Unmask an interrupt
865  *
866  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
867  * clear the ICR for the entry.
868  */
869 static void
870 nfp_net_irq_unmask(struct rte_eth_dev *dev)
871 {
872         struct nfp_net_hw *hw;
873         struct rte_pci_device *pci_dev;
874
875         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
876         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
877
878         if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
879                 /* If MSI-X auto-masking is used, clear the entry */
880                 rte_wmb();
881                 rte_intr_ack(&pci_dev->intr_handle);
882         } else {
883                 /* Make sure all updates are written before un-masking */
884                 rte_wmb();
885                 nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
886                               NFP_NET_CFG_ICR_UNMASKED);
887         }
888 }
889
890 /*
891  * Interrupt handler registered as an alarm callback for delayed handling
892  * of a specific interrupt, waiting for the NIC state to become stable. The
893  * NFP interrupt state is not stable right after the link goes down, so it
894  * needs to wait 4 seconds to get a stable status.
895  *
896  * @param handle   Pointer to interrupt handle.
897  * @param param    The address of parameter (struct rte_eth_dev *)
898  *
899  * @return  void
900  */
901 void
902 nfp_net_dev_interrupt_delayed_handler(void *param)
903 {
904         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
905
906         nfp_net_link_update(dev, 0);
907         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
908
909         nfp_net_dev_link_status_print(dev);
910
911         /* Unmasking */
912         nfp_net_irq_unmask(dev);
913 }
914
915 void
916 nfp_net_dev_interrupt_handler(void *param)
917 {
918         int64_t timeout;
919         struct rte_eth_link link;
920         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
921
922         PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!");
923
924         rte_eth_linkstatus_get(dev, &link);
925
926         nfp_net_link_update(dev, 0);
927
928         /* link likely to come up */
929         if (!link.link_status) {
930                 /* handle it 1 sec later, waiting for it to be stable */
931                 timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
932                 /* link likely to go down */
933         } else {
934                 /* handle it 4 sec later, waiting for it to be stable */
935                 timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
936         }
937
938         if (rte_eal_alarm_set(timeout * 1000,
939                               nfp_net_dev_interrupt_delayed_handler,
940                               (void *)dev) < 0) {
941                 PMD_INIT_LOG(ERR, "Error setting alarm");
942                 /* Unmasking */
943                 nfp_net_irq_unmask(dev);
944         }
945 }
946
947 int
948 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
949 {
950         struct nfp_net_hw *hw;
951
952         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
953
954         /* check that mtu is within the allowed range */
955         if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
956                 return -EINVAL;
957
958         /* mtu setting is forbidden if port is started */
959         if (dev->data->dev_started) {
960                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
961                             dev->data->port_id);
962                 return -EBUSY;
963         }
964
965         /* switch to jumbo mode if needed */
966         if ((uint32_t)mtu > RTE_ETHER_MTU)
967                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
968         else
969                 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
970
971         /* update max frame size */
972         dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
973
974         /* writing to configuration space */
975         nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
976
977         hw->mtu = mtu;
978
979         return 0;
980 }
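
/*
 * Illustrative sketch (not part of the upstream file): the MTU has to be
 * changed on a stopped port, and any value above RTE_ETHER_MTU (1500)
 * switches the port to jumbo mode as done above:
 *
 *     rte_eth_dev_stop(port_id);
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)   // must be <= hw->max_mtu
 *             printf("MTU change rejected\n");
 *     rte_eth_dev_start(port_id);
 */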
981
982 int
983 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
984 {
985         uint32_t new_ctrl, update;
986         struct nfp_net_hw *hw;
987         int ret;
988
989         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
990         new_ctrl = 0;
991
992         /* Enable vlan strip if it is not configured yet */
993         if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
994             !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
995                 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
996
997         /* Disable vlan strip only if it is configured */
998         if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
999             (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
1000                 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
1001
1002         if (new_ctrl == 0)
1003                 return 0;
1004
1005         update = NFP_NET_CFG_UPDATE_GEN;
1006
1007         ret = nfp_net_reconfig(hw, new_ctrl, update);
1008         if (!ret)
1009                 hw->ctrl = new_ctrl;
1010
1011         return ret;
1012 }
1013
1014 static int
1015 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1016                     struct rte_eth_rss_reta_entry64 *reta_conf,
1017                     uint16_t reta_size)
1018 {
1019         uint32_t reta, mask;
1020         int i, j;
1021         int idx, shift;
1022         struct nfp_net_hw *hw =
1023                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1024
1025         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1026                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1027                         "(%d) doesn't match the number hardware can support "
1028                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1029                 return -EINVAL;
1030         }
1031
1032         /*
1033          * Update Redirection Table. There are 128 8bit-entries which can be
1034          * managed as 32 32bit-entries
1035          */
1036         for (i = 0; i < reta_size; i += 4) {
1037                 /* Handling 4 RSS entries per loop */
1038                 idx = i / RTE_RETA_GROUP_SIZE;
1039                 shift = i % RTE_RETA_GROUP_SIZE;
1040                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1041
1042                 if (!mask)
1043                         continue;
1044
1045                 reta = 0;
1046                 /* If all 4 entries were set, no need to read the RETA register */
1047                 if (mask != 0xF)
1048                         reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1049
1050                 for (j = 0; j < 4; j++) {
1051                         if (!(mask & (0x1 << j)))
1052                                 continue;
1053                         if (mask != 0xF)
1054                                 /* Clearing the entry bits */
1055                                 reta &= ~(0xFF << (8 * j));
1056                         reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1057                 }
1058                 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
1059                               reta);
1060         }
1061         return 0;
1062 }
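
/*
 * Worked example (illustrative): with reta_size = 128 and
 * RTE_RETA_GROUP_SIZE = 64, the iteration i = 68 gives idx = 1 and
 * shift = 4, so the four entries reta_conf[1].reta[4..7] are packed one
 * byte each into a single 32-bit word written at
 * NFP_NET_CFG_RSS_ITBL + 64 + 4.
 */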
1063
1064 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
1065 int
1066 nfp_net_reta_update(struct rte_eth_dev *dev,
1067                     struct rte_eth_rss_reta_entry64 *reta_conf,
1068                     uint16_t reta_size)
1069 {
1070         struct nfp_net_hw *hw =
1071                 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1072         uint32_t update;
1073         int ret;
1074
1075         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1076                 return -EINVAL;
1077
1078         ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1079         if (ret != 0)
1080                 return ret;
1081
1082         update = NFP_NET_CFG_UPDATE_RSS;
1083
1084         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1085                 return -EIO;
1086
1087         return 0;
1088 }
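
/*
 * Illustrative sketch (not part of the upstream file): an application updates
 * the 128-entry table through the ethdev API, one rte_eth_rss_reta_entry64
 * per group of 64 entries (nb_rxq is assumed to hold the Rx queue count):
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     int i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |= 1ULL << (i % 64);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % 64] = i % nb_rxq;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */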
1089
1090  /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
1091 int
1092 nfp_net_reta_query(struct rte_eth_dev *dev,
1093                    struct rte_eth_rss_reta_entry64 *reta_conf,
1094                    uint16_t reta_size)
1095 {
1096         uint8_t i, j, mask;
1097         int idx, shift;
1098         uint32_t reta;
1099         struct nfp_net_hw *hw;
1100
1101         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1102
1103         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1104                 return -EINVAL;
1105
1106         if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1107                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1108                         "(%d) doesn't match the number hardware can support "
1109                         "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1110                 return -EINVAL;
1111         }
1112
1113         /*
1114          * Reading Redirection Table. There are 128 8bit-entries which can be
1115          * managed as 32 32bit-entries
1116          */
1117         for (i = 0; i < reta_size; i += 4) {
1118                 /* Handling 4 RSS entries per loop */
1119                 idx = i / RTE_RETA_GROUP_SIZE;
1120                 shift = i % RTE_RETA_GROUP_SIZE;
1121                 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1122
1123                 if (!mask)
1124                         continue;
1125
1126                 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
1127                                     shift);
1128                 for (j = 0; j < 4; j++) {
1129                         if (!(mask & (0x1 << j)))
1130                                 continue;
1131                         reta_conf[idx].reta[shift + j] =
1132                                 (uint8_t)((reta >> (8 * j)) & 0xFF);
1133                 }
1134         }
1135         return 0;
1136 }
1137
1138 static int
1139 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1140                         struct rte_eth_rss_conf *rss_conf)
1141 {
1142         struct nfp_net_hw *hw;
1143         uint64_t rss_hf;
1144         uint32_t cfg_rss_ctrl = 0;
1145         uint8_t key;
1146         int i;
1147
1148         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1149
1150         /* Writing the key byte by byte */
1151         for (i = 0; i < rss_conf->rss_key_len; i++) {
1152                 memcpy(&key, &rss_conf->rss_key[i], 1);
1153                 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1154         }
1155
1156         rss_hf = rss_conf->rss_hf;
1157
1158         if (rss_hf & ETH_RSS_IPV4)
1159                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1160
1161         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1162                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1163
1164         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1165                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1166
1167         if (rss_hf & ETH_RSS_IPV6)
1168                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1169
1170         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1171                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1172
1173         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1174                 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1175
1176         cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1177         cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1178
1179         /* configuring where to apply the RSS hash */
1180         nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1181
1182         /* Writing the key size */
1183         nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1184
1185         return 0;
1186 }
1187
1188 int
1189 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1190                         struct rte_eth_rss_conf *rss_conf)
1191 {
1192         uint32_t update;
1193         uint64_t rss_hf;
1194         struct nfp_net_hw *hw;
1195
1196         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1197
1198         rss_hf = rss_conf->rss_hf;
1199
1200         /* Checking if RSS is enabled */
1201         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
1202                 if (rss_hf != 0) { /* Enable RSS? */
1203                         PMD_DRV_LOG(ERR, "RSS unsupported");
1204                         return -EINVAL;
1205                 }
1206                 return 0; /* Nothing to do */
1207         }
1208
1209         if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1210                 PMD_DRV_LOG(ERR, "hash key too long");
1211                 return -EINVAL;
1212         }
1213
1214         nfp_net_rss_hash_write(dev, rss_conf);
1215
1216         update = NFP_NET_CFG_UPDATE_RSS;
1217
1218         if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1219                 return -EIO;
1220
1221         return 0;
1222 }
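
/*
 * Illustrative sketch (not part of the upstream file): the hash key and hash
 * types are changed at runtime through the ethdev API; the key length is
 * capped by NFP_NET_CFG_RSS_KEY_SZ as checked above (the 40-byte key below
 * is only an assumed example):
 *
 *     uint8_t key[40] = { ... };
 *     struct rte_eth_rss_conf rss_conf = {
 *             .rss_key = key,
 *             .rss_key_len = sizeof(key),
 *             .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
 *     };
 *
 *     rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */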
1223
1224 int
1225 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1226                           struct rte_eth_rss_conf *rss_conf)
1227 {
1228         uint64_t rss_hf;
1229         uint32_t cfg_rss_ctrl;
1230         uint8_t key;
1231         int i;
1232         struct nfp_net_hw *hw;
1233
1234         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235
1236         if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1237                 return -EINVAL;
1238
1239         rss_hf = rss_conf->rss_hf;
1240         cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1241
1242         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
1243                 rss_hf |= ETH_RSS_IPV4;
1244
1245         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
1246                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1247
1248         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
1249                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1250
1251         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
1252                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1253
1254         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
1255                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1256
1257         if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
1258                 rss_hf |= ETH_RSS_IPV6;
1259
1260         /* Propagate current RSS hash functions to caller */
1261         rss_conf->rss_hf = rss_hf;
1262
1263         /* Reading the key size */
1264         rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1265
1266         /* Reading the key byte by byte */
1267         for (i = 0; i < rss_conf->rss_key_len; i++) {
1268                 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1269                 memcpy(&rss_conf->rss_key[i], &key, 1);
1270         }
1271
1272         return 0;
1273 }
1274
1275 int
1276 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1277 {
1278         struct rte_eth_conf *dev_conf;
1279         struct rte_eth_rss_conf rss_conf;
1280         struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1281         uint16_t rx_queues = dev->data->nb_rx_queues;
1282         uint16_t queue;
1283         int i, j, ret;
1284
1285         PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
1286                 rx_queues);
1287
1288         nfp_reta_conf[0].mask = ~0x0;
1289         nfp_reta_conf[1].mask = ~0x0;
1290
1291         queue = 0;
1292         for (i = 0; i < 0x40; i += 8) {
1293                 for (j = i; j < (i + 8); j++) {
1294                         nfp_reta_conf[0].reta[j] = queue;
1295                         nfp_reta_conf[1].reta[j] = queue++;
1296                         queue %= rx_queues;
1297                 }
1298         }
1299         ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1300         if (ret != 0)
1301                 return ret;
1302
1303         dev_conf = &dev->data->dev_conf;
1304         if (!dev_conf) {
1305                 PMD_DRV_LOG(INFO, "wrong rss conf");
1306                 return -EINVAL;
1307         }
1308         rss_conf = dev_conf->rx_adv_conf.rss_conf;
1309
1310         ret = nfp_net_rss_hash_write(dev, &rss_conf);
1311
1312         return ret;
1313 }
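
/*
 * Worked example (illustrative): with rx_queues = 4 the loop above fills the
 * 128-entry table round-robin, i.e. reta = 0, 1, 2, 3, 0, 1, 2, 3, ... so
 * RSS spreads flows evenly across the four configured Rx queues.
 */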
1314
1315 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
1316 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
1317 /*
1318  * Local variables:
1319  * c-file-style: "Linux"
1320  * indent-tabs-mode: t
1321  * End:
1322  */