net/enic: do not overwrite admin Tx queue limit
drivers/net/enic/enic_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define PMD_INIT_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
                "%s" fmt "\n", __func__, ##args)

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        {.vendor_id = 0, /* sentinel */},
};

#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"

RTE_INIT(enicpmd_init_log);
static void
enicpmd_init_log(void)
{
        enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
        if (enicpmd_logtype_init >= 0)
                rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
        enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
        if (enicpmd_logtype_flow >= 0)
                rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}

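/*
 * Dispatch legacy flow-director (fdir) filter_ctrl operations. ADD/UPDATE,
 * DELETE, STATS and INFO map to the enic_fdir_* helpers; FLUSH is not
 * supported by this PMD and returns -ENOTSUP.
 */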
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
                        enum rte_filter_op filter_op, void *arg)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret = 0;

        ENICPMD_FUNC_TRACE();
        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
                return -EINVAL;

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
        case RTE_ETH_FILTER_UPDATE:
                ret = enic_fdir_add_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_DELETE:
                ret = enic_fdir_del_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_STATS:
                enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
                break;

        case RTE_ETH_FILTER_FLUSH:
                dev_warning(enic, "unsupported operation %u", filter_op);
                ret = -ENOTSUP;
                break;
        case RTE_ETH_FILTER_INFO:
                enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
                break;
        default:
                dev_err(enic, "unknown operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

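/*
 * Top-level filter_ctrl handler: GENERIC requests return the rte_flow ops
 * (enic_flow_ops), FDIR requests go to enicpmd_fdir_ctrl_func(), and any
 * other filter type is rejected with -EINVAL.
 */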
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = 0;

        ENICPMD_FUNC_TRACE();

        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &enic_flow_ops;
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
                break;
        default:
                dev_warning(enic, "Filter type (%d) not supported",
                        filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        enic_free_wq(txq);
}

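/*
 * Called from both Rx and Tx queue setup. Interrupt resources are only
 * allocated (and vNIC resources initialized) once every CQ, WQ and SOP RQ
 * has been configured; until then this simply returns 0.
 */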
static int enicpmd_dev_setup_intr(struct enic *enic)
{
        int ret;
        unsigned int index;

        ENICPMD_FUNC_TRACE();

        /* Are we done with the init of all the queues? */
        for (index = 0; index < enic->cq_count; index++) {
                if (!enic->cq[index].ctrl)
                        break;
        }
        if (enic->cq_count != index)
                return 0;
        for (index = 0; index < enic->wq_count; index++) {
                if (!enic->wq[index].ctrl)
                        break;
        }
        if (enic->wq_count != index)
                return 0;
        /* check start of packet (SOP) RQs only in case scatter is disabled. */
        for (index = 0; index < enic->rq_count; index++) {
                if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
                        break;
        }
        if (enic->rq_count != index)
                return 0;

        ret = enic_alloc_intr_resources(enic);
        if (ret) {
                dev_err(enic, "alloc intr failed\n");
                return ret;
        }
        enic_init_vnic_resources(enic);

        ret = enic_setup_finish(enic);
        if (ret)
                dev_err(enic, "setup could not be finished\n");

        return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        __rte_unused const struct rte_eth_txconf *tx_conf)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        RTE_ASSERT(queue_idx < enic->conf_wq_count);
        eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
        if (ret) {
                dev_err(enic, "error in allocating wq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_wq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_wq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping wq %d\n", queue_idx);

        return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_rq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_rq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping rq %d\n", queue_idx);

        return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        enic_free_rq(rxq);
}

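/*
 * Estimate the number of completed but not yet processed receive
 * descriptors: the distance between the hardware CQ tail and the driver's
 * to_clean index, adjusted for ring wrap-around.
 */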
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
                                           uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(dev);
        uint32_t queue_count = 0;
        struct vnic_cq *cq;
        uint32_t cq_tail;
        uint16_t cq_idx;
        int rq_num;

        rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
        cq = &enic->cq[enic_cq_rq(enic, rq_num)];
        cq_idx = cq->to_clean;

        cq_tail = ioread32(&cq->ctrl->cq_tail);

        if (cq_tail < cq_idx)
                cq_tail += cq->ring.desc_count;

        queue_count = cq_tail - cq_idx;

        return queue_count;
}

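/*
 * Each DPDK Rx queue index maps to a start-of-packet (SOP) RQ inside the
 * vNIC; when Rx scatter is enabled, a second data RQ backs the same queue
 * (hence max_rx_queues = conf_rq_count / 2 in dev_infos_get).
 */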
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
        eth_dev->data->rx_queues[queue_idx] =
                (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

        ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
                            rx_conf->rx_free_thresh);
        if (ret) {
                dev_err(enic, "error in allocating rq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct enic *enic = pmd_priv(eth_dev);
        uint64_t offloads;

        ENICPMD_FUNC_TRACE();

        offloads = eth_dev->data->dev_conf.rxmode.offloads;
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
        }

        if ((mask & ETH_VLAN_FILTER_MASK) &&
            (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
                dev_warning(enic,
                        "Configuration of VLAN filter is not supported\n");
        }

        if ((mask & ETH_VLAN_EXTEND_MASK) &&
            (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
                dev_warning(enic,
                        "Configuration of extended VLAN is not supported\n");
        }

        return enic_set_vlan_strip(enic);
}

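/*
 * Apply the port configuration: validate vNIC resource counts, latch the Rx
 * checksum offload setting, re-apply all VLAN offload flags, and program the
 * default (or user-supplied) RSS configuration.
 */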
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
        int ret;
        int mask;
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        ret = enic_set_vnic_res(enic);
        if (ret) {
                dev_err(enic, "Set vNIC resource num failed, aborting\n");
                return ret;
        }

        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
                                  DEV_RX_OFFLOAD_CHECKSUM);
        /* All vlan offload masks to apply the current settings */
        mask = ETH_VLAN_STRIP_MASK |
                ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
        ret = enicpmd_vlan_offload_set(eth_dev, mask);
        if (ret) {
                dev_err(enic, "Failed to configure VLAN offloads\n");
                return ret;
        }
        /*
         * Initialize RSS with the default reta and key. If the user key is
         * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
         * default key.
         */
        return enic_init_rss_nic_cfg(enic);
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);

        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(eth_dev, &link);
}

/*
 * Close device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
        __rte_unused int wait_to_complete)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
        device_info->max_rx_queues = enic->conf_rq_count / 2;
        device_info->max_tx_queues = enic->conf_wq_count;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        /* "Max" mtu is not a typo. HW receives packet sizes up to the
         * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
         * a hint to the driver to size receive buffers accordingly so that
         * larger-than-vnic-mtu packets get truncated. For DPDK, we let
         * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
         * ignoring vNIC mtu.
         */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
        device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
        device_info->rx_offload_capa = enic->rx_offload_capa;
        device_info->tx_offload_capa = enic->tx_offload_capa;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
        device_info->reta_size = enic->reta_size;
        device_info->hash_key_size = enic->hash_key_size;
        device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == enic_recv_pkts)
                return ptypes;
        return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();

        enic->promisc = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->promisc = 0;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 0;
        enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
        struct ether_addr *mac_addr,
        __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        if (enic_del_mac_address(enic, index))
                dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
                                struct ether_addr *addr)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        ret = enic_del_mac_address(enic, 0);
        if (ret)
                return ret;
        return enic_set_mac_address(enic, addr->addr_bytes);
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_set_mtu(enic, mtu);
}

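/*
 * Report the RSS redirection table. Hardware entries hold SOP RQ indexes,
 * so each one is translated back to the DPDK queue index before it is
 * returned to the application.
 */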
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_reta_entry64
                                      *reta_conf,
                                      uint16_t reta_size)
{
        struct enic *enic = pmd_priv(dev);
        uint16_t i, idx, shift;

        ENICPMD_FUNC_TRACE();
        if (reta_size != ENIC_RSS_RETA_SIZE) {
                dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
                        reta_size, ENIC_RSS_RETA_SIZE);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
                                enic->rss_cpu.cpu[i / 4].b[i % 4]);
        }

        return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_reta_entry64
                                       *reta_conf,
                                       uint16_t reta_size)
{
        struct enic *enic = pmd_priv(dev);
        union vnic_rss_cpu rss_cpu;
        uint16_t i, idx, shift;

        ENICPMD_FUNC_TRACE();
        if (reta_size != ENIC_RSS_RETA_SIZE) {
                dev_err(enic, "reta_update: wrong reta_size. given=%u"
                        " expected=%u\n",
                        reta_size, ENIC_RSS_RETA_SIZE);
                return -EINVAL;
        }
        /*
         * Start with the current reta and modify it per reta_conf, as we
         * need to push the entire reta even if we only modify one entry.
         */
        rss_cpu = enic->rss_cpu;
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        rss_cpu.cpu[i / 4].b[i % 4] =
                                enic_rte_rq_idx_to_sop_idx(
                                        reta_conf[idx].reta[shift]);
        }
        return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
        struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                         struct rte_eth_rss_conf *rss_conf)
{
        struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        if (rss_conf == NULL)
                return -EINVAL;
        if (rss_conf->rss_key != NULL &&
            rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
                dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
                        " expected=%u+\n",
                        rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
                return -EINVAL;
        }
        rss_conf->rss_hf = enic->rss_hf;
        if (rss_conf->rss_key != NULL) {
                int i;
                for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
                        rss_conf->rss_key[i] =
                                enic->rss_key.key[i / 10].b[i % 10];
                }
                rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
        }
        return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
                                     uint16_t rx_queue_id,
                                     struct rte_eth_rxq_info *qinfo)
{
        struct enic *enic = pmd_priv(dev);
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        struct rte_eth_rxconf *conf;
        uint16_t sop_queue_idx;
        uint16_t data_queue_idx;

        ENICPMD_FUNC_TRACE();
        sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
        data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
        rq_sop = &enic->rq[sop_queue_idx];
        rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
        qinfo->mp = rq_sop->mp;
        qinfo->scattered_rx = rq_sop->data_queue_enable;
        qinfo->nb_desc = rq_sop->ring.desc_count;
        if (qinfo->scattered_rx)
                qinfo->nb_desc += rq_data->ring.desc_count;
        conf = &qinfo->conf;
        memset(conf, 0, sizeof(*conf));
        conf->rx_free_thresh = rq_sop->rx_free_thresh;
        conf->rx_drop_en = 1;
        /*
         * Except VLAN stripping (port setting), all the checksum offloads
         * are always enabled.
         */
        conf->offloads = enic->rx_offload_capa;
        if (!enic->ig_vlan_strip_en)
                conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
        /* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
                                     uint16_t tx_queue_id,
                                     struct rte_eth_txq_info *qinfo)
{
        struct enic *enic = pmd_priv(dev);
        struct vnic_wq *wq = &enic->wq[tx_queue_id];

        ENICPMD_FUNC_TRACE();
        qinfo->nb_desc = wq->ring.desc_count;
        memset(&qinfo->conf, 0, sizeof(qinfo->conf));
        qinfo->conf.offloads = enic->tx_offload_capa;
        /* tx_thresh, and all the other fields are not applicable for enic */
}

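/*
 * Rx queue interrupts: the vectors for receive queues start at
 * ENICPMD_RXQ_INTR_OFFSET; enabling or disabling a queue interrupt simply
 * unmasks or masks the corresponding vNIC interrupt.
 */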
static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
                                            uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
        return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                                             uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
        return 0;
}

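/* Ops table for this PMD; entries left NULL are not supported by enic. */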
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure        = enicpmd_dev_configure,
        .dev_start            = enicpmd_dev_start,
        .dev_stop             = enicpmd_dev_stop,
        .dev_set_link_up      = NULL,
        .dev_set_link_down    = NULL,
        .dev_close            = enicpmd_dev_close,
        .promiscuous_enable   = enicpmd_dev_promiscuous_enable,
        .promiscuous_disable  = enicpmd_dev_promiscuous_disable,
        .allmulticast_enable  = enicpmd_dev_allmulticast_enable,
        .allmulticast_disable = enicpmd_dev_allmulticast_disable,
        .link_update          = enicpmd_dev_link_update,
        .stats_get            = enicpmd_dev_stats_get,
        .stats_reset          = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get        = enicpmd_dev_info_get,
        .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set              = enicpmd_mtu_set,
        .vlan_filter_set      = NULL,
        .vlan_tpid_set        = NULL,
        .vlan_offload_set     = enicpmd_vlan_offload_set,
        .vlan_strip_queue_set = NULL,
        .rx_queue_start       = enicpmd_dev_rx_queue_start,
        .rx_queue_stop        = enicpmd_dev_rx_queue_stop,
        .tx_queue_start       = enicpmd_dev_tx_queue_start,
        .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
        .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
        .rx_queue_release     = enicpmd_dev_rx_queue_release,
        .rx_queue_count       = enicpmd_dev_rx_queue_count,
        .rx_descriptor_done   = NULL,
        .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
        .tx_queue_release     = enicpmd_dev_tx_queue_release,
        .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
        .rxq_info_get         = enicpmd_dev_rxq_info_get,
        .txq_info_get         = enicpmd_dev_txq_info_get,
        .dev_led_on           = NULL,
        .dev_led_off          = NULL,
        .flow_ctrl_get        = NULL,
        .flow_ctrl_set        = NULL,
        .priority_flow_ctrl_set = NULL,
        .mac_addr_add         = enicpmd_add_mac_addr,
        .mac_addr_remove      = enicpmd_remove_mac_addr,
        .mac_addr_set         = enicpmd_set_mac_addr,
        .filter_ctrl          = enicpmd_dev_filter_ctrl,
        .reta_query           = enicpmd_dev_rss_reta_query,
        .reta_update          = enicpmd_dev_rss_reta_update,
        .rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
        .rss_hash_update      = enicpmd_dev_rss_hash_update,
};

static int enic_parse_disable_overlay(__rte_unused const char *key,
                                      const char *value,
                                      void *opaque)
{
        struct enic *enic;

        enic = (struct enic *)opaque;
        if (strcmp(value, "0") == 0) {
                enic->disable_overlay = false;
        } else if (strcmp(value, "1") == 0) {
                enic->disable_overlay = true;
        } else {
                dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
                        ": expected=0|1 given=%s\n", value);
                return -EINVAL;
        }
        return 0;
}

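/*
 * Parse device arguments. "disable-overlay" is the only key recognized
 * here; for example (hypothetical PCI address), passing the devargs
 * "0000:0b:00.0,disable-overlay=1" to the EAL would turn overlay offload
 * off for that port.
 */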
static int enic_check_devargs(struct rte_eth_dev *dev)
{
        static const char *const valid_keys[] = {
                ENIC_DEVARG_DISABLE_OVERLAY, NULL};
        struct enic *enic = pmd_priv(dev);
        struct rte_kvargs *kvlist;

        ENICPMD_FUNC_TRACE();

        enic->disable_overlay = false;
        if (!dev->device->devargs)
                return 0;
        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;
        if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
                               enic_parse_disable_overlay, enic) < 0) {
                rte_kvargs_free(kvlist);
                return -EINVAL;
        }
        rte_kvargs_free(kvlist);
        return 0;
}

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
        struct enic *enic = pmd_priv(eth_dev);
        int err;

        ENICPMD_FUNC_TRACE();

        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
        eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_xmit_pkts;
        eth_dev->tx_pkt_prepare = &enic_prep_pkts;

        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
        enic->pdev = pdev;
        addr = &pdev->addr;

        snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                addr->domain, addr->bus, addr->devid, addr->function);

        err = enic_check_devargs(eth_dev);
        if (err)
                return err;
        return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
                eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
        .id_table = pci_id_enic_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_enic_pci_probe,
        .remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
                              ENIC_DEVARG_DISABLE_OVERLAY "=<0|1> ");