net/enic: heed the requested max Rx packet size
[dpdk.git] drivers/net/enic/enic_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  */
5
6 #include <stdio.h>
7 #include <stdint.h>
8
9 #include <rte_dev.h>
10 #include <rte_pci.h>
11 #include <rte_bus_pci.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_ethdev_pci.h>
14 #include <rte_string_fns.h>
15
16 #include "vnic_intr.h"
17 #include "vnic_cq.h"
18 #include "vnic_wq.h"
19 #include "vnic_rq.h"
20 #include "vnic_enet.h"
21 #include "enic.h"
22
23 int enicpmd_logtype_init;
24 int enicpmd_logtype_flow;
25
26 #define PMD_INIT_LOG(level, fmt, args...) \
27         rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
28                 "%s" fmt "\n", __func__, ##args)
29
30 #define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
31
32 /*
33  * The set of PCI devices this driver supports
34  */
35 #define CISCO_PCI_VENDOR_ID 0x1137
36 static const struct rte_pci_id pci_id_enic_map[] = {
37         { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
38         { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
39         {.vendor_id = 0, /* sentinel */},
40 };
41
42 RTE_INIT(enicpmd_init_log);
43 static void
44 enicpmd_init_log(void)
45 {
46         enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
47         if (enicpmd_logtype_init >= 0)
48                 rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
49         enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
50         if (enicpmd_logtype_flow >= 0)
51                 rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
52 }
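/*
 * Editor's note (illustrative, not part of the original file): the two
 * dynamic log types registered above can be raised to DEBUG at run time
 * from application code, for example:
 *
 *        rte_log_set_level(enicpmd_logtype_init, RTE_LOG_DEBUG);
 *        rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_DEBUG);
 *
 * Depending on the DPDK release, the EAL --log-level option can also
 * select these types by name ("pmd.net.enic.init", "pmd.net.enic.flow").
 */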
53
54 static int
55 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
56                         enum rte_filter_op filter_op, void *arg)
57 {
58         struct enic *enic = pmd_priv(eth_dev);
59         int ret = 0;
60
61         ENICPMD_FUNC_TRACE();
62         if (filter_op == RTE_ETH_FILTER_NOP)
63                 return 0;
64
65         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
66                 return -EINVAL;
67
68         switch (filter_op) {
69         case RTE_ETH_FILTER_ADD:
70         case RTE_ETH_FILTER_UPDATE:
71                 ret = enic_fdir_add_fltr(enic,
72                         (struct rte_eth_fdir_filter *)arg);
73                 break;
74
75         case RTE_ETH_FILTER_DELETE:
76                 ret = enic_fdir_del_fltr(enic,
77                         (struct rte_eth_fdir_filter *)arg);
78                 break;
79
80         case RTE_ETH_FILTER_STATS:
81                 enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
82                 break;
83
84         case RTE_ETH_FILTER_FLUSH:
85                 dev_warning(enic, "unsupported operation %u", filter_op);
86                 ret = -ENOTSUP;
87                 break;
88         case RTE_ETH_FILTER_INFO:
89                 enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
90                 break;
91         default:
92                 dev_err(enic, "unknown operation %u", filter_op);
93                 ret = -EINVAL;
94                 break;
95         }
96         return ret;
97 }
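/*
 * Editor's note (illustrative sketch of the legacy flow-director API;
 * values are hypothetical): the handlers above service calls such as
 * steering IPv4/TCP packets that match a filter to Rx queue 1:
 *
 *        struct rte_eth_fdir_filter f = {
 *                .soft_id = 1,
 *                .input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
 *                .action = { .rx_queue = 1, .behavior = RTE_ETH_FDIR_ACCEPT },
 *        };
 *
 *        rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                RTE_ETH_FILTER_ADD, &f);
 *
 * Note that RTE_ETH_FILTER_FLUSH is rejected with -ENOTSUP by this PMD.
 */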
98
99 static int
100 enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
101                      enum rte_filter_type filter_type,
102                      enum rte_filter_op filter_op,
103                      void *arg)
104 {
105         int ret = 0;
106
107         ENICPMD_FUNC_TRACE();
108
109         switch (filter_type) {
110         case RTE_ETH_FILTER_GENERIC:
111                 if (filter_op != RTE_ETH_FILTER_GET)
112                         return -EINVAL;
113                 *(const void **)arg = &enic_flow_ops;
114                 break;
115         case RTE_ETH_FILTER_FDIR:
116                 ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
117                 break;
118         default:
119                 dev_warning(enic, "Filter type (%d) not supported",
120                         filter_type);
121                 ret = -EINVAL;
122                 break;
123         }
124
125         return ret;
126 }
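/*
 * Editor's note (illustrative): RTE_ETH_FILTER_GENERIC is the hook the
 * rte_flow layer uses to discover a PMD's flow ops. An application call
 * such as
 *
 *        flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *
 * (attr, pattern and actions built by the application) reaches this
 * function with filter_op == RTE_ETH_FILTER_GET to fetch &enic_flow_ops,
 * and rte_flow then invokes those ops directly.
 */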
127
128 static void enicpmd_dev_tx_queue_release(void *txq)
129 {
130         ENICPMD_FUNC_TRACE();
131
132         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
133                 return;
134
135         enic_free_wq(txq);
136 }
137
138 static int enicpmd_dev_setup_intr(struct enic *enic)
139 {
140         int ret;
141         unsigned int index;
142
143         ENICPMD_FUNC_TRACE();
144
145         /* Are we done with the init of all the queues? */
146         for (index = 0; index < enic->cq_count; index++) {
147                 if (!enic->cq[index].ctrl)
148                         break;
149         }
150         if (enic->cq_count != index)
151                 return 0;
152         for (index = 0; index < enic->wq_count; index++) {
153                 if (!enic->wq[index].ctrl)
154                         break;
155         }
156         if (enic->wq_count != index)
157                 return 0;
158         /* check start of packet (SOP) RQs only in case scatter is disabled. */
159         for (index = 0; index < enic->rq_count; index++) {
160                 if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
161                         break;
162         }
163         if (enic->rq_count != index)
164                 return 0;
165
166         ret = enic_alloc_intr_resources(enic);
167         if (ret) {
168                 dev_err(enic, "alloc intr failed\n");
169                 return ret;
170         }
171         enic_init_vnic_resources(enic);
172
173         ret = enic_setup_finish(enic);
174         if (ret)
175                 dev_err(enic, "setup could not be finished\n");
176
177         return ret;
178 }
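/*
 * Editor's note: enicpmd_dev_setup_intr() is called at the end of every
 * Rx and Tx queue setup. The loops above turn it into a no-op until the
 * last queue has been configured, so interrupt resources and vNIC state
 * are initialized exactly once, after all queues exist.
 */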
179
180 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
181         uint16_t queue_idx,
182         uint16_t nb_desc,
183         unsigned int socket_id,
184         __rte_unused const struct rte_eth_txconf *tx_conf)
185 {
186         int ret;
187         struct enic *enic = pmd_priv(eth_dev);
188
189         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
190                 return -E_RTE_SECONDARY;
191
192         ENICPMD_FUNC_TRACE();
193         RTE_ASSERT(queue_idx < enic->conf_wq_count);
194         eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
195
196         ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
197         if (ret) {
198                 dev_err(enic, "error in allocating wq\n");
199                 return ret;
200         }
201
202         return enicpmd_dev_setup_intr(enic);
203 }
204
205 static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
206         uint16_t queue_idx)
207 {
208         struct enic *enic = pmd_priv(eth_dev);
209
210         ENICPMD_FUNC_TRACE();
211
212         enic_start_wq(enic, queue_idx);
213
214         return 0;
215 }
216
217 static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
218         uint16_t queue_idx)
219 {
220         int ret;
221         struct enic *enic = pmd_priv(eth_dev);
222
223         ENICPMD_FUNC_TRACE();
224
225         ret = enic_stop_wq(enic, queue_idx);
226         if (ret)
227                 dev_err(enic, "error in stopping wq %d\n", queue_idx);
228
229         return ret;
230 }
231
232 static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
233         uint16_t queue_idx)
234 {
235         struct enic *enic = pmd_priv(eth_dev);
236
237         ENICPMD_FUNC_TRACE();
238
239         enic_start_rq(enic, queue_idx);
240
241         return 0;
242 }
243
244 static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
245         uint16_t queue_idx)
246 {
247         int ret;
248         struct enic *enic = pmd_priv(eth_dev);
249
250         ENICPMD_FUNC_TRACE();
251
252         ret = enic_stop_rq(enic, queue_idx);
253         if (ret)
254                 dev_err(enic, "error in stopping rq %d\n", queue_idx);
255
256         return ret;
257 }
258
259 static void enicpmd_dev_rx_queue_release(void *rxq)
260 {
261         ENICPMD_FUNC_TRACE();
262
263         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
264                 return;
265
266         enic_free_rq(rxq);
267 }
268
269 static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
270                                            uint16_t rx_queue_id)
271 {
272         struct enic *enic = pmd_priv(dev);
273         uint32_t queue_count = 0;
274         struct vnic_cq *cq;
275         uint32_t cq_tail;
276         uint16_t cq_idx;
277         int rq_num;
278
279         rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
280         cq = &enic->cq[enic_cq_rq(enic, rq_num)];
281         cq_idx = cq->to_clean;
282
283         cq_tail = ioread32(&cq->ctrl->cq_tail);
284
285         if (cq_tail < cq_idx)
286                 cq_tail += cq->ring.desc_count;
287
288         queue_count = cq_tail - cq_idx;
289
290         return queue_count;
291 }
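/*
 * Editor's note (worked example): the value returned above is the number
 * of completions the adapter has posted that the driver has not yet
 * processed. With desc_count = 256, to_clean = 250 and a hardware
 * cq_tail of 10, cq_tail is first advanced to 266 to undo the ring
 * wrap-around, giving 266 - 250 = 16 packets pending on the queue.
 */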
292
293 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
294         uint16_t queue_idx,
295         uint16_t nb_desc,
296         unsigned int socket_id,
297         const struct rte_eth_rxconf *rx_conf,
298         struct rte_mempool *mp)
299 {
300         int ret;
301         struct enic *enic = pmd_priv(eth_dev);
302
303         ENICPMD_FUNC_TRACE();
304
305         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
306                 return -E_RTE_SECONDARY;
307         RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
308         eth_dev->data->rx_queues[queue_idx] =
309                 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
310
311         ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
312                             rx_conf->rx_free_thresh);
313         if (ret) {
314                 dev_err(enic, "error in allocating rq\n");
315                 return ret;
316         }
317
318         return enicpmd_dev_setup_intr(enic);
319 }
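/*
 * Editor's note (assumption about the enic.h index helpers): each DPDK Rx
 * queue is backed by a pair of hardware receive queues, a start-of-packet
 * (SOP) RQ plus a data RQ used when Rx scatter is enabled. The helpers
 * used above are expected to map DPDK queue i to SOP RQ 2*i and data RQ
 * 2*i + 1, which is also why dev_infos_get() reports conf_rq_count / 2
 * as max_rx_queues.
 */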
320
321 static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
322         uint16_t vlan_id, int on)
323 {
324         struct enic *enic = pmd_priv(eth_dev);
325         int err;
326
327         ENICPMD_FUNC_TRACE();
328         if (on)
329                 err = enic_add_vlan(enic, vlan_id);
330         else
331                 err = enic_del_vlan(enic, vlan_id);
332         return err;
333 }
334
335 static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
336 {
337         struct enic *enic = pmd_priv(eth_dev);
338
339         ENICPMD_FUNC_TRACE();
340
341         if (mask & ETH_VLAN_STRIP_MASK) {
342                 if (eth_dev->data->dev_conf.rxmode.offloads &
343                     DEV_RX_OFFLOAD_VLAN_STRIP)
344                         enic->ig_vlan_strip_en = 1;
345                 else
346                         enic->ig_vlan_strip_en = 0;
347         }
348
349         if (mask & ETH_VLAN_FILTER_MASK) {
350                 dev_warning(enic,
351                         "Configuration of VLAN filter is not supported\n");
352         }
353
354         if (mask & ETH_VLAN_EXTEND_MASK) {
355                 dev_warning(enic,
356                         "Configuration of extended VLAN is not supported\n");
357         }
358
359         return enic_set_vlan_strip(enic);
360 }
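/*
 * Editor's note (illustrative): after the port is configured, VLAN
 * stripping can be toggled with the generic ethdev call, which updates
 * rxmode.offloads and then invokes the handler above with
 * ETH_VLAN_STRIP_MASK set, e.g. to enable stripping:
 *
 *        rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
 */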
361
362 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
363 {
364         int ret;
365         struct enic *enic = pmd_priv(eth_dev);
366
367         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
368                 return -E_RTE_SECONDARY;
369
370         ENICPMD_FUNC_TRACE();
371         ret = enic_set_vnic_res(enic);
372         if (ret) {
373                 dev_err(enic, "Set vNIC resource num failed, aborting\n");
374                 return ret;
375         }
376
377         enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
378                                   DEV_RX_OFFLOAD_CHECKSUM);
379         ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
380         if (ret) {
381                 dev_err(enic, "Failed to configure VLAN offloads\n");
382                 return ret;
383         }
384         /*
385          * Initialize RSS with the default reta and key. If the user key is
386          * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
387          * default key.
388          */
389         return enic_init_rss_nic_cfg(enic);
390 }
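/*
 * Editor's note (illustrative application-side sketch; structures and
 * flags are the standard ethdev ones of this DPDK era, the key buffer is
 * hypothetical): the checksum, VLAN and RSS behaviour selected above is
 * driven entirely by what the application passed to
 * rte_eth_dev_configure(), for example:
 *
 *        static uint8_t rss_key[40];    // optional user-supplied hash key
 *        struct rte_eth_conf conf = { 0 };
 *
 *        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *        conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
 *                               DEV_RX_OFFLOAD_VLAN_STRIP;
 *        conf.rx_adv_conf.rss_conf.rss_key = rss_key;
 *        conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(rss_key);
 *        conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
 *        rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */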
391
392 /* Start the device.
393  * It returns 0 on success.
394  */
395 static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
396 {
397         struct enic *enic = pmd_priv(eth_dev);
398
399         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
400                 return -E_RTE_SECONDARY;
401
402         ENICPMD_FUNC_TRACE();
403         return enic_enable(enic);
404 }
405
406 /*
407  * Stop device: disable rx and tx functions to allow for reconfiguring.
408  */
409 static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
410 {
411         struct rte_eth_link link;
412         struct enic *enic = pmd_priv(eth_dev);
413
414         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
415                 return;
416
417         ENICPMD_FUNC_TRACE();
418         enic_disable(enic);
419         memset(&link, 0, sizeof(link));
420         rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
421                 *(uint64_t *)&eth_dev->data->dev_link,
422                 *(uint64_t *)&link);
423 }
424
425 /*
426  * Close device.
427  */
428 static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
429 {
430         struct enic *enic = pmd_priv(eth_dev);
431
432         ENICPMD_FUNC_TRACE();
433         enic_remove(enic);
434 }
435
436 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
437         __rte_unused int wait_to_complete)
438 {
439         struct enic *enic = pmd_priv(eth_dev);
440
441         ENICPMD_FUNC_TRACE();
442         return enic_link_update(enic);
443 }
444
445 static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
446         struct rte_eth_stats *stats)
447 {
448         struct enic *enic = pmd_priv(eth_dev);
449
450         ENICPMD_FUNC_TRACE();
451         return enic_dev_stats_get(enic, stats);
452 }
453
454 static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
455 {
456         struct enic *enic = pmd_priv(eth_dev);
457
458         ENICPMD_FUNC_TRACE();
459         enic_dev_stats_clear(enic);
460 }
461
462 static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
463         struct rte_eth_dev_info *device_info)
464 {
465         struct enic *enic = pmd_priv(eth_dev);
466
467         ENICPMD_FUNC_TRACE();
468         device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
469         /* Scattered Rx uses two receive queues per Rx queue exposed to DPDK */
470         device_info->max_rx_queues = enic->conf_rq_count / 2;
471         device_info->max_tx_queues = enic->conf_wq_count;
472         device_info->min_rx_bufsize = ENIC_MIN_MTU;
473         /* "Max" mtu is not a typo. HW receives packet sizes up to the
474          * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
475          * a hint to the driver to size receive buffers accordingly so that
476          * larger-than-vnic-mtu packets get truncated. For DPDK, we let
477          * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
478          * ignoring vNIC mtu.
479          */
480         device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
481         device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
482         device_info->rx_offload_capa =
483                 DEV_RX_OFFLOAD_VLAN_STRIP |
484                 DEV_RX_OFFLOAD_IPV4_CKSUM |
485                 DEV_RX_OFFLOAD_UDP_CKSUM  |
486                 DEV_RX_OFFLOAD_TCP_CKSUM;
487         device_info->tx_offload_capa =
488                 DEV_TX_OFFLOAD_VLAN_INSERT |
489                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
490                 DEV_TX_OFFLOAD_UDP_CKSUM   |
491                 DEV_TX_OFFLOAD_TCP_CKSUM   |
492                 DEV_TX_OFFLOAD_TCP_TSO;
493         device_info->default_rxconf = (struct rte_eth_rxconf) {
494                 .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
495         };
496         device_info->reta_size = enic->reta_size;
497         device_info->hash_key_size = enic->hash_key_size;
498         device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
499 }
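/*
 * Editor's note (illustrative, hypothetical sizes): because the adapter
 * accepts frames up to the max MTU regardless of the configured MTU, an
 * application bounds what it actually receives through
 * rxmode.max_rx_pkt_len, staying within the limit advertised above:
 *
 *        struct rte_eth_dev_info info;
 *
 *        rte_eth_dev_info_get(port_id, &info);
 *        conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 *        conf.rxmode.max_rx_pkt_len = RTE_MIN(9000u, info.max_rx_pktlen);
 *        rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * The PMD then sizes its receive buffers from max_rx_pkt_len (the point
 * of the commit titled above) rather than from the vNIC MTU.
 */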
500
501 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
502 {
503         static const uint32_t ptypes[] = {
504                 RTE_PTYPE_L2_ETHER,
505                 RTE_PTYPE_L2_ETHER_VLAN,
506                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
507                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
508                 RTE_PTYPE_L4_TCP,
509                 RTE_PTYPE_L4_UDP,
510                 RTE_PTYPE_L4_FRAG,
511                 RTE_PTYPE_L4_NONFRAG,
512                 RTE_PTYPE_UNKNOWN
513         };
514
515         if (dev->rx_pkt_burst == enic_recv_pkts)
516                 return ptypes;
517         return NULL;
518 }
519
520 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
521 {
522         struct enic *enic = pmd_priv(eth_dev);
523
524         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
525                 return;
526
527         ENICPMD_FUNC_TRACE();
528
529         enic->promisc = 1;
530         enic_add_packet_filter(enic);
531 }
532
533 static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
534 {
535         struct enic *enic = pmd_priv(eth_dev);
536
537         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
538                 return;
539
540         ENICPMD_FUNC_TRACE();
541         enic->promisc = 0;
542         enic_add_packet_filter(enic);
543 }
544
545 static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
546 {
547         struct enic *enic = pmd_priv(eth_dev);
548
549         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
550                 return;
551
552         ENICPMD_FUNC_TRACE();
553         enic->allmulti = 1;
554         enic_add_packet_filter(enic);
555 }
556
557 static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
558 {
559         struct enic *enic = pmd_priv(eth_dev);
560
561         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
562                 return;
563
564         ENICPMD_FUNC_TRACE();
565         enic->allmulti = 0;
566         enic_add_packet_filter(enic);
567 }
568
569 static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
570         struct ether_addr *mac_addr,
571         __rte_unused uint32_t index, __rte_unused uint32_t pool)
572 {
573         struct enic *enic = pmd_priv(eth_dev);
574
575         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
576                 return -E_RTE_SECONDARY;
577
578         ENICPMD_FUNC_TRACE();
579         return enic_set_mac_address(enic, mac_addr->addr_bytes);
580 }
581
582 static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
583 {
584         struct enic *enic = pmd_priv(eth_dev);
585
586         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
587                 return;
588
589         ENICPMD_FUNC_TRACE();
590         enic_del_mac_address(enic, index);
591 }
592
593 static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
594 {
595         struct enic *enic = pmd_priv(eth_dev);
596
597         ENICPMD_FUNC_TRACE();
598         return enic_set_mtu(enic, mtu);
599 }
600
601 static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
602                                       struct rte_eth_rss_reta_entry64
603                                       *reta_conf,
604                                       uint16_t reta_size)
605 {
606         struct enic *enic = pmd_priv(dev);
607         uint16_t i, idx, shift;
608
609         ENICPMD_FUNC_TRACE();
610         if (reta_size != ENIC_RSS_RETA_SIZE) {
611                 dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
612                         reta_size, ENIC_RSS_RETA_SIZE);
613                 return -EINVAL;
614         }
615
616         for (i = 0; i < reta_size; i++) {
617                 idx = i / RTE_RETA_GROUP_SIZE;
618                 shift = i % RTE_RETA_GROUP_SIZE;
619                 if (reta_conf[idx].mask & (1ULL << shift))
620                         reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
621                                 enic->rss_cpu.cpu[i / 4].b[i % 4]);
622         }
623
624         return 0;
625 }
626
627 static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
628                                        struct rte_eth_rss_reta_entry64
629                                        *reta_conf,
630                                        uint16_t reta_size)
631 {
632         struct enic *enic = pmd_priv(dev);
633         union vnic_rss_cpu rss_cpu;
634         uint16_t i, idx, shift;
635
636         ENICPMD_FUNC_TRACE();
637         if (reta_size != ENIC_RSS_RETA_SIZE) {
638                 dev_err(enic, "reta_update: wrong reta_size. given=%u"
639                         " expected=%u\n",
640                         reta_size, ENIC_RSS_RETA_SIZE);
641                 return -EINVAL;
642         }
643         /*
644          * Start with the current reta and modify it per reta_conf, as we
645          * need to push the entire reta even if we only modify one entry.
646          */
647         rss_cpu = enic->rss_cpu;
648         for (i = 0; i < reta_size; i++) {
649                 idx = i / RTE_RETA_GROUP_SIZE;
650                 shift = i % RTE_RETA_GROUP_SIZE;
651                 if (reta_conf[idx].mask & (1ULL << shift))
652                         rss_cpu.cpu[i / 4].b[i % 4] =
653                                 enic_rte_rq_idx_to_sop_idx(
654                                         reta_conf[idx].reta[shift]);
655         }
656         return enic_set_rss_reta(enic, &rss_cpu);
657 }
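/*
 * Editor's note (worked example; assumes vnic_rss_cpu stores one
 * single-byte queue index per b[] slot, four per cpu[] entry): RETA
 * entry i lives at rss_cpu.cpu[i / 4].b[i % 4]. Steering RETA slot 5 to
 * DPDK Rx queue 3 therefore writes the SOP index of queue 3 into
 * cpu[1].b[1]; the query path above converts such entries back with
 * enic_sop_rq_idx_to_rte_idx().
 */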
658
659 static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
660                                        struct rte_eth_rss_conf *rss_conf)
661 {
662         struct enic *enic = pmd_priv(dev);
663
664         ENICPMD_FUNC_TRACE();
665         return enic_set_rss_conf(enic, rss_conf);
666 }
667
668 static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
669                                          struct rte_eth_rss_conf *rss_conf)
670 {
671         struct enic *enic = pmd_priv(dev);
672
673         ENICPMD_FUNC_TRACE();
674         if (rss_conf == NULL)
675                 return -EINVAL;
676         if (rss_conf->rss_key != NULL &&
677             rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
678                 dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
679                         " expected=%u+\n",
680                         rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
681                 return -EINVAL;
682         }
683         rss_conf->rss_hf = enic->rss_hf;
684         if (rss_conf->rss_key != NULL) {
685                 int i;
686                 for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
687                         rss_conf->rss_key[i] =
688                                 enic->rss_key.key[i / 10].b[i % 10];
689                 }
690                 rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
691         }
692         return 0;
693 }
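/*
 * Editor's note (assumption about the vnic_rss_key layout): the hash key
 * of ENIC_RSS_HASH_KEY_SIZE bytes is stored by the firmware interface in
 * 10-byte chunks, hence key[i / 10].b[i % 10] above; byte 17 of the key,
 * for instance, is read from key[1].b[7].
 */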
694
695 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
696         .dev_configure        = enicpmd_dev_configure,
697         .dev_start            = enicpmd_dev_start,
698         .dev_stop             = enicpmd_dev_stop,
699         .dev_set_link_up      = NULL,
700         .dev_set_link_down    = NULL,
701         .dev_close            = enicpmd_dev_close,
702         .promiscuous_enable   = enicpmd_dev_promiscuous_enable,
703         .promiscuous_disable  = enicpmd_dev_promiscuous_disable,
704         .allmulticast_enable  = enicpmd_dev_allmulticast_enable,
705         .allmulticast_disable = enicpmd_dev_allmulticast_disable,
706         .link_update          = enicpmd_dev_link_update,
707         .stats_get            = enicpmd_dev_stats_get,
708         .stats_reset          = enicpmd_dev_stats_reset,
709         .queue_stats_mapping_set = NULL,
710         .dev_infos_get        = enicpmd_dev_info_get,
711         .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
712         .mtu_set              = enicpmd_mtu_set,
713         .vlan_filter_set      = enicpmd_vlan_filter_set,
714         .vlan_tpid_set        = NULL,
715         .vlan_offload_set     = enicpmd_vlan_offload_set,
716         .vlan_strip_queue_set = NULL,
717         .rx_queue_start       = enicpmd_dev_rx_queue_start,
718         .rx_queue_stop        = enicpmd_dev_rx_queue_stop,
719         .tx_queue_start       = enicpmd_dev_tx_queue_start,
720         .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
721         .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
722         .rx_queue_release     = enicpmd_dev_rx_queue_release,
723         .rx_queue_count       = enicpmd_dev_rx_queue_count,
724         .rx_descriptor_done   = NULL,
725         .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
726         .tx_queue_release     = enicpmd_dev_tx_queue_release,
727         .dev_led_on           = NULL,
728         .dev_led_off          = NULL,
729         .flow_ctrl_get        = NULL,
730         .flow_ctrl_set        = NULL,
731         .priority_flow_ctrl_set = NULL,
732         .mac_addr_add         = enicpmd_add_mac_addr,
733         .mac_addr_remove      = enicpmd_remove_mac_addr,
734         .filter_ctrl          = enicpmd_dev_filter_ctrl,
735         .reta_query           = enicpmd_dev_rss_reta_query,
736         .reta_update          = enicpmd_dev_rss_reta_update,
737         .rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
738         .rss_hash_update      = enicpmd_dev_rss_hash_update,
739 };
740
741 struct enic *enicpmd_list_head = NULL;
742 /* Initialize the driver
743  * It returns 0 on success.
744  */
745 static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
746 {
747         struct rte_pci_device *pdev;
748         struct rte_pci_addr *addr;
749         struct enic *enic = pmd_priv(eth_dev);
750
751         ENICPMD_FUNC_TRACE();
752
753         enic->port_id = eth_dev->data->port_id;
754         enic->rte_dev = eth_dev;
755         eth_dev->dev_ops = &enicpmd_eth_dev_ops;
756         eth_dev->rx_pkt_burst = &enic_recv_pkts;
757         eth_dev->tx_pkt_burst = &enic_xmit_pkts;
758         eth_dev->tx_pkt_prepare = &enic_prep_pkts;
759
760         pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
761         rte_eth_copy_pci_info(eth_dev, pdev);
762         enic->pdev = pdev;
763         addr = &pdev->addr;
764
765         snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
766                 addr->domain, addr->bus, addr->devid, addr->function);
767
768         return enic_probe(enic);
769 }
770
771 static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
772         struct rte_pci_device *pci_dev)
773 {
774         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
775                 eth_enicpmd_dev_init);
776 }
777
778 static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
779 {
780         return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
781 }
782
783 static struct rte_pci_driver rte_enic_pmd = {
784         .id_table = pci_id_enic_map,
785         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
786         .probe = eth_enic_pci_probe,
787         .remove = eth_enic_pci_remove,
788 };
789
790 RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
791 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
792 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
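/*
 * Editor's note: the RTE_PMD_REGISTER_KMOD_DEP() line above declares that
 * the VIC must be bound to igb_uio, uio_pci_generic or vfio-pci before
 * the PMD can claim it (for example with usertools/dpdk-devbind.py
 * --bind=vfio-pci <bdf>); RTE_PMD_REGISTER_PCI_TABLE() exports the PCI ID
 * list so that tools can match Cisco VIC devices to this driver.
 */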