/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#ident "$Id$"

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
        RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enic_map[] = {
#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#ifndef PCI_VENDOR_ID_CISCO
#define PCI_VENDOR_ID_CISCO     0x1137
#endif
#include "rte_pci_dev_ids.h"
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
{.vendor_id = 0, /* Sentinel */},
};

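/*
 * Dispatch a flow director (fdir) filter operation to the enic-specific
 * add/delete/stats handlers.
 */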
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
                        enum rte_filter_op filter_op, void *arg)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret = 0;

        ENICPMD_FUNC_TRACE();
        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
                return -EINVAL;

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
        case RTE_ETH_FILTER_UPDATE:
                ret = enic_fdir_add_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_DELETE:
                ret = enic_fdir_del_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_STATS:
                enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
                break;

        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_INFO:
                dev_warning(enic, "unsupported operation %u", filter_op);
                ret = -ENOTSUP;
                break;
        default:
                dev_err(enic, "unknown operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = -EINVAL;

        if (RTE_ETH_FILTER_FDIR == filter_type)
                ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
        else
                dev_warning(pmd_priv(dev), "Filter type (%d) not supported",
                        filter_type);

        return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();
        enic_free_wq(txq);
}

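/*
 * Once every completion queue has been initialized, allocate the
 * interrupt resources and finish vNIC setup; until then this is a no-op.
 */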
static int enicpmd_dev_setup_intr(struct enic *enic)
{
        int ret;
        unsigned int index;

        ENICPMD_FUNC_TRACE();

        /* Are we done with the init of all the queues? */
        for (index = 0; index < enic->cq_count; index++) {
                if (!enic->cq[index].ctrl)
                        break;
        }

        if (enic->cq_count != index)
                return 0;

        ret = enic_alloc_intr_resources(enic);
        if (ret) {
                dev_err(enic, "alloc intr failed\n");
                return ret;
        }
        enic_init_vnic_resources(enic);

        ret = enic_setup_finish(enic);
        if (ret)
                dev_err(enic, "setup could not be finished\n");

        return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        __rte_unused const struct rte_eth_txconf *tx_conf)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
        if (ret) {
                dev_err(enic, "error in allocating wq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_wq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_wq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping wq %d\n", queue_idx);

        return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_rq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_rq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping rq %d\n", queue_idx);

        return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();
        enic_free_rq(rxq);
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        __rte_unused const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];

        ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
        if (ret) {
                dev_err(enic, "error in allocating rq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
        uint16_t vlan_id, int on)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        if (on)
                enic_add_vlan(enic, vlan_id);
        else
                enic_del_vlan(enic, vlan_id);
        return 0;
}

static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
        }
        enic_set_rss_nic_cfg(enic);

        if (mask & ETH_VLAN_FILTER_MASK) {
                dev_warning(enic,
                        "Configuration of VLAN filter is not supported\n");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                dev_warning(enic,
                        "Configuration of extended VLAN is not supported\n");
        }
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        ret = enic_set_vnic_res(enic);
        if (ret) {
                dev_err(enic, "Set vNIC resource num failed, aborting\n");
                return ret;
        }

        if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
                eth_dev->data->dev_conf.rxmode.header_split) {
                /* Enable header-data-split */
                enic_set_hdr_split_size(enic,
                        eth_dev->data->dev_conf.rxmode.split_hdr_size);
        }

        enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
        return 0;
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);

        /* Atomically clear the reported link state */
        memset(&link, 0, sizeof(link));
        rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&link);
}

/*
 * Close device: release device resources.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_remove(enic);
}

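/*
 * Refresh the cached link state from the hardware. Note that the return
 * value is nonzero when the link status is unchanged.
 */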
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
        __rte_unused int wait_to_complete)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret;
        int link_status = 0;

        ENICPMD_FUNC_TRACE();
        link_status = enic_get_link_status(enic);
        ret = (link_status == enic->link_status);
        enic->link_status = link_status;
        eth_dev->data->dev_link.link_status = link_status;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        return ret;
}

static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        device_info->max_rx_queues = enic->rq_count;
        device_info->max_tx_queues = enic->wq_count;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        device_info->max_rx_pktlen = enic->config.mtu;
        device_info->max_mac_addrs = 1;
        device_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        device_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 0;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 0;
        enic_add_packet_filter(enic);
}

static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
        struct ether_addr *mac_addr,
        __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev,
        __rte_unused uint32_t index)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_del_mac_address(enic);
}

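/*
 * TX burst handler: post each packet to the work queue one mbuf segment
 * at a time, reclaiming completed descriptors when the queue runs short.
 */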
static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        unsigned int index;
        unsigned int frags;
        unsigned int pkt_len;
        unsigned int seg_len;
        unsigned int inc_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        unsigned short ol_flags;

        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                inc_len = 0;
                nb_segs = tx_pkt->nb_segs;
                if (nb_segs > vnic_wq_desc_avail(wq)) {
                        /* Not enough descriptors: clean up the wq and try
                         * again; if it is still short, end the burst here.
                         */
                        if (!enic_cleanup_wq(enic, wq) ||
                                (nb_segs > vnic_wq_desc_avail(wq)))
                                return index;
                }
                pkt_len = tx_pkt->pkt_len;
                vlan_id = tx_pkt->vlan_tci;
                ol_flags = tx_pkt->ol_flags;

                /* Post one descriptor per segment, flagging the first and
                 * last fragments of the packet.
                 */
                for (frags = 0; inc_len < pkt_len; frags++) {
                        if (!tx_pkt)
                                break;
                        seg_len = tx_pkt->data_len;
                        inc_len += seg_len;
                        if (enic_send_pkt(enic, wq, tx_pkt,
                                    (unsigned short)seg_len, !frags,
                                    (pkt_len == inc_len), ol_flags, vlan_id)) {
                                break;
                        }
                        tx_pkt = tx_pkt->next;
                }
        }

        enic_cleanup_wq(enic, wq);
        return index;
}

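/*
 * RX burst handler: hand the receive queue to enic_poll() and return the
 * number of packets actually placed in rx_pkts.
 */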
static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
        unsigned int work_done;

        if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(vnic_dev_priv(rq->vdev), "error in enicpmd poll\n");

        return work_done;
}

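/*
 * Dispatch table of ethdev operations implemented by this PMD;
 * NULL entries are operations the enic PMD does not support.
 */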
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure        = enicpmd_dev_configure,
        .dev_start            = enicpmd_dev_start,
        .dev_stop             = enicpmd_dev_stop,
        .dev_set_link_up      = NULL,
        .dev_set_link_down    = NULL,
        .dev_close            = enicpmd_dev_close,
        .promiscuous_enable   = enicpmd_dev_promiscuous_enable,
        .promiscuous_disable  = enicpmd_dev_promiscuous_disable,
        .allmulticast_enable  = enicpmd_dev_allmulticast_enable,
        .allmulticast_disable = enicpmd_dev_allmulticast_disable,
        .link_update          = enicpmd_dev_link_update,
        .stats_get            = enicpmd_dev_stats_get,
        .stats_reset          = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get        = enicpmd_dev_info_get,
        .mtu_set              = NULL,
        .vlan_filter_set      = enicpmd_vlan_filter_set,
        .vlan_tpid_set        = NULL,
        .vlan_offload_set     = enicpmd_vlan_offload_set,
        .vlan_strip_queue_set = NULL,
        .rx_queue_start       = enicpmd_dev_rx_queue_start,
        .rx_queue_stop        = enicpmd_dev_rx_queue_stop,
        .tx_queue_start       = enicpmd_dev_tx_queue_start,
        .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
        .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
        .rx_queue_release     = enicpmd_dev_rx_queue_release,
        .rx_queue_count       = NULL,
        .rx_descriptor_done   = NULL,
        .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
        .tx_queue_release     = enicpmd_dev_tx_queue_release,
        .dev_led_on           = NULL,
        .dev_led_off          = NULL,
        .flow_ctrl_get        = NULL,
        .flow_ctrl_set        = NULL,
        .priority_flow_ctrl_set = NULL,
        .mac_addr_add         = enicpmd_add_mac_addr,
        .mac_addr_remove      = enicpmd_remove_mac_addr,
        .fdir_set_masks       = NULL,
        .filter_ctrl          = enicpmd_dev_filter_ctrl,
};

struct enic *enicpmd_list_head = NULL;
/* Initialize one ethdev instance backed by an enic device.
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
        eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
        eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;

        pdev = eth_dev->pci_dev;
        enic->pdev = pdev;
        addr = &pdev->addr;

        snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                addr->domain, addr->bus, addr->devid, addr->function);

        return enic_probe(enic);
}

static struct eth_driver rte_enic_pmd = {
        .pci_drv = {
                .name = "rte_enic_pmd",
                .id_table = pci_id_enic_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_enicpmd_dev_init,
        .dev_private_size = sizeof(struct enic),
};

/* Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers the poll-mode driver for Cisco ENIC devices.
 */
static int
rte_enic_pmd_init(const char *name __rte_unused,
        const char *params __rte_unused)
{
        ENICPMD_FUNC_TRACE();

        rte_eth_driver_register(&rte_enic_pmd);
        return 0;
}

static struct rte_driver rte_enic_driver = {
        .type = PMD_PDEV,
        .init = rte_enic_pmd_init,
};

PMD_REGISTER_DRIVER(rte_enic_driver);