6ce4454fe2f3e8fabb230e3ae909eb40070f4d6b
[dpdk.git] / drivers / net / atlantic / atl_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4
5 #include <rte_ethdev_pci.h>
6
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32                                     struct rte_eth_xstat_name *xstats_names,
33                                     unsigned int size);
34
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36                                 struct rte_eth_stats *stats);
37
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39                               struct rte_eth_xstat *stats, unsigned int n);
40
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44                               size_t fw_size);
45
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47                                struct rte_eth_dev_info *dev_info);
48
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50
51 /* Flow control */
52 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
53                                struct rte_eth_fc_conf *fc_conf);
54 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
55                                struct rte_eth_fc_conf *fc_conf);
56
57 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
58
59 /* Interrupts */
60 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
61 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
62 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
63 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
64                                     struct rte_intr_handle *handle);
65 static void atl_dev_interrupt_handler(void *param);
66
67 /* RSS */
68 static int atl_reta_update(struct rte_eth_dev *dev,
69                              struct rte_eth_rss_reta_entry64 *reta_conf,
70                              uint16_t reta_size);
71 static int atl_reta_query(struct rte_eth_dev *dev,
72                             struct rte_eth_rss_reta_entry64 *reta_conf,
73                             uint16_t reta_size);
74 static int atl_rss_hash_update(struct rte_eth_dev *dev,
75                                  struct rte_eth_rss_conf *rss_conf);
76 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
77                                    struct rte_eth_rss_conf *rss_conf);
78
79
80 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
81         struct rte_pci_device *pci_dev);
82 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
83
84 static void atl_dev_info_get(struct rte_eth_dev *dev,
85                                 struct rte_eth_dev_info *dev_info);
86
87 int atl_logtype_init;
88 int atl_logtype_driver;
89
/*
 * The set of PCI devices this driver supports.
 * All entries share the Aquantia vendor ID; the table is terminated by
 * the all-zero sentinel entry required by the PCI bus scan code.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Early device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC10x base parts */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* "S" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* "E" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
118
/*
 * PCI driver registration structure: ties the device-ID table above to
 * the probe/remove entry points.  Flags request BAR mapping, link status
 * change interrupt support, and IOVA-as-VA operation.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
126
/* Rx offload capabilities advertised in dev_info_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)

/* Tx offload capabilities advertised in dev_info_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Rx descriptor ring limits reported to applications. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx descriptor ring limits, including per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
153
/* Build one xstats table entry: the stat name is the struct aq_stats_s
 * field name, the offset locates the field inside that struct so the
 * value can be read generically in atl_dev_xstats_get().
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* Name + byte offset of one extended statistic within struct aq_stats_s. */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};

/* Extended statistics exposed via the xstats API.  Field names mirror
 * the firmware counters in struct aq_stats_s (unicast/multicast/broadcast
 * packet and byte counters plus error counters).
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
180
/* ethdev callback table; queue-level callbacks (atl_rx_queue_* etc.)
 * are implemented in the companion rxtx source file.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure        = atl_dev_configure,
	.dev_start            = atl_dev_start,
	.dev_stop             = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close            = atl_dev_close,
	.dev_reset            = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update          = atl_dev_link_update,

	/* Stats */
	.stats_get            = atl_dev_stats_get,
	.xstats_get           = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset          = atl_dev_stats_reset,
	/* xstats share the same reset handler as basic stats */
	.xstats_reset         = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get        = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	/* Queue Control */
	.rx_queue_start       = atl_rx_queue_start,
	.rx_queue_stop        = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start       = atl_tx_queue_start,
	.tx_queue_stop        = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* Flow Control */
	.flow_ctrl_get        = atl_flow_ctrl_get,
	.flow_ctrl_set        = atl_flow_ctrl_set,

	.rxq_info_get         = atl_rxq_info_get,
	.txq_info_get         = atl_txq_info_get,

	/* RSS */
	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
240
/* Thin wrapper over the B0-specific hardware reset; returns the
 * hw_atl_b0_hw_reset() status (0 on success).
 */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
246
/* Unmask all bits in the interrupt mask register (low word),
 * enabling every interrupt source at the NIC level.
 */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
254
/* Mask all interrupt sources at the NIC level (counterpart of
 * atl_enable_intr(); note this one takes the hw handle directly).
 */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
261
262 static int
263 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
264 {
265         struct atl_adapter *adapter =
266                 (struct atl_adapter *)eth_dev->data->dev_private;
267         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
268         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
269         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
270         int err = 0;
271
272         PMD_INIT_FUNC_TRACE();
273
274         eth_dev->dev_ops = &atl_eth_dev_ops;
275         eth_dev->rx_pkt_burst = &atl_recv_pkts;
276         eth_dev->tx_pkt_burst = &atl_xmit_pkts;
277         eth_dev->tx_pkt_prepare = &atl_prep_pkts;
278
279         /* For secondary processes, the primary process has done all the work */
280         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
281                 return 0;
282
283         /* Vendor and Device ID need to be set before init of shared code */
284         hw->device_id = pci_dev->id.device_id;
285         hw->vendor_id = pci_dev->id.vendor_id;
286         hw->mmio = (void *)pci_dev->mem_resource[0].addr;
287
288         /* Hardware configuration - hardcode */
289         adapter->hw_cfg.is_lro = false;
290         adapter->hw_cfg.wol = false;
291         adapter->hw_cfg.is_rss = false;
292         adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
293
294         adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
295                           AQ_NIC_RATE_5G |
296                           AQ_NIC_RATE_2G5 |
297                           AQ_NIC_RATE_1G |
298                           AQ_NIC_RATE_100M;
299
300         adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
301         adapter->hw_cfg.aq_rss.indirection_table_size =
302                 HW_ATL_B0_RSS_REDIRECTION_MAX;
303
304         hw->aq_nic_cfg = &adapter->hw_cfg;
305
306         /* disable interrupt */
307         atl_disable_intr(hw);
308
309         /* Allocate memory for storing MAC addresses */
310         eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
311         if (eth_dev->data->mac_addrs == NULL) {
312                 PMD_INIT_LOG(ERR, "MAC Malloc failed");
313                 return -ENOMEM;
314         }
315
316         err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
317         if (err)
318                 return err;
319
320         /* Copy the permanent MAC address */
321         if (hw->aq_fw_ops->get_mac_permanent(hw,
322                         eth_dev->data->mac_addrs->addr_bytes) != 0)
323                 return -EINVAL;
324
325         /* Reset the hw statistics */
326         atl_dev_stats_reset(eth_dev);
327
328         rte_intr_callback_register(intr_handle,
329                                    atl_dev_interrupt_handler, eth_dev);
330
331         /* enable uio/vfio intr/eventfd mapping */
332         rte_intr_enable(intr_handle);
333
334         /* enable support intr */
335         atl_enable_intr(eth_dev);
336
337         return err;
338 }
339
/*
 * Per-device uninit callback invoked from eth_atl_pci_remove() and
 * atl_dev_reset().  Primary process only: stops the device if still
 * running, clears the burst pointers, tears down the interrupt handler
 * and releases the MAC address array.  Returns 0, or -EPERM when called
 * from a secondary process.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* adapter_stopped is set by atl_dev_stop(); close if still active */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
371
/* PCI probe: allocate an ethdev with a struct atl_adapter private area
 * and run eth_atl_dev_init() on it via the generic ethdev-PCI helper.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
379
/* PCI remove: run eth_atl_dev_uninit() and release the ethdev via the
 * generic ethdev-PCI helper.
 */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
385
/* dev_configure callback: no hardware programming happens here; just
 * flag that link status must be refreshed once the device starts.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
399
400 /*
401  * Configure device link speed and setup link.
402  * It returns 0 on success.
403  */
404 static int
405 atl_dev_start(struct rte_eth_dev *dev)
406 {
407         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
408         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
409         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
410         uint32_t intr_vector = 0;
411         uint32_t *link_speeds;
412         uint32_t speed = 0;
413         int status;
414         int err;
415
416         PMD_INIT_FUNC_TRACE();
417
418         /* set adapter started */
419         hw->adapter_stopped = 0;
420
421         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
422                 PMD_INIT_LOG(ERR,
423                 "Invalid link_speeds for port %u, fix speed not supported",
424                                 dev->data->port_id);
425                 return -EINVAL;
426         }
427
428         /* disable uio/vfio intr/eventfd mapping */
429         rte_intr_disable(intr_handle);
430
431         /* reinitialize adapter
432          * this calls reset and start
433          */
434         status = atl_reset_hw(hw);
435         if (status != 0)
436                 return -EIO;
437
438         err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
439
440         hw_atl_b0_hw_start(hw);
441         /* check and configure queue intr-vector mapping */
442         if ((rte_intr_cap_multiple(intr_handle) ||
443             !RTE_ETH_DEV_SRIOV(dev).active) &&
444             dev->data->dev_conf.intr_conf.rxq != 0) {
445                 intr_vector = dev->data->nb_rx_queues;
446                 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
447                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
448                                         ATL_MAX_INTR_QUEUE_NUM);
449                         return -ENOTSUP;
450                 }
451                 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
452                         PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
453                         return -1;
454                 }
455         }
456
457         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
458                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
459                                     dev->data->nb_rx_queues * sizeof(int), 0);
460                 if (intr_handle->intr_vec == NULL) {
461                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
462                                      " intr_vec", dev->data->nb_rx_queues);
463                         return -ENOMEM;
464                 }
465         }
466
467         /* initialize transmission unit */
468         atl_tx_init(dev);
469
470         /* This can fail when allocating mbufs for descriptor rings */
471         err = atl_rx_init(dev);
472         if (err) {
473                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
474                 goto error;
475         }
476
477         PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
478                 hw->fw_ver_actual >> 24,
479                 (hw->fw_ver_actual >> 16) & 0xFF,
480                 hw->fw_ver_actual & 0xFFFF);
481         PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
482
483         err = atl_start_queues(dev);
484         if (err < 0) {
485                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
486                 goto error;
487         }
488
489         err = hw->aq_fw_ops->update_link_status(hw);
490
491         if (err)
492                 goto error;
493
494         dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
495
496         link_speeds = &dev->data->dev_conf.link_speeds;
497
498         speed = 0x0;
499
500         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
501                 speed = hw->aq_nic_cfg->link_speed_msk;
502         } else {
503                 if (*link_speeds & ETH_LINK_SPEED_10G)
504                         speed |= AQ_NIC_RATE_10G;
505                 if (*link_speeds & ETH_LINK_SPEED_5G)
506                         speed |= AQ_NIC_RATE_5G;
507                 if (*link_speeds & ETH_LINK_SPEED_1G)
508                         speed |= AQ_NIC_RATE_1G;
509                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
510                         speed |=  AQ_NIC_RATE_2G5;
511                 if (*link_speeds & ETH_LINK_SPEED_100M)
512                         speed |= AQ_NIC_RATE_100M;
513         }
514
515         err = hw->aq_fw_ops->set_link_speed(hw, speed);
516         if (err)
517                 goto error;
518
519         if (rte_intr_allow_others(intr_handle)) {
520                 /* check if lsc interrupt is enabled */
521                 if (dev->data->dev_conf.intr_conf.lsc != 0)
522                         atl_dev_lsc_interrupt_setup(dev, true);
523                 else
524                         atl_dev_lsc_interrupt_setup(dev, false);
525         } else {
526                 rte_intr_callback_unregister(intr_handle,
527                                              atl_dev_interrupt_handler, dev);
528                 if (dev->data->dev_conf.intr_conf.lsc != 0)
529                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
530                                      " no intr multiplex");
531         }
532
533         /* check if rxq interrupt is enabled */
534         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
535             rte_intr_dp_is_en(intr_handle))
536                 atl_dev_rxq_interrupt_setup(dev);
537
538         /* enable uio/vfio intr/eventfd mapping */
539         rte_intr_enable(intr_handle);
540
541         /* resume enabled intr since hw reset */
542         atl_enable_intr(dev);
543
544         return 0;
545
546 error:
547         atl_stop_queues(dev);
548         return -EIO;
549 }
550
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 *
 * Masks interrupts, resets the NIC, stops all queues, clears the cached
 * Rx configuration and link status, and releases the Rx-queue interrupt
 * vector mapping allocated in atl_dev_start().
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* atl_dev_start() unregisters the handler when multiplexing is
	 * unavailable; re-register it here so the default handler is back.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
595
/*
 * Set device link up: enable tx.
 * Restores the full configured rate mask so the firmware renegotiates
 * the link.  Returns the firmware op status.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw,
			hw->aq_nic_cfg->link_speed_msk);
}
607
/*
 * Set device link down: disable tx.
 * A zero rate mask tells the firmware to drop the link.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
618
/*
 * Reset and stop device.
 * Stops the device (which also resets the NIC) and frees all queues.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
631
/* dev_reset callback: tear the port down completely and re-initialize
 * it from scratch.  Returns 0 on success or the first failing step's
 * error code.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err;

	err = eth_atl_dev_uninit(dev);
	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
645
646
/* stats_get callback: refresh the firmware counters, then fill the
 * rte_eth_stats structure from the hardware totals plus the software
 * per-queue counters kept by the rx/tx paths.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* Pull the latest counters from firmware into hw->curr_stats */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	/* no hardware Tx error counter is read here */
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are maintained in software by the PMD */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
678
/* stats_reset / xstats_reset callback: sync the firmware counters once,
 * then zero the driver-side copies (hardware snapshot and software
 * per-queue totals).
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	/* NOTE(review): presumably update_stats() rebases the firmware
	 * deltas so that zeroing curr_stats below acts as a reset —
	 * confirm against hw_atl FW ops implementation.
	 */
	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
692
693 static int
694 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
695                          struct rte_eth_xstat_name *xstats_names,
696                          unsigned int size)
697 {
698         unsigned int i;
699
700         if (!xstats_names)
701                 return RTE_DIM(atl_xstats_tbl);
702
703         for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
704                 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
705                         atl_xstats_tbl[i].name);
706
707         return size;
708 }
709
710 static int
711 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
712                    unsigned int n)
713 {
714         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
715         struct aq_hw_s *hw = &adapter->hw;
716         unsigned int i;
717
718         if (!stats)
719                 return 0;
720
721         for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
722                 stats[i].id = i;
723                 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
724                                         atl_xstats_tbl[i].offset);
725         }
726
727         return n;
728 }
729
730 static int
731 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
732 {
733         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
734         uint32_t fw_ver = 0;
735         unsigned int ret = 0;
736
737         ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
738         if (ret)
739                 return -EIO;
740
741         ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
742                        (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
743
744         ret += 1; /* add string null-terminator */
745
746         if (fw_size < ret)
747                 return ret;
748
749         return 0;
750 }
751
/* dev_infos_get callback: report static device capabilities — queue and
 * MAC limits, offload masks, descriptor limits, RSS parameters and the
 * supported link speeds.
 */
static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* unsupported features are reported as zero */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;


	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	/* 100M / 1G / 2.5G / 5G / 10G */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
794
/* dev_supported_ptypes_get callback: return the packet-type list when
 * the standard burst function is in use, NULL otherwise (ptype parsing
 * is only guaranteed for atl_recv_pkts).
 */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}
816
/* link_update callback.
 * Return 0 means link status changed, -1 means not changed.
 * Queries the firmware for the current link state and records it via
 * rte_eth_linkstatus_set(); a firmware query failure is reported as
 * "changed" (0) without touching the recorded state.
 */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	/* start from a "link down" template */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* link is up: no further link configuration needed */
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
863
/* promiscuous_enable callback: turn on L2 promiscuous mode in the
 * receive packet filter.
 */
static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}
871
872 static void
873 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
874 {
875         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
876
877         hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
878 }
879
880 static void
881 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
882 {
883         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
884
885         hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
886 }
887
888 static void
889 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
890 {
891         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
892
893         if (dev->data->promiscuous == 1)
894                 return; /* must remain in all_multicast mode */
895
896         hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
897 }
898
899 /**
900  * It clears the interrupt causes and enables the interrupt.
901  * It will be called once only during nic initialized.
902  *
903  * @param dev
904  *  Pointer to struct rte_eth_dev.
905  * @param on
906  *  Enable or Disable.
907  *
908  * @return
909  *  - On success, zero.
910  *  - On failure, a negative value.
911  */
912
913 static int
914 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
915 {
916         atl_dev_link_status_print(dev);
917         return 0;
918 }
919
/* Rx queue interrupt setup: not supported by this PMD, nothing to do. */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
925
926
927 static int
928 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
929 {
930         struct atl_interrupt *intr =
931                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
932         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933         u64 cause = 0;
934
935         hw_atl_b0_hw_irq_read(hw, &cause);
936
937         atl_disable_intr(hw);
938         intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
939                         ATL_FLAG_NEED_LINK_UPDATE : 0;
940
941         return 0;
942 }
943
944 /**
945  * It gets and then prints the link status.
946  *
947  * @param dev
948  *  Pointer to struct rte_eth_dev.
949  *
950  * @return
951  *  - On success, zero.
952  *  - On failure, a negative value.
953  */
954 static void
955 atl_dev_link_status_print(struct rte_eth_dev *dev)
956 {
957         struct rte_eth_link link;
958
959         memset(&link, 0, sizeof(link));
960         rte_eth_linkstatus_get(dev, &link);
961         if (link.link_status) {
962                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
963                                         (int)(dev->data->port_id),
964                                         (unsigned int)link.link_speed,
965                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
966                                         "full-duplex" : "half-duplex");
967         } else {
968                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
969                                 (int)(dev->data->port_id));
970         }
971
972
973 #ifdef DEBUG
974 {
975         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
976
977         PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
978                                 pci_dev->addr.domain,
979                                 pci_dev->addr.bus,
980                                 pci_dev->addr.devid,
981                                 pci_dev->addr.function);
982 }
983 #endif
984
985         PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
986 }
987
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param intr_handle
 *  Pointer to the PCI interrupt handle to re-enable.
 *
 * @return
 *  Always zero.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* Flag was latched by atl_dev_interrupt_get_status(). */
	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
		atl_dev_link_update(dev, 0);
		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
		atl_dev_link_status_print(dev);
		/* Notify applications registered for LSC events. */
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	/* Re-arm the device interrupt, then the host-side PCI interrupt. */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
1018
1019 /**
1020  * Interrupt handler triggered by NIC  for handling
1021  * specific interrupt.
1022  *
1023  * @param handle
1024  *  Pointer to interrupt handle.
1025  * @param param
1026  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1027  *
1028  * @return
1029  *  void
1030  */
1031 static void
1032 atl_dev_interrupt_handler(void *param)
1033 {
1034         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1035
1036         atl_dev_interrupt_get_status(dev);
1037         atl_dev_interrupt_action(dev, dev->intr_handle);
1038 }
1039
1040
1041 static int
1042 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1043 {
1044         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1045
1046         if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1047                 fc_conf->mode = RTE_FC_NONE;
1048         else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1049                 fc_conf->mode = RTE_FC_FULL;
1050         else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1051                 fc_conf->mode = RTE_FC_RX_PAUSE;
1052         else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1053                 fc_conf->mode = RTE_FC_TX_PAUSE;
1054
1055         return 0;
1056 }
1057
1058 static int
1059 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1060 {
1061         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1062         uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1063
1064
1065         if (hw->aq_fw_ops->set_flow_control == NULL)
1066                 return -ENOTSUP;
1067
1068         if (fc_conf->mode == RTE_FC_NONE)
1069                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1070         else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1071                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1072         else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1073                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1074         else if (fc_conf->mode == RTE_FC_FULL)
1075                 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1076
1077         if (old_flow_control != hw->aq_nic_cfg->flow_control)
1078                 return hw->aq_fw_ops->set_flow_control(hw);
1079
1080         return 0;
1081 }
1082
1083 static int
1084 atl_reta_update(struct rte_eth_dev *dev,
1085                    struct rte_eth_rss_reta_entry64 *reta_conf,
1086                    uint16_t reta_size)
1087 {
1088         int i;
1089         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1090         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1091
1092         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1093                 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1094                                         dev->data->nb_rx_queues - 1);
1095
1096         hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1097         return 0;
1098 }
1099
1100 static int
1101 atl_reta_query(struct rte_eth_dev *dev,
1102                     struct rte_eth_rss_reta_entry64 *reta_conf,
1103                     uint16_t reta_size)
1104 {
1105         int i;
1106         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1107
1108         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1109                 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1110         reta_conf->mask = ~0U;
1111         return 0;
1112 }
1113
1114 static int
1115 atl_rss_hash_update(struct rte_eth_dev *dev,
1116                                  struct rte_eth_rss_conf *rss_conf)
1117 {
1118         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1119         struct aq_hw_cfg_s *cfg =
1120                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1121         static u8 def_rss_key[40] = {
1122                 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1123                 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1124                 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1125                 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1126                 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1127         };
1128
1129         cfg->is_rss = !!rss_conf->rss_hf;
1130         if (rss_conf->rss_key) {
1131                 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1132                        rss_conf->rss_key_len);
1133                 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1134         } else {
1135                 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1136                        sizeof(def_rss_key));
1137                 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1138         }
1139
1140         hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1141         hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1142         return 0;
1143 }
1144
1145 static int
1146 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1147                                  struct rte_eth_rss_conf *rss_conf)
1148 {
1149         struct aq_hw_cfg_s *cfg =
1150                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1151
1152         rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1153         if (rss_conf->rss_key) {
1154                 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1155                 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1156                        rss_conf->rss_key_len);
1157         }
1158
1159         return 0;
1160 }
1161
/* Register the PMD with the PCI bus, its device ID table, and the
 * kernel modules it depends on.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

/* Constructor: register the driver's log types; default level NOTICE.
 * rte_log_register() returns a negative value on failure, in which case
 * the level is left untouched.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1175