1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
27 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
30 static void atl_dev_info_get(struct rte_eth_dev *dev,
31 struct rte_eth_dev_info *dev_info);
33 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
35 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
38 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
39 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
40 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
41 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
42 struct rte_intr_handle *handle);
43 static void atl_dev_interrupt_handler(void *param);
45 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
46 struct rte_pci_device *pci_dev);
47 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
49 static void atl_dev_info_get(struct rte_eth_dev *dev,
50 struct rte_eth_dev_info *dev_info);
/* Log type id for runtime driver messages (registered in atl_init_log). */
int atl_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC1xx base parts. */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* "S" variants. */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* "E" variants. */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: binds the id table to probe/remove callbacks. */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	/* Needs BAR mapping, supports LSC interrupts, IOVA-as-VA mode. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised via dev_infos_get. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)

/* Tx offload capabilities advertised via dev_infos_get. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor ring limits reported to applications. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,

/* Tx descriptor ring limits, including per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* ethdev operation table exported to the rte_ethdev layer. */
static const struct eth_dev_ops atl_eth_dev_ops = {
	/* Device lifecycle. */
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* Link status. */
	.link_update	      = atl_dev_link_update,

	/* Informational queries. */
	.fw_version_get	      = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	/* Queue management (implemented in the rxtx file). */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup	      = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup	      = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
/* Thin wrapper: reset the B0 hardware via the HW-abstraction layer. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
	return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt lines in the ITR mask-set register. */
atl_enable_intr(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt lines in the ITR mask-clear register. */
atl_disable_intr(struct aq_hw_s *hw)
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/*
 * Per-port initialisation (primary process only): install dev_ops and
 * Rx/Tx burst handlers, program the hardware config, allocate the MAC
 * address table, load firmware ops, read the permanent MAC address and
 * register/enable the PCI interrupt handler.
 */
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");

	/* Detect the firmware flavour and load the matching ops table. */
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)

	/* Hook the PCI interrupt to this driver's handler. */
	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);
/*
 * Per-port teardown (primary process only): close the device if still
 * running, clear the burst handlers, unhook the interrupt callback and
 * free the MAC address table.
 */
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Ensure the port is stopped before tearing it down. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
/* PCI probe: allocate an ethdev with an atl_adapter private area and init it. */
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: uninit and release the ethdev created at probe time. */
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure: only records that link status must be refreshed at start. */
atl_dev_configure(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
atl_dev_start(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	/* Fixed (forced) link speed is not supported by this hardware. */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		"Invalid link_speeds for port %u, fix speed not supported",

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);

		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");

	/* Allocate the queue-to-vector map when datapath intrs are enabled. */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
					dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);

	/* initialize transmission unit */

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		     hw->fw_ver_actual >> 24,
		     (hw->fw_ver_actual >> 16) & 0xFF,
		     hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");

	/* Ask firmware for the current link state. */
	err = hw->aq_fw_ops->update_link_status(hw);

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	link_speeds = &dev->data->dev_conf.link_speeds;

	/* Translate the ethdev speed mask into the NIC's rate mask. */
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = hw->aq_nic_cfg->link_speed_msk;

		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= AQ_NIC_RATE_10G;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= AQ_NIC_RATE_5G;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= AQ_NIC_RATE_1G;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= AQ_NIC_RATE_2G5;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= AQ_NIC_RATE_100M;

	err = hw->aq_fw_ops->set_link_speed(hw, speed);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
			atl_dev_lsc_interrupt_setup(dev, false);

		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	/* Error path: stop any queues that were already started. */
	atl_stop_queues(dev);
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
/*
 * Set device link up: enable tx.
 */
atl_dev_set_link_up(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Re-advertise the full configured speed mask to firmware. */
	return hw->aq_fw_ops->set_link_speed(hw,
			hw->aq_nic_cfg->link_speed_msk);
/*
 * Set device link down: disable tx.
 */
atl_dev_set_link_down(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* A zero speed mask tells firmware to take the link down. */
	return hw->aq_fw_ops->set_link_speed(hw, 0);
/*
 * Reset and stop device.
 */
atl_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	/* Release all Rx/Tx queue resources. */
	atl_free_queues(dev);
/* dev_reset: full uninit/init cycle on the same ethdev. */
atl_dev_reset(struct rte_eth_dev *dev)
	ret = eth_atl_dev_uninit(dev);

	ret = eth_atl_dev_init(dev);
/*
 * Format the firmware version as "major.minor.build" into fw_version.
 * Returns the number of bytes required including the NUL terminator
 * (ethdev convention for detecting truncation).
 */
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);

	/* Version packing: major in bits 31:24, minor 23:16, build 15:0. */
	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */
/* dev_infos_get: report queue counts, buffer limits, offloads and speeds. */
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* No hash-MAC filtering or VMDq pools on this device. */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	/* Multi-gig PHY: 100M, 1G, 2.5G, 5G and 10G rates. */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Packet types the Rx burst handler can mark on received mbufs. */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,

	/* Only valid when this PMD's own Rx burst function is installed. */
	if (dev->rx_pkt_burst == atl_recv_pkts)
/* return 0 means link status changed, -1 means not changed */
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;

	/* Start from a link-down template; overwritten below if link is up. */
	link.link_status = ETH_LINK_DOWN;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* A reported rate of 0 Mbps means the link is down. */
	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)

	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	atl_dev_link_status_print(dev);
/* Per-Rx-queue interrupt setup hook; the device argument is unused here. */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read the HW interrupt cause and convert it into driver intr flags. */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask further interrupts; atl_dev_interrupt_action re-enables them. */
	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
		ATL_FLAG_NEED_LINK_UPDATE : 0;
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");

		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		    pci_dev->addr.domain,
		    pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
		atl_dev_link_update(dev, 0);
		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
		atl_dev_link_status_print(dev);
		/* Notify applications subscribed to LSC events. */
		_rte_eth_dev_callback_process(dev,
					      RTE_ETH_EVENT_INTR_LSC, NULL);

	/* Re-arm the HW interrupt mask and the eventfd mapping. */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
/* Register the PMD, its PCI id table and kernel-module dependencies. */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the init/driver log types, defaulting to NOTICE. */
RTE_INIT(atl_init_log)
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);