1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
27 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
28 struct rte_eth_xstat_name *xstats_names,
31 static int atl_dev_stats_get(struct rte_eth_dev *dev,
32 struct rte_eth_stats *stats);
34 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
35 struct rte_eth_xstat *stats, unsigned int n);
37 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
39 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
42 static void atl_dev_info_get(struct rte_eth_dev *dev,
43 struct rte_eth_dev_info *dev_info);
45 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
47 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
50 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
51 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
52 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
53 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
54 struct rte_intr_handle *handle);
55 static void atl_dev_interrupt_handler(void *param);
57 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
58 struct rte_pci_device *pci_dev);
59 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
61 static void atl_dev_info_get(struct rte_eth_dev *dev,
62 struct rte_eth_dev_info *dev_info);
65 int atl_logtype_driver;
68 * The set of PCI devices this driver supports
70 static const struct rte_pci_id pci_id_atl_map[] = {
71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
93 { .vendor_id = 0, /* sentinel */ },
96 static struct rte_pci_driver rte_atl_pmd = {
97 .id_table = pci_id_atl_map,
98 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
99 RTE_PCI_DRV_IOVA_AS_VA,
100 .probe = eth_atl_pci_probe,
101 .remove = eth_atl_pci_remove,
/* RX offload capabilities advertised via atl_dev_info_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)
/* TX offload capabilities advertised via atl_dev_info_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
117 static const struct rte_eth_desc_lim rx_desc_lim = {
118 .nb_max = ATL_MAX_RING_DESC,
119 .nb_min = ATL_MIN_RING_DESC,
120 .nb_align = ATL_RXD_ALIGN,
123 static const struct rte_eth_desc_lim tx_desc_lim = {
124 .nb_max = ATL_MAX_RING_DESC,
125 .nb_min = ATL_MIN_RING_DESC,
126 .nb_align = ATL_TXD_ALIGN,
127 .nb_seg_max = ATL_TX_MAX_SEG,
128 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Build one xstats table entry: the stat's name string plus its byte
 * offset inside struct aq_stats_s.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* Name/offset pair used to read struct aq_stats_s fields generically. */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};
141 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
142 ATL_XSTATS_FIELD(uprc),
143 ATL_XSTATS_FIELD(mprc),
144 ATL_XSTATS_FIELD(bprc),
145 ATL_XSTATS_FIELD(erpt),
146 ATL_XSTATS_FIELD(uptc),
147 ATL_XSTATS_FIELD(mptc),
148 ATL_XSTATS_FIELD(bptc),
149 ATL_XSTATS_FIELD(erpr),
150 ATL_XSTATS_FIELD(ubrc),
151 ATL_XSTATS_FIELD(ubtc),
152 ATL_XSTATS_FIELD(mbrc),
153 ATL_XSTATS_FIELD(mbtc),
154 ATL_XSTATS_FIELD(bbrc),
155 ATL_XSTATS_FIELD(bbtc),
158 static const struct eth_dev_ops atl_eth_dev_ops = {
159 .dev_configure = atl_dev_configure,
160 .dev_start = atl_dev_start,
161 .dev_stop = atl_dev_stop,
162 .dev_set_link_up = atl_dev_set_link_up,
163 .dev_set_link_down = atl_dev_set_link_down,
164 .dev_close = atl_dev_close,
165 .dev_reset = atl_dev_reset,
168 .link_update = atl_dev_link_update,
171 .stats_get = atl_dev_stats_get,
172 .xstats_get = atl_dev_xstats_get,
173 .xstats_get_names = atl_dev_xstats_get_names,
174 .stats_reset = atl_dev_stats_reset,
175 .xstats_reset = atl_dev_stats_reset,
177 .fw_version_get = atl_fw_version_get,
178 .dev_infos_get = atl_dev_info_get,
179 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
182 .rx_queue_start = atl_rx_queue_start,
183 .rx_queue_stop = atl_rx_queue_stop,
184 .rx_queue_setup = atl_rx_queue_setup,
185 .rx_queue_release = atl_rx_queue_release,
187 .tx_queue_start = atl_tx_queue_start,
188 .tx_queue_stop = atl_tx_queue_stop,
189 .tx_queue_setup = atl_tx_queue_setup,
190 .tx_queue_release = atl_tx_queue_release,
192 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
193 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
195 .rx_queue_count = atl_rx_queue_count,
196 .rx_descriptor_status = atl_dev_rx_descriptor_status,
197 .tx_descriptor_status = atl_dev_tx_descriptor_status,
199 .rxq_info_get = atl_rxq_info_get,
200 .txq_info_get = atl_txq_info_get,
/* Reset the B0 hardware; returns the hw_atl status code (0 on success). */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
210 atl_enable_intr(struct rte_eth_dev *dev)
212 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
214 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt sources (lower status word) on the adapter. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
225 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
227 struct atl_adapter *adapter =
228 (struct atl_adapter *)eth_dev->data->dev_private;
229 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
230 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
231 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
234 PMD_INIT_FUNC_TRACE();
236 eth_dev->dev_ops = &atl_eth_dev_ops;
237 eth_dev->rx_pkt_burst = &atl_recv_pkts;
238 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
239 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
241 /* For secondary processes, the primary process has done all the work */
242 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
245 /* Vendor and Device ID need to be set before init of shared code */
246 hw->device_id = pci_dev->id.device_id;
247 hw->vendor_id = pci_dev->id.vendor_id;
248 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
250 /* Hardware configuration - hardcode */
251 adapter->hw_cfg.is_lro = false;
252 adapter->hw_cfg.wol = false;
253 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
259 hw->aq_nic_cfg = &adapter->hw_cfg;
261 /* disable interrupt */
262 atl_disable_intr(hw);
264 /* Allocate memory for storing MAC addresses */
265 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
266 if (eth_dev->data->mac_addrs == NULL) {
267 PMD_INIT_LOG(ERR, "MAC Malloc failed");
271 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
275 /* Copy the permanent MAC address */
276 if (hw->aq_fw_ops->get_mac_permanent(hw,
277 eth_dev->data->mac_addrs->addr_bytes) != 0)
280 /* Reset the hw statistics */
281 atl_dev_stats_reset(eth_dev);
283 rte_intr_callback_register(intr_handle,
284 atl_dev_interrupt_handler, eth_dev);
286 /* enable uio/vfio intr/eventfd mapping */
287 rte_intr_enable(intr_handle);
289 /* enable support intr */
290 atl_enable_intr(eth_dev);
296 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
298 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
299 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
302 PMD_INIT_FUNC_TRACE();
304 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
307 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
309 if (hw->adapter_stopped == 0)
310 atl_dev_close(eth_dev);
312 eth_dev->dev_ops = NULL;
313 eth_dev->rx_pkt_burst = NULL;
314 eth_dev->tx_pkt_burst = NULL;
316 /* disable uio intr before callback unregister */
317 rte_intr_disable(intr_handle);
318 rte_intr_callback_unregister(intr_handle,
319 atl_dev_interrupt_handler, eth_dev);
321 rte_free(eth_dev->data->mac_addrs);
322 eth_dev->data->mac_addrs = NULL;
328 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
329 struct rte_pci_device *pci_dev)
331 return rte_eth_dev_pci_generic_probe(pci_dev,
332 sizeof(struct atl_adapter), eth_atl_dev_init);
336 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
338 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
342 atl_dev_configure(struct rte_eth_dev *dev)
344 struct atl_interrupt *intr =
345 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
347 PMD_INIT_FUNC_TRACE();
349 /* set flag to update link status after init */
350 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
356 * Configure device link speed and setup link.
357 * It returns 0 on success.
360 atl_dev_start(struct rte_eth_dev *dev)
362 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
363 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
364 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
365 uint32_t intr_vector = 0;
366 uint32_t *link_speeds;
371 PMD_INIT_FUNC_TRACE();
373 /* set adapter started */
374 hw->adapter_stopped = 0;
376 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
378 "Invalid link_speeds for port %u, fix speed not supported",
383 /* disable uio/vfio intr/eventfd mapping */
384 rte_intr_disable(intr_handle);
386 /* reinitialize adapter
387 * this calls reset and start
389 status = atl_reset_hw(hw);
393 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
395 hw_atl_b0_hw_start(hw);
396 /* check and configure queue intr-vector mapping */
397 if ((rte_intr_cap_multiple(intr_handle) ||
398 !RTE_ETH_DEV_SRIOV(dev).active) &&
399 dev->data->dev_conf.intr_conf.rxq != 0) {
400 intr_vector = dev->data->nb_rx_queues;
401 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
402 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
403 ATL_MAX_INTR_QUEUE_NUM);
406 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
407 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
412 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
413 intr_handle->intr_vec = rte_zmalloc("intr_vec",
414 dev->data->nb_rx_queues * sizeof(int), 0);
415 if (intr_handle->intr_vec == NULL) {
416 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
417 " intr_vec", dev->data->nb_rx_queues);
422 /* initialize transmission unit */
425 /* This can fail when allocating mbufs for descriptor rings */
426 err = atl_rx_init(dev);
428 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
432 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
433 hw->fw_ver_actual >> 24,
434 (hw->fw_ver_actual >> 16) & 0xFF,
435 hw->fw_ver_actual & 0xFFFF);
436 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
438 err = atl_start_queues(dev);
440 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
444 err = hw->aq_fw_ops->update_link_status(hw);
449 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
451 link_speeds = &dev->data->dev_conf.link_speeds;
455 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
456 speed = hw->aq_nic_cfg->link_speed_msk;
458 if (*link_speeds & ETH_LINK_SPEED_10G)
459 speed |= AQ_NIC_RATE_10G;
460 if (*link_speeds & ETH_LINK_SPEED_5G)
461 speed |= AQ_NIC_RATE_5G;
462 if (*link_speeds & ETH_LINK_SPEED_1G)
463 speed |= AQ_NIC_RATE_1G;
464 if (*link_speeds & ETH_LINK_SPEED_2_5G)
465 speed |= AQ_NIC_RATE_2G5;
466 if (*link_speeds & ETH_LINK_SPEED_100M)
467 speed |= AQ_NIC_RATE_100M;
470 err = hw->aq_fw_ops->set_link_speed(hw, speed);
474 if (rte_intr_allow_others(intr_handle)) {
475 /* check if lsc interrupt is enabled */
476 if (dev->data->dev_conf.intr_conf.lsc != 0)
477 atl_dev_lsc_interrupt_setup(dev, true);
479 atl_dev_lsc_interrupt_setup(dev, false);
481 rte_intr_callback_unregister(intr_handle,
482 atl_dev_interrupt_handler, dev);
483 if (dev->data->dev_conf.intr_conf.lsc != 0)
484 PMD_INIT_LOG(INFO, "lsc won't enable because of"
485 " no intr multiplex");
488 /* check if rxq interrupt is enabled */
489 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
490 rte_intr_dp_is_en(intr_handle))
491 atl_dev_rxq_interrupt_setup(dev);
493 /* enable uio/vfio intr/eventfd mapping */
494 rte_intr_enable(intr_handle);
496 /* resume enabled intr since hw reset */
497 atl_enable_intr(dev);
502 atl_stop_queues(dev);
507 * Stop device: disable rx and tx functions to allow for reconfiguring.
510 atl_dev_stop(struct rte_eth_dev *dev)
512 struct rte_eth_link link;
514 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
518 PMD_INIT_FUNC_TRACE();
520 /* disable interrupts */
521 atl_disable_intr(hw);
525 hw->adapter_stopped = 1;
527 atl_stop_queues(dev);
529 /* Clear stored conf */
530 dev->data->scattered_rx = 0;
533 /* Clear recorded link status */
534 memset(&link, 0, sizeof(link));
535 rte_eth_linkstatus_set(dev, &link);
537 if (!rte_intr_allow_others(intr_handle))
538 /* resume to the default handler */
539 rte_intr_callback_register(intr_handle,
540 atl_dev_interrupt_handler,
543 /* Clean datapath event and queue/vec mapping */
544 rte_intr_efd_disable(intr_handle);
545 if (intr_handle->intr_vec != NULL) {
546 rte_free(intr_handle->intr_vec);
547 intr_handle->intr_vec = NULL;
552 * Set device link up: enable tx.
555 atl_dev_set_link_up(struct rte_eth_dev *dev)
557 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
559 return hw->aq_fw_ops->set_link_speed(hw,
560 hw->aq_nic_cfg->link_speed_msk);
564 * Set device link down: disable tx.
567 atl_dev_set_link_down(struct rte_eth_dev *dev)
569 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
571 return hw->aq_fw_ops->set_link_speed(hw, 0);
/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
/* dev_reset callback: full uninit followed by re-init of the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}
603 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
605 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
606 struct aq_hw_s *hw = &adapter->hw;
607 struct atl_sw_stats *swstats = &adapter->sw_stats;
610 hw->aq_fw_ops->update_stats(hw);
612 /* Fill out the rte_eth_stats statistics structure */
613 stats->ipackets = hw->curr_stats.dma_pkt_rc;
614 stats->ibytes = hw->curr_stats.dma_oct_rc;
615 stats->imissed = hw->curr_stats.dpc;
616 stats->ierrors = hw->curr_stats.erpt;
618 stats->opackets = hw->curr_stats.dma_pkt_tc;
619 stats->obytes = hw->curr_stats.dma_oct_tc;
622 stats->rx_nombuf = swstats->rx_nombuf;
624 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
625 stats->q_ipackets[i] = swstats->q_ipackets[i];
626 stats->q_opackets[i] = swstats->q_opackets[i];
627 stats->q_ibytes[i] = swstats->q_ibytes[i];
628 stats->q_obytes[i] = swstats->q_obytes[i];
629 stats->q_errors[i] = swstats->q_errors[i];
635 atl_dev_stats_reset(struct rte_eth_dev *dev)
637 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
638 struct aq_hw_s *hw = &adapter->hw;
640 hw->aq_fw_ops->update_stats(hw);
642 /* Reset software totals */
643 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
645 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
649 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
650 struct rte_eth_xstat_name *xstats_names,
656 return RTE_DIM(atl_xstats_tbl);
658 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
659 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
660 atl_xstats_tbl[i].name);
666 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
669 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
670 struct aq_hw_s *hw = &adapter->hw;
676 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
678 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
679 atl_xstats_tbl[i].offset);
686 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
688 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 unsigned int ret = 0;
692 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
696 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
697 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
699 ret += 1; /* add string null-terminator */
708 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
710 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
712 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
713 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
715 dev_info->min_rx_bufsize = 1024;
716 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
717 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
718 dev_info->max_vfs = pci_dev->max_vfs;
720 dev_info->max_hash_mac_addrs = 0;
721 dev_info->max_vmdq_pools = 0;
722 dev_info->vmdq_queue_num = 0;
724 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
726 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
729 dev_info->default_rxconf = (struct rte_eth_rxconf) {
730 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
733 dev_info->default_txconf = (struct rte_eth_txconf) {
734 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
737 dev_info->rx_desc_lim = rx_desc_lim;
738 dev_info->tx_desc_lim = tx_desc_lim;
740 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
741 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
742 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
743 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
746 static const uint32_t *
747 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
749 static const uint32_t ptypes[] = {
751 RTE_PTYPE_L2_ETHER_ARP,
752 RTE_PTYPE_L2_ETHER_VLAN,
762 if (dev->rx_pkt_burst == atl_recv_pkts)
768 /* return 0 means link status changed, -1 means not changed */
770 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
772 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
773 struct atl_interrupt *intr =
774 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
775 struct rte_eth_link link, old;
778 link.link_status = ETH_LINK_DOWN;
780 link.link_duplex = ETH_LINK_FULL_DUPLEX;
781 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
782 memset(&old, 0, sizeof(old));
784 /* load old link status */
785 rte_eth_linkstatus_get(dev, &old);
787 /* read current link status */
788 err = hw->aq_fw_ops->update_link_status(hw);
793 if (hw->aq_link_status.mbps == 0) {
794 /* write default (down) link status */
795 rte_eth_linkstatus_set(dev, &link);
796 if (link.link_status == old.link_status)
801 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
803 link.link_status = ETH_LINK_UP;
804 link.link_duplex = ETH_LINK_FULL_DUPLEX;
805 link.link_speed = hw->aq_link_status.mbps;
807 rte_eth_linkstatus_set(dev, &link);
809 if (link.link_status == old.link_status)
817 * It clears the interrupt causes and enables the interrupt.
818 * It will be called once only during nic initialized.
821 * Pointer to struct rte_eth_dev.
826 * - On success, zero.
827 * - On failure, a negative value.
831 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
833 atl_dev_link_status_print(dev);
838 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
845 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
847 struct atl_interrupt *intr =
848 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
849 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
852 hw_atl_b0_hw_irq_read(hw, &cause);
854 atl_disable_intr(hw);
855 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
856 ATL_FLAG_NEED_LINK_UPDATE : 0;
862 * It gets and then prints the link status.
865 * Pointer to struct rte_eth_dev.
868 * - On success, zero.
869 * - On failure, a negative value.
872 atl_dev_link_status_print(struct rte_eth_dev *dev)
874 struct rte_eth_link link;
876 memset(&link, 0, sizeof(link));
877 rte_eth_linkstatus_get(dev, &link);
878 if (link.link_status) {
879 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
880 (int)(dev->data->port_id),
881 (unsigned int)link.link_speed,
882 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
883 "full-duplex" : "half-duplex");
885 PMD_DRV_LOG(INFO, " Port %d: Link Down",
886 (int)(dev->data->port_id));
892 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
894 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
895 pci_dev->addr.domain,
898 pci_dev->addr.function);
902 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
906 * It executes link_update after knowing an interrupt occurred.
909 * Pointer to struct rte_eth_dev.
912 * - On success, zero.
913 * - On failure, a negative value.
916 atl_dev_interrupt_action(struct rte_eth_dev *dev,
917 struct rte_intr_handle *intr_handle)
919 struct atl_interrupt *intr =
920 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
922 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
923 atl_dev_link_update(dev, 0);
924 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
925 atl_dev_link_status_print(dev);
926 _rte_eth_dev_callback_process(dev,
927 RTE_ETH_EVENT_INTR_LSC, NULL);
930 atl_enable_intr(dev);
931 rte_intr_enable(intr_handle);
937 * Interrupt handler triggered by NIC for handling
938 * specific interrupt.
941 * Pointer to interrupt handle.
943 * The address of parameter (struct rte_eth_dev *) regsitered before.
949 atl_dev_interrupt_handler(void *param)
951 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
953 atl_dev_interrupt_get_status(dev);
954 atl_dev_interrupt_action(dev, dev->intr_handle);
957 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
958 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
959 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
961 RTE_INIT(atl_init_log)
963 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
964 if (atl_logtype_init >= 0)
965 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
966 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
967 if (atl_logtype_driver >= 0)
968 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);