1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations for the static callbacks wired into
 * atl_eth_dev_ops and rte_atl_pmd below.
 */
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
27 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
28 struct rte_eth_xstat_name *xstats_names,
31 static int atl_dev_stats_get(struct rte_eth_dev *dev,
32 struct rte_eth_stats *stats);
34 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
35 struct rte_eth_xstat *stats, unsigned int n);
37 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
39 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
42 static void atl_dev_info_get(struct rte_eth_dev *dev,
43 struct rte_eth_dev_info *dev_info);
45 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
/* Interrupt plumbing helpers (LSC and per-RX-queue interrupts). */
47 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
50 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
51 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
52 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
53 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
54 struct rte_intr_handle *handle);
55 static void atl_dev_interrupt_handler(void *param);
57 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
58 struct rte_pci_device *pci_dev);
59 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
/* NOTE(review): a second, byte-identical forward declaration of
 * atl_dev_info_get() stood here; it duplicated the declaration made
 * earlier in this file and has been removed.
 */
/* Log type id for driver-runtime messages; registered in
 * RTE_INIT(atl_init_log) at the bottom of this file.
 */
65 int atl_logtype_driver;
68 * The set of PCI devices this driver supports
/* PCI ID table: legacy D10x ids plus the AQC100/107/108/109/111/112
 * families (including the S and E variants); zero-vendor sentinel ends
 * the table.
 */
70 static const struct rte_pci_id pci_id_atl_map[] = {
71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
93 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: needs BAR mapping, supports link-state-change
 * interrupts and IOVA-as-VA operation.
 */
96 static struct rte_pci_driver rte_atl_pmd = {
97 .id_table = pci_id_atl_map,
98 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
99 RTE_PCI_DRV_IOVA_AS_VA,
100 .probe = eth_atl_pci_probe,
101 .remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised via dev_infos_get(). */
104 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
105 | DEV_RX_OFFLOAD_IPV4_CKSUM \
106 | DEV_RX_OFFLOAD_UDP_CKSUM \
107 | DEV_RX_OFFLOAD_TCP_CKSUM \
108 | DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Tx offload capabilities advertised via dev_infos_get(). */
110 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
111 | DEV_TX_OFFLOAD_IPV4_CKSUM \
112 | DEV_TX_OFFLOAD_UDP_CKSUM \
113 | DEV_TX_OFFLOAD_TCP_CKSUM \
114 | DEV_TX_OFFLOAD_TCP_TSO \
115 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor-ring limits reported to applications. */
117 static const struct rte_eth_desc_lim rx_desc_lim = {
118 .nb_max = ATL_MAX_RING_DESC,
119 .nb_min = ATL_MIN_RING_DESC,
120 .nb_align = ATL_RXD_ALIGN,
/* Tx descriptor-ring limits, including per-packet segment caps. */
123 static const struct rte_eth_desc_lim tx_desc_lim = {
124 .nb_max = ATL_MAX_RING_DESC,
125 .nb_min = ATL_MIN_RING_DESC,
126 .nb_align = ATL_TXD_ALIGN,
127 .nb_seg_max = ATL_TX_MAX_SEG,
128 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Map an xstat entry to its field offset inside struct aq_stats_s.
 * NOTE(review): the name-string field of this macro/struct is not
 * visible in this view -- confirm against the full source.
 */
131 #define ATL_XSTATS_FIELD(name) { \
133 offsetof(struct aq_stats_s, name) \
136 struct atl_xstats_tbl_s {
/* Table of extended statistics exposed via xstats_get/xstats_get_names:
 * unicast/multicast/broadcast packet and byte counters plus Rx/Tx error
 * counters, all sourced from the FW-maintained aq_stats_s.
 */
141 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
142 ATL_XSTATS_FIELD(uprc),
143 ATL_XSTATS_FIELD(mprc),
144 ATL_XSTATS_FIELD(bprc),
145 ATL_XSTATS_FIELD(erpt),
146 ATL_XSTATS_FIELD(uptc),
147 ATL_XSTATS_FIELD(mptc),
148 ATL_XSTATS_FIELD(bptc),
149 ATL_XSTATS_FIELD(erpr),
150 ATL_XSTATS_FIELD(ubrc),
151 ATL_XSTATS_FIELD(ubtc),
152 ATL_XSTATS_FIELD(mbrc),
153 ATL_XSTATS_FIELD(mbtc),
154 ATL_XSTATS_FIELD(bbrc),
155 ATL_XSTATS_FIELD(bbtc),
/* ethdev callback table: lifecycle, link, stats, info and queue ops. */
158 static const struct eth_dev_ops atl_eth_dev_ops = {
159 .dev_configure = atl_dev_configure,
160 .dev_start = atl_dev_start,
161 .dev_stop = atl_dev_stop,
162 .dev_set_link_up = atl_dev_set_link_up,
163 .dev_set_link_down = atl_dev_set_link_down,
164 .dev_close = atl_dev_close,
165 .dev_reset = atl_dev_reset,
168 .link_update = atl_dev_link_update,
171 .stats_get = atl_dev_stats_get,
172 .xstats_get = atl_dev_xstats_get,
173 .xstats_get_names = atl_dev_xstats_get_names,
174 .stats_reset = atl_dev_stats_reset,
/* xstats share the same reset routine as basic stats. */
175 .xstats_reset = atl_dev_stats_reset,
177 .fw_version_get = atl_fw_version_get,
178 .dev_infos_get = atl_dev_info_get,
179 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
/* Queue ops are implemented in the companion rxtx file. */
182 .rx_queue_start = atl_rx_queue_start,
183 .rx_queue_stop = atl_rx_queue_stop,
184 .rx_queue_setup = atl_rx_queue_setup,
185 .rx_queue_release = atl_rx_queue_release,
187 .tx_queue_start = atl_tx_queue_start,
188 .tx_queue_stop = atl_tx_queue_stop,
189 .tx_queue_setup = atl_tx_queue_setup,
190 .tx_queue_release = atl_tx_queue_release,
192 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
193 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
/* Thin wrapper around the B0 hardware reset routine. */
196 static inline int32_t
197 atl_reset_hw(struct aq_hw_s *hw)
199 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt causes (lower status word) for this port. */
203 atl_enable_intr(struct rte_eth_dev *dev)
205 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
207 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) all interrupt causes for this adapter. */
211 atl_disable_intr(struct aq_hw_s *hw)
213 PMD_INIT_FUNC_TRACE();
214 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init callback invoked from the generic PCI probe path.
 * Hooks up the ops/burst functions for every process; in the primary
 * process it additionally maps BAR0, loads the FW ops table, reads the
 * permanent MAC address and registers the PCI interrupt handler.
 */
218 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
220 struct atl_adapter *adapter =
221 (struct atl_adapter *)eth_dev->data->dev_private;
222 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
223 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
224 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
227 PMD_INIT_FUNC_TRACE();
229 eth_dev->dev_ops = &atl_eth_dev_ops;
230 eth_dev->rx_pkt_burst = &atl_recv_pkts;
231 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
232 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
234 /* For secondary processes, the primary process has done all the work */
235 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
238 /* Vendor and Device ID need to be set before init of shared code */
239 hw->device_id = pci_dev->id.device_id;
240 hw->vendor_id = pci_dev->id.vendor_id;
241 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
243 /* Hardware configuration - hardcode */
244 adapter->hw_cfg.is_lro = false;
245 adapter->hw_cfg.wol = false;
246 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
252 hw->aq_nic_cfg = &adapter->hw_cfg;
254 /* disable interrupt */
255 atl_disable_intr(hw);
257 /* Allocate memory for storing MAC addresses */
258 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
259 if (eth_dev->data->mac_addrs == NULL) {
260 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Detect which FW flavour is running and bind its ops table. */
264 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
268 /* Copy the permanent MAC address */
269 if (hw->aq_fw_ops->get_mac_permanent(hw,
270 eth_dev->data->mac_addrs->addr_bytes) != 0)
273 /* Reset the hw statistics */
274 atl_dev_stats_reset(eth_dev);
276 rte_intr_callback_register(intr_handle,
277 atl_dev_interrupt_handler, eth_dev);
279 /* enable uio/vfio intr/eventfd mapping */
280 rte_intr_enable(intr_handle);
282 /* enable support intr */
283 atl_enable_intr(eth_dev);
/* Per-port teardown: closes the device if still running, clears the
 * ops/burst hooks, unregisters the interrupt callback and frees the
 * MAC-address array. Primary process only.
 */
289 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
291 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
292 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
295 PMD_INIT_FUNC_TRACE();
297 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
300 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Only run the close sequence if the adapter was not already stopped. */
302 if (hw->adapter_stopped == 0)
303 atl_dev_close(eth_dev);
305 eth_dev->dev_ops = NULL;
306 eth_dev->rx_pkt_burst = NULL;
307 eth_dev->tx_pkt_burst = NULL;
309 /* disable uio intr before callback unregister */
310 rte_intr_disable(intr_handle);
311 rte_intr_callback_unregister(intr_handle,
312 atl_dev_interrupt_handler, eth_dev);
314 rte_free(eth_dev->data->mac_addrs);
315 eth_dev->data->mac_addrs = NULL;
/* PCI probe: allocate an atl_adapter-sized private area and run
 * eth_atl_dev_init() via the generic helper.
 */
321 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
322 struct rte_pci_device *pci_dev)
324 return rte_eth_dev_pci_generic_probe(pci_dev,
325 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: tear the port down via eth_atl_dev_uninit(). */
329 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
331 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure callback: only records that a link-status refresh is
 * needed once the device starts.
 */
335 atl_dev_configure(struct rte_eth_dev *dev)
337 struct atl_interrupt *intr =
338 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
340 PMD_INIT_FUNC_TRACE();
342 /* set flag to update link status after init */
343 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
349 * Configure device link speed and setup link.
350 * It returns 0 on success.
353 atl_dev_start(struct rte_eth_dev *dev)
355 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
356 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
357 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
358 uint32_t intr_vector = 0;
359 uint32_t *link_speeds;
364 PMD_INIT_FUNC_TRACE();
366 /* set adapter started */
367 hw->adapter_stopped = 0;
/* Fixed-speed links are not supported by this PMD; reject them. */
369 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
371 "Invalid link_speeds for port %u, fix speed not supported",
376 /* disable uio/vfio intr/eventfd mapping */
377 rte_intr_disable(intr_handle);
379 /* reinitialize adapter
380 * this calls reset and start
382 status = atl_reset_hw(hw);
386 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
388 hw_atl_b0_hw_start(hw);
389 /* check and configure queue intr-vector mapping */
390 if ((rte_intr_cap_multiple(intr_handle) ||
391 !RTE_ETH_DEV_SRIOV(dev).active) &&
392 dev->data->dev_conf.intr_conf.rxq != 0) {
393 intr_vector = dev->data->nb_rx_queues;
394 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
395 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
396 ATL_MAX_INTR_QUEUE_NUM);
399 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
400 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate queue->vector mapping table when datapath intr is on. */
405 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
406 intr_handle->intr_vec = rte_zmalloc("intr_vec",
407 dev->data->nb_rx_queues * sizeof(int), 0);
408 if (intr_handle->intr_vec == NULL) {
409 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
410 " intr_vec", dev->data->nb_rx_queues);
415 /* initialize transmission unit */
418 /* This can fail when allocating mbufs for descriptor rings */
419 err = atl_rx_init(dev);
421 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
425 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
426 hw->fw_ver_actual >> 24,
427 (hw->fw_ver_actual >> 16) & 0xFF,
428 hw->fw_ver_actual & 0xFFFF);
429 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
431 err = atl_start_queues(dev);
433 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
/* Initial link reading so link_status reflects hardware state. */
437 err = hw->aq_fw_ops->update_link_status(hw);
442 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
444 link_speeds = &dev->data->dev_conf.link_speeds;
/* Translate ethdev speed flags into the NIC's rate mask. */
448 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
449 speed = hw->aq_nic_cfg->link_speed_msk;
451 if (*link_speeds & ETH_LINK_SPEED_10G)
452 speed |= AQ_NIC_RATE_10G;
453 if (*link_speeds & ETH_LINK_SPEED_5G)
454 speed |= AQ_NIC_RATE_5G;
455 if (*link_speeds & ETH_LINK_SPEED_1G)
456 speed |= AQ_NIC_RATE_1G;
457 if (*link_speeds & ETH_LINK_SPEED_2_5G)
458 speed |= AQ_NIC_RATE_2G5;
459 if (*link_speeds & ETH_LINK_SPEED_100M)
460 speed |= AQ_NIC_RATE_100M;
463 err = hw->aq_fw_ops->set_link_speed(hw, speed);
467 if (rte_intr_allow_others(intr_handle)) {
468 /* check if lsc interrupt is enabled */
469 if (dev->data->dev_conf.intr_conf.lsc != 0)
470 atl_dev_lsc_interrupt_setup(dev, true);
472 atl_dev_lsc_interrupt_setup(dev, false);
474 rte_intr_callback_unregister(intr_handle,
475 atl_dev_interrupt_handler, dev);
476 if (dev->data->dev_conf.intr_conf.lsc != 0)
477 PMD_INIT_LOG(INFO, "lsc won't enable because of"
478 " no intr multiplex");
481 /* check if rxq interrupt is enabled */
482 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
483 rte_intr_dp_is_en(intr_handle))
484 atl_dev_rxq_interrupt_setup(dev);
486 /* enable uio/vfio intr/eventfd mapping */
487 rte_intr_enable(intr_handle);
489 /* resume enabled intr since hw reset */
490 atl_enable_intr(dev);
/* Error path: stop the queues that were started above. */
495 atl_stop_queues(dev);
500 * Stop device: disable rx and tx functions to allow for reconfiguring.
503 atl_dev_stop(struct rte_eth_dev *dev)
505 struct rte_eth_link link;
/* NOTE(review): 'hw' is used below but its declaration line is not
 * visible here -- presumably `struct aq_hw_s *hw =`; confirm against
 * the full source.
 */
507 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
508 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
509 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
511 PMD_INIT_FUNC_TRACE();
513 /* disable interrupts */
514 atl_disable_intr(hw);
518 hw->adapter_stopped = 1;
520 atl_stop_queues(dev);
522 /* Clear stored conf */
523 dev->data->scattered_rx = 0;
526 /* Clear recorded link status */
527 memset(&link, 0, sizeof(link));
528 rte_eth_linkstatus_set(dev, &link);
530 if (!rte_intr_allow_others(intr_handle))
531 /* resume to the default handler */
532 rte_intr_callback_register(intr_handle,
533 atl_dev_interrupt_handler,
536 /* Clean datapath event and queue/vec mapping */
537 rte_intr_efd_disable(intr_handle);
538 if (intr_handle->intr_vec != NULL) {
539 rte_free(intr_handle->intr_vec);
540 intr_handle->intr_vec = NULL;
545 * Set device link up: enable tx.
548 atl_dev_set_link_up(struct rte_eth_dev *dev)
550 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Re-request the full configured rate mask from firmware. */
552 return hw->aq_fw_ops->set_link_speed(hw,
553 hw->aq_nic_cfg->link_speed_msk);
557 * Set device link down: disable tx.
560 atl_dev_set_link_down(struct rte_eth_dev *dev)
562 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* A zero rate mask tells firmware to drop the link. */
564 return hw->aq_fw_ops->set_link_speed(hw, 0);
568 * Reset and stop device.
571 atl_dev_close(struct rte_eth_dev *dev)
573 PMD_INIT_FUNC_TRACE();
/* Release all Rx/Tx queue resources. */
577 atl_free_queues(dev);
/* dev_reset callback: full uninit followed by re-init of the port. */
581 atl_dev_reset(struct rte_eth_dev *dev)
585 ret = eth_atl_dev_uninit(dev);
589 ret = eth_atl_dev_init(dev);
/* stats_get callback: refresh FW counters, then fill rte_eth_stats
 * from the hardware DMA counters plus software per-queue counters.
 */
596 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
598 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
599 struct aq_hw_s *hw = &adapter->hw;
600 struct atl_sw_stats *swstats = &adapter->sw_stats;
/* Pull the latest counters from firmware into hw->curr_stats. */
603 hw->aq_fw_ops->update_stats(hw);
605 /* Fill out the rte_eth_stats statistics structure */
606 stats->ipackets = hw->curr_stats.dma_pkt_rc;
607 stats->ibytes = hw->curr_stats.dma_oct_rc;
608 stats->imissed = hw->curr_stats.dpc;
609 stats->ierrors = hw->curr_stats.erpt;
611 stats->opackets = hw->curr_stats.dma_pkt_tc;
612 stats->obytes = hw->curr_stats.dma_oct_tc;
615 stats->rx_nombuf = swstats->rx_nombuf;
/* Per-queue counters are maintained in software by the rxtx path. */
617 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
618 stats->q_ipackets[i] = swstats->q_ipackets[i];
619 stats->q_opackets[i] = swstats->q_opackets[i];
620 stats->q_ibytes[i] = swstats->q_ibytes[i];
621 stats->q_obytes[i] = swstats->q_obytes[i];
622 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset/xstats_reset callback: refresh FW counters, then zero the
 * cached hardware snapshot and the software totals.
 */
628 atl_dev_stats_reset(struct rte_eth_dev *dev)
630 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
631 struct aq_hw_s *hw = &adapter->hw;
633 hw->aq_fw_ops->update_stats(hw);
635 /* Reset software totals */
636 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
638 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names callback: copy names from atl_xstats_tbl; when the
 * caller's array is absent/too small the table size is returned so the
 * caller can re-invoke with a big enough buffer.
 */
642 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
643 struct rte_eth_xstat_name *xstats_names,
649 return RTE_DIM(atl_xstats_tbl);
651 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
652 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
653 atl_xstats_tbl[i].name);
/* xstats_get callback: read each table entry's u64 value out of the
 * cached firmware stats block via its recorded offset.
 */
659 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
662 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
663 struct aq_hw_s *hw = &adapter->hw;
669 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
671 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
672 atl_xstats_tbl[i].offset);
/* fw_version_get callback: format the packed firmware version as
 * "major.minor.build" (assumes major in bits 31:24, minor in 23:16,
 * build in 15:0 -- matches the packing logged in atl_dev_start).
 */
679 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
681 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
683 unsigned int ret = 0;
685 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
689 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
690 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
/* Account for the terminating NUL when reporting required size. */
692 ret += 1; /* add string null-terminator */
/* dev_infos_get callback: advertise queue/MAC limits, offload
 * capabilities, default ring thresholds and supported link speeds.
 */
701 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
703 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
705 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
706 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
708 dev_info->min_rx_bufsize = 1024;
709 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
710 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
711 dev_info->max_vfs = pci_dev->max_vfs;
/* No MAC hashing or VMDq support in this PMD. */
713 dev_info->max_hash_mac_addrs = 0;
714 dev_info->max_vmdq_pools = 0;
715 dev_info->vmdq_queue_num = 0;
717 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
719 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
722 dev_info->default_rxconf = (struct rte_eth_rxconf) {
723 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
726 dev_info->default_txconf = (struct rte_eth_txconf) {
727 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
730 dev_info->rx_desc_lim = rx_desc_lim;
731 dev_info->tx_desc_lim = tx_desc_lim;
/* Supported rates: 100M, 1G, 2.5G, 5G and 10G. */
733 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
734 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
735 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
736 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Report the packet types the Rx burst routine can classify; only
 * valid when the default atl_recv_pkts burst function is in use.
 */
739 static const uint32_t *
740 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
742 static const uint32_t ptypes[] = {
744 RTE_PTYPE_L2_ETHER_ARP,
745 RTE_PTYPE_L2_ETHER_VLAN,
755 if (dev->rx_pkt_burst == atl_recv_pkts)
761 /* return 0 means link status changed, -1 means not changed */
763 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
765 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
766 struct atl_interrupt *intr =
767 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
768 struct rte_eth_link link, old;
/* Start from a "down" template; overwritten below if link is up. */
771 link.link_status = ETH_LINK_DOWN;
773 link.link_duplex = ETH_LINK_FULL_DUPLEX;
774 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
775 memset(&old, 0, sizeof(old));
777 /* load old link status */
778 rte_eth_linkstatus_get(dev, &old);
780 /* read current link status */
781 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 is how firmware reports link-down. */
786 if (hw->aq_link_status.mbps == 0) {
787 /* write default (down) link status */
788 rte_eth_linkstatus_set(dev, &link);
789 if (link.link_status == old.link_status)
794 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
796 link.link_status = ETH_LINK_UP;
797 link.link_duplex = ETH_LINK_FULL_DUPLEX;
798 link.link_speed = hw->aq_link_status.mbps;
800 rte_eth_linkstatus_set(dev, &link);
802 if (link.link_status == old.link_status)
810 * It clears the interrupt causes and enables the interrupt.
811 * It will be called once only during nic initialized.
814 * Pointer to struct rte_eth_dev.
819 * - On success, zero.
820 * - On failure, a negative value.
824 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
826 atl_dev_link_status_print(dev);
/* Per-RX-queue interrupt setup; body not visible in this view. */
831 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and latch the pending interrupt cause: masks further
 * interrupts, then records a link-update request if the LINK cause
 * bit was set. Re-enabled later in atl_dev_interrupt_action().
 */
838 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
840 struct atl_interrupt *intr =
841 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
842 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
845 hw_atl_b0_hw_irq_read(hw, &cause);
847 atl_disable_intr(hw);
848 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
849 ATL_FLAG_NEED_LINK_UPDATE : 0;
855 * It gets and then prints the link status.
858 * Pointer to struct rte_eth_dev.
861 * - On success, zero.
862 * - On failure, a negative value.
865 atl_dev_link_status_print(struct rte_eth_dev *dev)
867 struct rte_eth_link link;
869 memset(&link, 0, sizeof(link));
870 rte_eth_linkstatus_get(dev, &link);
871 if (link.link_status) {
872 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
873 (int)(dev->data->port_id),
874 (unsigned int)link.link_speed,
875 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
876 "full-duplex" : "half-duplex");
878 PMD_DRV_LOG(INFO, " Port %d: Link Down",
879 (int)(dev->data->port_id));
/* Also log the PCI address and speed of the port for diagnostics. */
885 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
887 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
888 pci_dev->addr.domain,
891 pci_dev->addr.function);
895 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
899 * It executes link_update after knowing an interrupt occurred.
902 * Pointer to struct rte_eth_dev.
905 * - On success, zero.
906 * - On failure, a negative value.
909 atl_dev_interrupt_action(struct rte_eth_dev *dev,
910 struct rte_intr_handle *intr_handle)
912 struct atl_interrupt *intr =
913 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
/* Flag set by atl_dev_interrupt_get_status() when the LINK cause
 * fired: refresh link state and notify registered LSC callbacks.
 */
915 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
916 atl_dev_link_update(dev, 0);
917 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
918 atl_dev_link_status_print(dev);
919 _rte_eth_dev_callback_process(dev,
920 RTE_ETH_EVENT_INTR_LSC, NULL);
/* Re-arm interrupts masked in atl_dev_interrupt_get_status(). */
923 atl_enable_intr(dev);
924 rte_intr_enable(intr_handle);
930 * Interrupt handler triggered by NIC for handling
931 * specific interrupt.
934 * Pointer to interrupt handle.
936 * The address of parameter (struct rte_eth_dev *) registered before.
942 atl_dev_interrupt_handler(void *param)
944 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
/* Latch the cause, then act on it (link update / re-arm). */
946 atl_dev_interrupt_get_status(dev);
947 atl_dev_interrupt_action(dev, dev->intr_handle);
/* Register the PMD on the PCI bus, export its device-id table and the
 * kernel modules it depends on.
 */
950 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
951 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
952 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the init and driver log types with a default
 * level of NOTICE.
 */
954 RTE_INIT(atl_init_log)
956 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
957 if (atl_logtype_init >= 0)
958 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
959 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
960 if (atl_logtype_driver >= 0)
961 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);