1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations for the dev_ops callbacks and PCI probe/remove
 * entry points implemented later in this file.
 * NOTE(review): this extract is missing lines (gaps in the embedded
 * numbering), so some prototypes appear without their trailing
 * parameter lines.
 */
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
/* Interrupt plumbing: setup, status read, dispatch, and the ISR itself. */
54 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
55 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
56 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
57 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
58 struct rte_intr_handle *handle);
59 static void atl_dev_interrupt_handler(void *param);
/* RSS redirection table and hash configuration callbacks. */
62 static int atl_reta_update(struct rte_eth_dev *dev,
63 struct rte_eth_rss_reta_entry64 *reta_conf,
65 static int atl_reta_query(struct rte_eth_dev *dev,
66 struct rte_eth_rss_reta_entry64 *reta_conf,
68 static int atl_rss_hash_update(struct rte_eth_dev *dev,
69 struct rte_eth_rss_conf *rss_conf);
70 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
71 struct rte_eth_rss_conf *rss_conf);
74 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
75 struct rte_pci_device *pci_dev);
76 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
/* NOTE(review): duplicate prototype of atl_dev_info_get (already declared
 * above at original lines 46-47) — harmless but could be removed upstream.
 */
78 static void atl_dev_info_get(struct rte_eth_dev *dev,
79 struct rte_eth_dev_info *dev_info);
/* Log type id for driver runtime messages; registered in RTE_INIT(atl_init_log)
 * at the bottom of this file.
 */
82 int atl_logtype_driver;
85 * The set of PCI devices this driver supports
/* Sentinel-terminated PCI id table; matched by the EAL PCI bus during probe.
 * Entries cover the engineering-sample (Dxxx), production (AQCxxx),
 * "S" and "E" variants of the Aquantia AQtion family.
 */
87 static const struct rte_pci_id pci_id_atl_map[] = {
88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
97 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
98 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
99 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
102 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
110 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: BAR mapping required, link-status-change interrupt
 * supported, and IOVA-as-VA mode usable.
 */
113 static struct rte_pci_driver rte_atl_pmd = {
114 .id_table = pci_id_atl_map,
115 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
116 RTE_PCI_DRV_IOVA_AS_VA,
117 .probe = eth_atl_pci_probe,
118 .remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised in atl_dev_info_get(). */
121 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
122 | DEV_RX_OFFLOAD_IPV4_CKSUM \
123 | DEV_RX_OFFLOAD_UDP_CKSUM \
124 | DEV_RX_OFFLOAD_TCP_CKSUM \
125 | DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Tx offload capabilities advertised in atl_dev_info_get(). */
127 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
128 | DEV_TX_OFFLOAD_IPV4_CKSUM \
129 | DEV_TX_OFFLOAD_UDP_CKSUM \
130 | DEV_TX_OFFLOAD_TCP_CKSUM \
131 | DEV_TX_OFFLOAD_TCP_TSO \
132 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor ring limits reported via dev_info. */
134 static const struct rte_eth_desc_lim rx_desc_lim = {
135 .nb_max = ATL_MAX_RING_DESC,
136 .nb_min = ATL_MIN_RING_DESC,
137 .nb_align = ATL_RXD_ALIGN,
/* Tx descriptor ring limits, including per-packet segment maximums. */
140 static const struct rte_eth_desc_lim tx_desc_lim = {
141 .nb_max = ATL_MAX_RING_DESC,
142 .nb_min = ATL_MIN_RING_DESC,
143 .nb_align = ATL_TXD_ALIGN,
144 .nb_seg_max = ATL_TX_MAX_SEG,
145 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Maps an xstat name to its byte offset inside struct aq_stats_s so
 * atl_dev_xstats_get() can read counters generically.
 */
148 #define ATL_XSTATS_FIELD(name) { \
150 offsetof(struct aq_stats_s, name) \
153 struct atl_xstats_tbl_s {
/* One entry per exported extended statistic (unicast/multicast/broadcast
 * packet and byte counters plus error counters, rx and tx directions).
 */
158 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
159 ATL_XSTATS_FIELD(uprc),
160 ATL_XSTATS_FIELD(mprc),
161 ATL_XSTATS_FIELD(bprc),
162 ATL_XSTATS_FIELD(erpt),
163 ATL_XSTATS_FIELD(uptc),
164 ATL_XSTATS_FIELD(mptc),
165 ATL_XSTATS_FIELD(bptc),
166 ATL_XSTATS_FIELD(erpr),
167 ATL_XSTATS_FIELD(ubrc),
168 ATL_XSTATS_FIELD(ubtc),
169 ATL_XSTATS_FIELD(mbrc),
170 ATL_XSTATS_FIELD(mbtc),
171 ATL_XSTATS_FIELD(bbrc),
172 ATL_XSTATS_FIELD(bbtc),
/* ethdev callback table wired into every atlantic port in eth_atl_dev_init().
 * Queue-level callbacks (atl_rx_queue_*, atl_tx_queue_*, atl_recv_pkts, ...)
 * are implemented in the companion rxtx source file.
 */
175 static const struct eth_dev_ops atl_eth_dev_ops = {
176 .dev_configure = atl_dev_configure,
177 .dev_start = atl_dev_start,
178 .dev_stop = atl_dev_stop,
179 .dev_set_link_up = atl_dev_set_link_up,
180 .dev_set_link_down = atl_dev_set_link_down,
181 .dev_close = atl_dev_close,
182 .dev_reset = atl_dev_reset,
185 .promiscuous_enable = atl_dev_promiscuous_enable,
186 .promiscuous_disable = atl_dev_promiscuous_disable,
187 .allmulticast_enable = atl_dev_allmulticast_enable,
188 .allmulticast_disable = atl_dev_allmulticast_disable,
191 .link_update = atl_dev_link_update,
194 .stats_get = atl_dev_stats_get,
195 .xstats_get = atl_dev_xstats_get,
196 .xstats_get_names = atl_dev_xstats_get_names,
/* Same handler resets both basic and extended stats (shared HW counters). */
197 .stats_reset = atl_dev_stats_reset,
198 .xstats_reset = atl_dev_stats_reset,
200 .fw_version_get = atl_fw_version_get,
201 .dev_infos_get = atl_dev_info_get,
202 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
205 .rx_queue_start = atl_rx_queue_start,
206 .rx_queue_stop = atl_rx_queue_stop,
207 .rx_queue_setup = atl_rx_queue_setup,
208 .rx_queue_release = atl_rx_queue_release,
210 .tx_queue_start = atl_tx_queue_start,
211 .tx_queue_stop = atl_tx_queue_stop,
212 .tx_queue_setup = atl_tx_queue_setup,
213 .tx_queue_release = atl_tx_queue_release,
215 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
216 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
218 .rx_queue_count = atl_rx_queue_count,
219 .rx_descriptor_status = atl_dev_rx_descriptor_status,
220 .tx_descriptor_status = atl_dev_tx_descriptor_status,
222 .rxq_info_get = atl_rxq_info_get,
223 .txq_info_get = atl_txq_info_get,
225 .reta_update = atl_reta_update,
226 .reta_query = atl_reta_query,
227 .rss_hash_update = atl_rss_hash_update,
228 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: full hardware reset via the B0 HAL. Returns the HAL status. */
231 static inline int32_t
232 atl_reset_hw(struct aq_hw_s *hw)
234 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt sources (low status word) for this port. */
238 atl_enable_intr(struct rte_eth_dev *dev)
240 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
242 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) all interrupt sources. Counterpart of atl_enable_intr(),
 * but takes the HW handle directly rather than the ethdev.
 */
246 atl_disable_intr(struct aq_hw_s *hw)
248 PMD_INIT_FUNC_TRACE();
249 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init called from the generic PCI probe path.
 * Wires burst/ops function pointers, applies a hard-coded HW config,
 * loads firmware ops, reads the permanent MAC, and arms interrupts.
 * NOTE(review): this extract is missing several lines (error returns after
 * the malloc/initfw/get_mac_permanent failures, the final return) — gaps in
 * the embedded line numbering.
 */
253 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
255 struct atl_adapter *adapter =
256 (struct atl_adapter *)eth_dev->data->dev_private;
257 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
258 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
259 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
262 PMD_INIT_FUNC_TRACE();
/* Datapath entry points; must be valid in all processes. */
264 eth_dev->dev_ops = &atl_eth_dev_ops;
265 eth_dev->rx_pkt_burst = &atl_recv_pkts;
266 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
267 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
269 /* For secondary processes, the primary process has done all the work */
270 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
273 /* Vendor and Device ID need to be set before init of shared code */
274 hw->device_id = pci_dev->id.device_id;
275 hw->vendor_id = pci_dev->id.vendor_id;
276 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
278 /* Hardware configuration - hardcode */
279 adapter->hw_cfg.is_lro = false;
280 adapter->hw_cfg.wol = false;
281 adapter->hw_cfg.is_rss = false;
282 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
/* Advertise every rate the NIC supports; additional rate bits from the
 * original source are not visible in this extract.
 */
284 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
290 adapter->hw_cfg.aq_rss.indirection_table_size =
291 HW_ATL_B0_RSS_REDIRECTION_MAX;
293 hw->aq_nic_cfg = &adapter->hw_cfg;
295 /* disable interrupt */
296 atl_disable_intr(hw);
298 /* Allocate memory for storing MAC addresses */
299 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
300 if (eth_dev->data->mac_addrs == NULL) {
301 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Detect FW flavor and bind the matching firmware-ops vtable. */
305 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
309 /* Copy the permanent MAC address */
310 if (hw->aq_fw_ops->get_mac_permanent(hw,
311 eth_dev->data->mac_addrs->addr_bytes) != 0)
314 /* Reset the hw statistics */
315 atl_dev_stats_reset(eth_dev);
317 rte_intr_callback_register(intr_handle,
318 atl_dev_interrupt_handler, eth_dev);
320 /* enable uio/vfio intr/eventfd mapping */
321 rte_intr_enable(intr_handle);
323 /* enable support intr */
324 atl_enable_intr(eth_dev);
/* Per-port teardown (remove path): closes the device if still running,
 * clears datapath pointers, unregisters the ISR, and frees the MAC array.
 * Primary-process only; secondaries return early.
 */
330 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
332 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
333 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
336 PMD_INIT_FUNC_TRACE();
338 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
341 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Stop/close first so queues and HW are quiesced before freeing. */
343 if (hw->adapter_stopped == 0)
344 atl_dev_close(eth_dev);
346 eth_dev->dev_ops = NULL;
347 eth_dev->rx_pkt_burst = NULL;
348 eth_dev->tx_pkt_burst = NULL;
350 /* disable uio intr before callback unregister */
351 rte_intr_disable(intr_handle);
352 rte_intr_callback_unregister(intr_handle,
353 atl_dev_interrupt_handler, eth_dev);
355 rte_free(eth_dev->data->mac_addrs);
356 eth_dev->data->mac_addrs = NULL;
/* PCI probe: delegate ethdev allocation (sized for atl_adapter) and
 * per-port init to the generic helper.
 */
362 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
363 struct rte_pci_device *pci_dev)
365 return rte_eth_dev_pci_generic_probe(pci_dev,
366 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: generic teardown through eth_atl_dev_uninit. */
370 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
372 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure callback: only flags that the link status must be refreshed
 * on the next start/interrupt; real HW programming happens in atl_dev_start().
 */
376 atl_dev_configure(struct rte_eth_dev *dev)
378 struct atl_interrupt *intr =
379 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
381 PMD_INIT_FUNC_TRACE();
383 /* set flag to update link status after init */
384 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
390 * Configure device link speed and setup link.
391 * It returns 0 on success.
/* dev_start callback: resets and re-inits the HW, sets up queue interrupt
 * vectors, starts rx/tx queues, negotiates link speed, and re-arms
 * interrupts. Error-path labels/returns are not visible in this extract
 * (gaps in the embedded numbering).
 */
394 atl_dev_start(struct rte_eth_dev *dev)
396 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
397 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
398 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
399 uint32_t intr_vector = 0;
400 uint32_t *link_speeds;
405 PMD_INIT_FUNC_TRACE();
407 /* set adapter started */
408 hw->adapter_stopped = 0;
/* Fixed-speed (no autoneg) configuration is not supported by this PMD. */
410 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
412 "Invalid link_speeds for port %u, fix speed not supported",
417 /* disable uio/vfio intr/eventfd mapping */
418 rte_intr_disable(intr_handle);
420 /* reinitialize adapter
421 * this calls reset and start
423 status = atl_reset_hw(hw);
427 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
429 hw_atl_b0_hw_start(hw);
430 /* check and configure queue intr-vector mapping */
431 if ((rte_intr_cap_multiple(intr_handle) ||
432 !RTE_ETH_DEV_SRIOV(dev).active) &&
433 dev->data->dev_conf.intr_conf.rxq != 0) {
434 intr_vector = dev->data->nb_rx_queues;
435 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
436 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
437 ATL_MAX_INTR_QUEUE_NUM);
440 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
441 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate the queue->vector map once, sized by rx queue count. */
446 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
447 intr_handle->intr_vec = rte_zmalloc("intr_vec",
448 dev->data->nb_rx_queues * sizeof(int), 0);
449 if (intr_handle->intr_vec == NULL) {
450 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
451 " intr_vec", dev->data->nb_rx_queues);
456 /* initialize transmission unit */
459 /* This can fail when allocating mbufs for descriptor rings */
460 err = atl_rx_init(dev);
462 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
466 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
467 hw->fw_ver_actual >> 24,
468 (hw->fw_ver_actual >> 16) & 0xFF,
469 hw->fw_ver_actual & 0xFFFF);
470 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
472 err = atl_start_queues(dev);
474 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
/* Seed link_status from the FW-reported link before enabling events. */
478 err = hw->aq_fw_ops->update_link_status(hw);
483 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
485 link_speeds = &dev->data->dev_conf.link_speeds;
/* Translate ethdev speed flags into the NIC's rate mask; AUTONEG means
 * "everything the config allows".
 */
489 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
490 speed = hw->aq_nic_cfg->link_speed_msk;
492 if (*link_speeds & ETH_LINK_SPEED_10G)
493 speed |= AQ_NIC_RATE_10G;
494 if (*link_speeds & ETH_LINK_SPEED_5G)
495 speed |= AQ_NIC_RATE_5G;
496 if (*link_speeds & ETH_LINK_SPEED_1G)
497 speed |= AQ_NIC_RATE_1G;
498 if (*link_speeds & ETH_LINK_SPEED_2_5G)
499 speed |= AQ_NIC_RATE_2G5;
500 if (*link_speeds & ETH_LINK_SPEED_100M)
501 speed |= AQ_NIC_RATE_100M;
504 err = hw->aq_fw_ops->set_link_speed(hw, speed);
508 if (rte_intr_allow_others(intr_handle)) {
509 /* check if lsc interrupt is enabled */
510 if (dev->data->dev_conf.intr_conf.lsc != 0)
511 atl_dev_lsc_interrupt_setup(dev, true);
513 atl_dev_lsc_interrupt_setup(dev, false);
/* No interrupt multiplexing: drop our ISR so the default handler runs. */
515 rte_intr_callback_unregister(intr_handle,
516 atl_dev_interrupt_handler, dev);
517 if (dev->data->dev_conf.intr_conf.lsc != 0)
518 PMD_INIT_LOG(INFO, "lsc won't enable because of"
519 " no intr multiplex");
522 /* check if rxq interrupt is enabled */
523 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
524 rte_intr_dp_is_en(intr_handle))
525 atl_dev_rxq_interrupt_setup(dev);
527 /* enable uio/vfio intr/eventfd mapping */
528 rte_intr_enable(intr_handle);
530 /* resume enabled intr since hw reset */
531 atl_enable_intr(dev);
/* Error path: stop queues before returning the failure. */
536 atl_stop_queues(dev);
541 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* dev_stop callback: masks interrupts, stops queues, clears recorded link
 * state, restores the default ISR if needed, and releases the rx intr map.
 * NOTE(review): the 'struct aq_hw_s *hw =' declarator line is missing from
 * this extract (only the macro call at original line 548 is visible).
 */
544 atl_dev_stop(struct rte_eth_dev *dev)
546 struct rte_eth_link link;
548 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
550 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
552 PMD_INIT_FUNC_TRACE();
554 /* disable interrupts */
555 atl_disable_intr(hw);
559 hw->adapter_stopped = 1;
561 atl_stop_queues(dev);
563 /* Clear stored conf */
564 dev->data->scattered_rx = 0;
567 /* Clear recorded link status */
568 memset(&link, 0, sizeof(link));
569 rte_eth_linkstatus_set(dev, &link);
571 if (!rte_intr_allow_others(intr_handle))
572 /* resume to the default handler */
573 rte_intr_callback_register(intr_handle,
574 atl_dev_interrupt_handler,
577 /* Clean datapath event and queue/vec mapping */
578 rte_intr_efd_disable(intr_handle);
579 if (intr_handle->intr_vec != NULL) {
580 rte_free(intr_handle->intr_vec);
581 intr_handle->intr_vec = NULL;
586 * Set device link up: enable tx.
/* Re-enables the link by asking FW for all configured rates. */
589 atl_dev_set_link_up(struct rte_eth_dev *dev)
591 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
593 return hw->aq_fw_ops->set_link_speed(hw,
594 hw->aq_nic_cfg->link_speed_msk);
598 * Set device link down: disable tx.
/* Speed mask 0 tells FW to drop the link. */
601 atl_dev_set_link_down(struct rte_eth_dev *dev)
603 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
605 return hw->aq_fw_ops->set_link_speed(hw, 0);
609 * Reset and stop device.
/* dev_close callback: frees queue resources. Intermediate lines (the call
 * to atl_dev_stop, per the surrounding flow) are not visible in this
 * extract — gap in the embedded numbering.
 */
612 atl_dev_close(struct rte_eth_dev *dev)
614 PMD_INIT_FUNC_TRACE();
618 atl_free_queues(dev);
/* dev_reset callback: full uninit + re-init cycle of the port. */
622 atl_dev_reset(struct rte_eth_dev *dev)
626 ret = eth_atl_dev_uninit(dev);
630 ret = eth_atl_dev_init(dev);
/* stats_get callback: refreshes HW DMA counters via FW, then merges them
 * with software per-queue counters kept by the rx/tx paths.
 */
637 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
639 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
640 struct aq_hw_s *hw = &adapter->hw;
641 struct atl_sw_stats *swstats = &adapter->sw_stats;
644 hw->aq_fw_ops->update_stats(hw);
646 /* Fill out the rte_eth_stats statistics structure */
647 stats->ipackets = hw->curr_stats.dma_pkt_rc;
648 stats->ibytes = hw->curr_stats.dma_oct_rc;
649 stats->imissed = hw->curr_stats.dpc;
650 stats->ierrors = hw->curr_stats.erpt;
652 stats->opackets = hw->curr_stats.dma_pkt_tc;
653 stats->obytes = hw->curr_stats.dma_oct_tc;
/* rx_nombuf is tracked in software (mbuf allocation failures). */
656 stats->rx_nombuf = swstats->rx_nombuf;
658 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
659 stats->q_ipackets[i] = swstats->q_ipackets[i];
660 stats->q_opackets[i] = swstats->q_opackets[i];
661 stats->q_ibytes[i] = swstats->q_ibytes[i];
662 stats->q_obytes[i] = swstats->q_obytes[i];
663 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset / xstats_reset callback: pulls the latest HW counters (so the
 * FW baseline is current) and zeroes both the cached HW snapshot and the
 * software counters.
 */
669 atl_dev_stats_reset(struct rte_eth_dev *dev)
671 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
672 struct aq_hw_s *hw = &adapter->hw;
674 hw->aq_fw_ops->update_stats(hw);
676 /* Reset software totals */
677 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
679 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names callback: copies names out of atl_xstats_tbl; when the
 * caller's array is absent/too small the table size is returned instead
 * (standard ethdev contract).
 */
683 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
684 struct rte_eth_xstat_name *xstats_names,
690 return RTE_DIM(atl_xstats_tbl);
692 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
693 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
694 atl_xstats_tbl[i].name);
/* xstats_get callback: reads each counter out of the cached HW stats block
 * using the per-entry byte offsets from atl_xstats_tbl.
 */
700 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
703 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
704 struct aq_hw_s *hw = &adapter->hw;
710 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
712 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
713 atl_xstats_tbl[i].offset);
/* fw_version_get callback: formats the FW version as "major.minor.build"
 * (8/8/16-bit fields of fw_ver). ret ends up as formatted length + 1 for
 * the NUL; the trailing truncation check is not visible in this extract.
 */
720 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
722 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
724 unsigned int ret = 0;
726 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
730 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
731 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
733 ret += 1; /* add string null-terminator */
/* dev_infos_get callback: advertises queue counts, MTU/MAC limits, offload
 * capabilities, default ring thresholds, RSS parameters, and supported
 * link speeds.
 */
742 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
744 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
746 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
747 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
749 dev_info->min_rx_bufsize = 1024;
750 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
751 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
752 dev_info->max_vfs = pci_dev->max_vfs;
/* No MAC hash filtering or VMDq support on this NIC. */
754 dev_info->max_hash_mac_addrs = 0;
755 dev_info->max_vmdq_pools = 0;
756 dev_info->vmdq_queue_num = 0;
758 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
760 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
763 dev_info->default_rxconf = (struct rte_eth_rxconf) {
764 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
767 dev_info->default_txconf = (struct rte_eth_txconf) {
768 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
771 dev_info->rx_desc_lim = rx_desc_lim;
772 dev_info->tx_desc_lim = tx_desc_lim;
/* RSS: key size in bytes (HW constant is in bits), full indirection table. */
774 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
775 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
776 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
778 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
779 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
780 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
781 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* dev_supported_ptypes_get callback: returns the static ptype list only when
 * the active rx burst function is this PMD's atl_recv_pkts. Most list
 * entries and the NULL fallback are not visible in this extract.
 */
784 static const uint32_t *
785 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
787 static const uint32_t ptypes[] = {
789 RTE_PTYPE_L2_ETHER_ARP,
790 RTE_PTYPE_L2_ETHER_VLAN,
800 if (dev->rx_pkt_burst == atl_recv_pkts)
806 /* return 0 means link status changed, -1 means not changed */
/* link_update callback: queries FW for current link, publishes it through
 * rte_eth_linkstatus_set(), and reports whether the status changed
 * relative to the previously stored value.
 */
808 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
810 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
811 struct atl_interrupt *intr =
812 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
813 struct rte_eth_link link, old;
/* Start from a "down" template; autoneg flag mirrors the HW setting. */
816 link.link_status = ETH_LINK_DOWN;
818 link.link_duplex = ETH_LINK_FULL_DUPLEX;
819 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
820 memset(&old, 0, sizeof(old));
822 /* load old link status */
823 rte_eth_linkstatus_get(dev, &old);
825 /* read current link status */
826 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 means no link: publish the down state. */
831 if (hw->aq_link_status.mbps == 0) {
832 /* write default (down) link status */
833 rte_eth_linkstatus_set(dev, &link);
834 if (link.link_status == old.link_status)
839 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
841 link.link_status = ETH_LINK_UP;
842 link.link_duplex = ETH_LINK_FULL_DUPLEX;
843 link.link_speed = hw->aq_link_status.mbps;
845 rte_eth_linkstatus_set(dev, &link);
847 if (link.link_status == old.link_status)
/* promiscuous_enable callback: turn on L2 promiscuous mode in HW. */
854 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
856 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
/* promiscuous_disable callback: turn off L2 promiscuous mode in HW. */
862 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
864 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
866 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* allmulticast_enable callback: accept all multicast frames in HW. */
870 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
872 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
874 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/* allmulticast_disable callback: stop accepting all multicast, unless the
 * port is promiscuous (promiscuous implies all-multicast, so leave it on).
 */
878 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
880 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
882 if (dev->data->promiscuous == 1)
883 return; /* must remain in all_multicast mode */
885 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
889 * It clears the interrupt causes and enables the interrupt.
890 * It will be called once only during nic initialized.
893 * Pointer to struct rte_eth_dev.
898 * - On success, zero.
899 * - On failure, a negative value.
/* LSC setup: logs the current link state; the 'on' handling lines are not
 * visible in this extract (gap in the embedded numbering).
 */
903 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
905 atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup; the body is not visible in this extract. */
910 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Reads and clears the HW interrupt cause, masks further interrupts, and
 * records ATL_FLAG_NEED_LINK_UPDATE when the cause was a link event.
 * Consumed by atl_dev_interrupt_action().
 */
917 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
919 struct atl_interrupt *intr =
920 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
921 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
924 hw_atl_b0_hw_irq_read(hw, &cause);
926 atl_disable_intr(hw);
927 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
928 ATL_FLAG_NEED_LINK_UPDATE : 0;
934 * It gets and then prints the link status.
937 * Pointer to struct rte_eth_dev.
940 * - On success, zero.
941 * - On failure, a negative value.
944 atl_dev_link_status_print(struct rte_eth_dev *dev)
946 struct rte_eth_link link;
948 memset(&link, 0, sizeof(link));
949 rte_eth_linkstatus_get(dev, &link);
950 if (link.link_status) {
951 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
952 (int)(dev->data->port_id),
953 (unsigned int)link.link_speed,
954 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
955 "full-duplex" : "half-duplex");
957 PMD_DRV_LOG(INFO, " Port %d: Link Down",
958 (int)(dev->data->port_id));
/* Debug-only block: also dump the port's PCI address. */
964 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
966 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
967 pci_dev->addr.domain,
970 pci_dev->addr.function);
974 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
978 * It executes link_update after knowing an interrupt occurred.
981 * Pointer to struct rte_eth_dev.
984 * - On success, zero.
985 * - On failure, a negative value.
/* Deferred interrupt work: on a link event, refresh and log the link state,
 * notify registered LSC callbacks, then re-arm HW and EAL interrupts.
 */
988 atl_dev_interrupt_action(struct rte_eth_dev *dev,
989 struct rte_intr_handle *intr_handle)
991 struct atl_interrupt *intr =
992 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
994 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
995 atl_dev_link_update(dev, 0);
996 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
997 atl_dev_link_status_print(dev);
998 _rte_eth_dev_callback_process(dev,
999 RTE_ETH_EVENT_INTR_LSC, NULL);
/* Re-enable interrupts masked in atl_dev_interrupt_get_status(). */
1002 atl_enable_intr(dev);
1003 rte_intr_enable(intr_handle);
1009 * Interrupt handler triggered by NIC for handling
1010 * specific interrupt.
1013 * Pointer to interrupt handle.
1015 * The address of parameter (struct rte_eth_dev *) regsitered before.
/* ISR registered with the EAL: read/clear the cause, then dispatch. */
1021 atl_dev_interrupt_handler(void *param)
1023 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1025 atl_dev_interrupt_get_status(dev);
1026 atl_dev_interrupt_action(dev, dev->intr_handle);
/* reta_update callback: copies caller entries into the cached indirection
 * table (clamping each to a valid rx queue index) and reprograms HW RSS.
 * NOTE(review): reta_conf->mask is not consulted in the visible code —
 * confirm against the full source before relying on partial updates.
 */
1030 atl_reta_update(struct rte_eth_dev *dev,
1031 struct rte_eth_rss_reta_entry64 *reta_conf,
1035 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1036 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1038 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1039 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1040 dev->data->nb_rx_queues - 1);
1042 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
/* reta_query callback: returns the cached indirection table and marks every
 * entry valid in the mask.
 */
1047 atl_reta_query(struct rte_eth_dev *dev,
1048 struct rte_eth_rss_reta_entry64 *reta_conf,
1052 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1054 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1055 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1056 reta_conf->mask = ~0U;
/* rss_hash_update callback: stores the caller's RSS key (or a built-in
 * default 40-byte key when none is supplied) and pushes both the
 * indirection table and the hash key to HW.
 */
1061 atl_rss_hash_update(struct rte_eth_dev *dev,
1062 struct rte_eth_rss_conf *rss_conf)
1064 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1065 struct aq_hw_cfg_s *cfg =
1066 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
/* Default Toeplitz key used when the application supplies none. */
1067 static u8 def_rss_key[40] = {
1068 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1069 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1070 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1071 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1072 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
/* RSS is considered enabled iff any hash function bit is requested. */
1075 cfg->is_rss = !!rss_conf->rss_hf;
1076 if (rss_conf->rss_key) {
1077 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1078 rss_conf->rss_key_len);
1079 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1081 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1082 sizeof(def_rss_key));
1083 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1086 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1087 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/* rss_hash_conf_get callback: reports the cached RSS state; the key is
 * copied out only when the caller provided a buffer.
 */
1092 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1093 struct rte_eth_rss_conf *rss_conf)
1095 struct aq_hw_cfg_s *cfg =
1096 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1098 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1099 if (rss_conf->rss_key) {
1100 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1101 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1102 rss_conf->rss_key_len);
/* Register the PMD with the EAL: driver, its PCI id table, and the kernel
 * modules it depends on for device access.
 */
1108 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1109 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1110 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the init and driver log types, defaulting both to
 * NOTICE level.
 */
1112 RTE_INIT(atl_init_log)
1114 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1115 if (atl_logtype_init >= 0)
1116 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1117 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1118 if (atl_logtype_driver >= 0)
1119 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);