1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
53 struct rte_eth_fc_conf *fc_conf);
54 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
55 struct rte_eth_fc_conf *fc_conf);
57 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
60 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
61 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
62 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
63 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
64 struct rte_intr_handle *handle);
65 static void atl_dev_interrupt_handler(void *param);
68 static int atl_reta_update(struct rte_eth_dev *dev,
69 struct rte_eth_rss_reta_entry64 *reta_conf,
71 static int atl_reta_query(struct rte_eth_dev *dev,
72 struct rte_eth_rss_reta_entry64 *reta_conf,
74 static int atl_rss_hash_update(struct rte_eth_dev *dev,
75 struct rte_eth_rss_conf *rss_conf);
76 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
77 struct rte_eth_rss_conf *rss_conf);
80 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
81 struct rte_pci_device *pci_dev);
82 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
84 static void atl_dev_info_get(struct rte_eth_dev *dev,
85 struct rte_eth_dev_info *dev_info);
/* Log type id for driver runtime messages; registered in atl_init_log(). */
88 int atl_logtype_driver;
91 * The set of PCI devices this driver supports
/* PCI device-id table: Aquantia AQC1xx family (engineering-sample D1xx ids,
 * production AQC1xx, AQC1xxS and AQC1xxE variants). Terminated by a
 * zero vendor_id sentinel, as required by the rte_pci framework.
 */
93 static const struct rte_pci_id pci_id_atl_map[] = {
94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
97 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
98 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
100 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
102 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
116 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping, supports link-state-change
 * interrupts, and can run with IOVA as VA.
 */
119 static struct rte_pci_driver rte_atl_pmd = {
120 .id_table = pci_id_atl_map,
121 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
122 RTE_PCI_DRV_IOVA_AS_VA,
123 .probe = eth_atl_pci_probe,
124 .remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised via dev_infos_get(). */
127 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
128 | DEV_RX_OFFLOAD_IPV4_CKSUM \
129 | DEV_RX_OFFLOAD_UDP_CKSUM \
130 | DEV_RX_OFFLOAD_TCP_CKSUM \
131 | DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Tx offload capabilities advertised via dev_infos_get(). */
133 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
134 | DEV_TX_OFFLOAD_IPV4_CKSUM \
135 | DEV_TX_OFFLOAD_UDP_CKSUM \
136 | DEV_TX_OFFLOAD_TCP_CKSUM \
137 | DEV_TX_OFFLOAD_TCP_TSO \
138 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor-ring size limits reported to applications. */
140 static const struct rte_eth_desc_lim rx_desc_lim = {
141 .nb_max = ATL_MAX_RING_DESC,
142 .nb_min = ATL_MIN_RING_DESC,
143 .nb_align = ATL_RXD_ALIGN,
/* Tx descriptor-ring size limits, including per-packet segment caps. */
146 static const struct rte_eth_desc_lim tx_desc_lim = {
147 .nb_max = ATL_MAX_RING_DESC,
148 .nb_min = ATL_MIN_RING_DESC,
149 .nb_align = ATL_TXD_ALIGN,
150 .nb_seg_max = ATL_TX_MAX_SEG,
151 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Build one atl_xstats_tbl_s entry from an aq_stats_s member: records the
 * member's byte offset (and, presumably, its stringified name on the line
 * elided from this excerpt — TODO confirm against full file).
 */
154 #define ATL_XSTATS_FIELD(name) { \
156 offsetof(struct aq_stats_s, name) \
159 struct atl_xstats_tbl_s {
/* Extended-statistics table: maps each exported xstat to its field in
 * struct aq_stats_s (unicast/multicast/broadcast packet and byte counters
 * plus error counters, for both Rx and Tx).
 */
164 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
165 ATL_XSTATS_FIELD(uprc),
166 ATL_XSTATS_FIELD(mprc),
167 ATL_XSTATS_FIELD(bprc),
168 ATL_XSTATS_FIELD(erpt),
169 ATL_XSTATS_FIELD(uptc),
170 ATL_XSTATS_FIELD(mptc),
171 ATL_XSTATS_FIELD(bptc),
172 ATL_XSTATS_FIELD(erpr),
173 ATL_XSTATS_FIELD(ubrc),
174 ATL_XSTATS_FIELD(ubtc),
175 ATL_XSTATS_FIELD(mbrc),
176 ATL_XSTATS_FIELD(mbtc),
177 ATL_XSTATS_FIELD(bbrc),
178 ATL_XSTATS_FIELD(bbtc),
/* ethdev ops vtable wiring the rte_ethdev API to this PMD's handlers.
 * Note: stats_reset doubles as xstats_reset (both clear the same counters).
 */
181 static const struct eth_dev_ops atl_eth_dev_ops = {
182 .dev_configure = atl_dev_configure,
183 .dev_start = atl_dev_start,
184 .dev_stop = atl_dev_stop,
185 .dev_set_link_up = atl_dev_set_link_up,
186 .dev_set_link_down = atl_dev_set_link_down,
187 .dev_close = atl_dev_close,
188 .dev_reset = atl_dev_reset,
191 .promiscuous_enable = atl_dev_promiscuous_enable,
192 .promiscuous_disable = atl_dev_promiscuous_disable,
193 .allmulticast_enable = atl_dev_allmulticast_enable,
194 .allmulticast_disable = atl_dev_allmulticast_disable,
197 .link_update = atl_dev_link_update,
200 .stats_get = atl_dev_stats_get,
201 .xstats_get = atl_dev_xstats_get,
202 .xstats_get_names = atl_dev_xstats_get_names,
203 .stats_reset = atl_dev_stats_reset,
204 .xstats_reset = atl_dev_stats_reset,
206 .fw_version_get = atl_fw_version_get,
207 .dev_infos_get = atl_dev_info_get,
208 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
211 .rx_queue_start = atl_rx_queue_start,
212 .rx_queue_stop = atl_rx_queue_stop,
213 .rx_queue_setup = atl_rx_queue_setup,
214 .rx_queue_release = atl_rx_queue_release,
216 .tx_queue_start = atl_tx_queue_start,
217 .tx_queue_stop = atl_tx_queue_stop,
218 .tx_queue_setup = atl_tx_queue_setup,
219 .tx_queue_release = atl_tx_queue_release,
221 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
222 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
224 .rx_queue_count = atl_rx_queue_count,
225 .rx_descriptor_status = atl_dev_rx_descriptor_status,
226 .tx_descriptor_status = atl_dev_tx_descriptor_status,
229 .flow_ctrl_get = atl_flow_ctrl_get,
230 .flow_ctrl_set = atl_flow_ctrl_set,
232 .rxq_info_get = atl_rxq_info_get,
233 .txq_info_get = atl_txq_info_get,
235 .reta_update = atl_reta_update,
236 .reta_query = atl_reta_query,
237 .rss_hash_update = atl_rss_hash_update,
238 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: full hardware reset via the B0 HAL. */
241 static inline int32_t
242 atl_reset_hw(struct aq_hw_s *hw)
244 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt sources (lower status word) on the device. */
248 atl_enable_intr(struct rte_eth_dev *dev)
250 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
252 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) all interrupt sources on the device. */
256 atl_disable_intr(struct aq_hw_s *hw)
258 PMD_INIT_FUNC_TRACE();
259 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init: installs ops and burst functions, fills the hardcoded
 * hardware config, loads FW ops and the permanent MAC address, then
 * registers and enables the device interrupt handler.
 */
263 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
265 struct atl_adapter *adapter =
266 (struct atl_adapter *)eth_dev->data->dev_private;
267 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
268 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
269 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
272 PMD_INIT_FUNC_TRACE();
274 eth_dev->dev_ops = &atl_eth_dev_ops;
275 eth_dev->rx_pkt_burst = &atl_recv_pkts;
276 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
277 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
279 /* For secondary processes, the primary process has done all the work */
280 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
283 /* Vendor and Device ID need to be set before init of shared code */
284 hw->device_id = pci_dev->id.device_id;
285 hw->vendor_id = pci_dev->id.vendor_id;
286 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
288 /* Hardware configuration - hardcode */
289 adapter->hw_cfg.is_lro = false;
290 adapter->hw_cfg.wol = false;
291 adapter->hw_cfg.is_rss = false;
292 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
294 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
300 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
301 adapter->hw_cfg.aq_rss.indirection_table_size =
302 HW_ATL_B0_RSS_REDIRECTION_MAX;
304 hw->aq_nic_cfg = &adapter->hw_cfg;
306 /* disable interrupt */
307 atl_disable_intr(hw);
309 /* Allocate memory for storing MAC addresses */
310 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
311 if (eth_dev->data->mac_addrs == NULL) {
312 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Detect FW flavour and bind the matching aq_fw_ops table. */
316 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
320 /* Copy the permanent MAC address */
321 if (hw->aq_fw_ops->get_mac_permanent(hw,
322 eth_dev->data->mac_addrs->addr_bytes) != 0)
325 /* Reset the hw statistics */
326 atl_dev_stats_reset(eth_dev);
328 rte_intr_callback_register(intr_handle,
329 atl_dev_interrupt_handler, eth_dev);
331 /* enable uio/vfio intr/eventfd mapping */
332 rte_intr_enable(intr_handle);
334 /* enable support intr */
335 atl_enable_intr(eth_dev);
/* Per-port teardown (primary process only): closes the device if still
 * running, clears ops/burst pointers, unregisters the interrupt callback
 * and frees the MAC address storage.
 */
341 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
343 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
344 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
347 PMD_INIT_FUNC_TRACE();
349 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
352 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
354 if (hw->adapter_stopped == 0)
355 atl_dev_close(eth_dev);
357 eth_dev->dev_ops = NULL;
358 eth_dev->rx_pkt_burst = NULL;
359 eth_dev->tx_pkt_burst = NULL;
361 /* disable uio intr before callback unregister */
362 rte_intr_disable(intr_handle);
363 rte_intr_callback_unregister(intr_handle,
364 atl_dev_interrupt_handler, eth_dev);
366 rte_free(eth_dev->data->mac_addrs);
367 eth_dev->data->mac_addrs = NULL;
/* PCI probe: allocate an ethdev with atl_adapter private data and init it. */
373 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
374 struct rte_pci_device *pci_dev)
376 return rte_eth_dev_pci_generic_probe(pci_dev,
377 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: generic teardown via eth_atl_dev_uninit. */
381 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
383 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure: nothing to program yet; just request a link-status
 * refresh once the port is started.
 */
387 atl_dev_configure(struct rte_eth_dev *dev)
389 struct atl_interrupt *intr =
390 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
392 PMD_INIT_FUNC_TRACE();
394 /* set flag to update link status after init */
395 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
401 * Configure device link speed and setup link.
402 * It returns 0 on success.
/* dev_start: resets and re-inits the HW, sets up per-queue Rx interrupt
 * vectors if requested, starts Rx/Tx, programs the negotiated/requested
 * link speed, then (re)enables interrupts. Fixed-speed mode is rejected.
 */
405 atl_dev_start(struct rte_eth_dev *dev)
407 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
408 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
409 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
410 uint32_t intr_vector = 0;
411 uint32_t *link_speeds;
416 PMD_INIT_FUNC_TRACE();
418 /* set adapter started */
419 hw->adapter_stopped = 0;
421 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
423 "Invalid link_speeds for port %u, fix speed not supported",
428 /* disable uio/vfio intr/eventfd mapping */
429 rte_intr_disable(intr_handle);
431 /* reinitialize adapter
432 * this calls reset and start
434 status = atl_reset_hw(hw);
438 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
440 hw_atl_b0_hw_start(hw);
441 /* check and configure queue intr-vector mapping */
442 if ((rte_intr_cap_multiple(intr_handle) ||
443 !RTE_ETH_DEV_SRIOV(dev).active) &&
444 dev->data->dev_conf.intr_conf.rxq != 0) {
445 intr_vector = dev->data->nb_rx_queues;
446 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
447 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
448 ATL_MAX_INTR_QUEUE_NUM);
451 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
452 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
457 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
458 intr_handle->intr_vec = rte_zmalloc("intr_vec",
459 dev->data->nb_rx_queues * sizeof(int), 0);
460 if (intr_handle->intr_vec == NULL) {
461 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
462 " intr_vec", dev->data->nb_rx_queues);
467 /* initialize transmission unit */
470 /* This can fail when allocating mbufs for descriptor rings */
471 err = atl_rx_init(dev);
473 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
/* FW version layout here: major in bits 31:24, minor 23:16, build 15:0. */
477 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
478 hw->fw_ver_actual >> 24,
479 (hw->fw_ver_actual >> 16) & 0xFF,
480 hw->fw_ver_actual & 0xFFFF);
481 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
483 err = atl_start_queues(dev);
485 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
489 err = hw->aq_fw_ops->update_link_status(hw);
494 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
496 link_speeds = &dev->data->dev_conf.link_speeds;
/* Translate ethdev speed request into the firmware rate mask. */
500 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
501 speed = hw->aq_nic_cfg->link_speed_msk;
503 if (*link_speeds & ETH_LINK_SPEED_10G)
504 speed |= AQ_NIC_RATE_10G;
505 if (*link_speeds & ETH_LINK_SPEED_5G)
506 speed |= AQ_NIC_RATE_5G;
507 if (*link_speeds & ETH_LINK_SPEED_1G)
508 speed |= AQ_NIC_RATE_1G;
509 if (*link_speeds & ETH_LINK_SPEED_2_5G)
510 speed |= AQ_NIC_RATE_2G5;
511 if (*link_speeds & ETH_LINK_SPEED_100M)
512 speed |= AQ_NIC_RATE_100M;
515 err = hw->aq_fw_ops->set_link_speed(hw, speed);
519 if (rte_intr_allow_others(intr_handle)) {
520 /* check if lsc interrupt is enabled */
521 if (dev->data->dev_conf.intr_conf.lsc != 0)
522 atl_dev_lsc_interrupt_setup(dev, true);
524 atl_dev_lsc_interrupt_setup(dev, false);
/* No interrupt multiplexing: fall back to polling, LSC unavailable. */
526 rte_intr_callback_unregister(intr_handle,
527 atl_dev_interrupt_handler, dev);
528 if (dev->data->dev_conf.intr_conf.lsc != 0)
529 PMD_INIT_LOG(INFO, "lsc won't enable because of"
530 " no intr multiplex");
533 /* check if rxq interrupt is enabled */
534 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
535 rte_intr_dp_is_en(intr_handle))
536 atl_dev_rxq_interrupt_setup(dev);
538 /* enable uio/vfio intr/eventfd mapping */
539 rte_intr_enable(intr_handle);
541 /* resume enabled intr since hw reset */
542 atl_enable_intr(dev);
/* Error path: stop queues before returning the failure. */
547 atl_stop_queues(dev);
552 * Stop device: disable rx and tx functions to allow for reconfiguring.
555 atl_dev_stop(struct rte_eth_dev *dev)
557 struct rte_eth_link link;
559 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
560 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
561 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
563 PMD_INIT_FUNC_TRACE();
565 /* disable interrupts */
566 atl_disable_intr(hw);
570 hw->adapter_stopped = 1;
572 atl_stop_queues(dev);
574 /* Clear stored conf */
575 dev->data->scattered_rx = 0;
578 /* Clear recorded link status */
579 memset(&link, 0, sizeof(link));
580 rte_eth_linkstatus_set(dev, &link);
582 if (!rte_intr_allow_others(intr_handle))
583 /* resume to the default handler */
584 rte_intr_callback_register(intr_handle,
585 atl_dev_interrupt_handler,
588 /* Clean datapath event and queue/vec mapping */
589 rte_intr_efd_disable(intr_handle);
590 if (intr_handle->intr_vec != NULL) {
591 rte_free(intr_handle->intr_vec);
592 intr_handle->intr_vec = NULL;
597 * Set device link up: enable tx.
/* Re-enables the link by restoring the full configured speed mask. */
600 atl_dev_set_link_up(struct rte_eth_dev *dev)
602 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
604 return hw->aq_fw_ops->set_link_speed(hw,
605 hw->aq_nic_cfg->link_speed_msk);
609 * Set device link down: disable tx.
/* A zero speed mask tells firmware to drop the link. */
612 atl_dev_set_link_down(struct rte_eth_dev *dev)
614 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
616 return hw->aq_fw_ops->set_link_speed(hw, 0);
620 * Reset and stop device.
623 atl_dev_close(struct rte_eth_dev *dev)
625 PMD_INIT_FUNC_TRACE();
629 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of the port. */
633 atl_dev_reset(struct rte_eth_dev *dev)
637 ret = eth_atl_dev_uninit(dev);
641 ret = eth_atl_dev_init(dev);
/* stats_get: refresh HW counters from firmware, then combine DMA-level
 * HW counters with software per-queue counters into rte_eth_stats.
 */
648 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
650 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
651 struct aq_hw_s *hw = &adapter->hw;
652 struct atl_sw_stats *swstats = &adapter->sw_stats;
655 hw->aq_fw_ops->update_stats(hw);
657 /* Fill out the rte_eth_stats statistics structure */
658 stats->ipackets = hw->curr_stats.dma_pkt_rc;
659 stats->ibytes = hw->curr_stats.dma_oct_rc;
660 stats->imissed = hw->curr_stats.dpc;
661 stats->ierrors = hw->curr_stats.erpt;
663 stats->opackets = hw->curr_stats.dma_pkt_tc;
664 stats->obytes = hw->curr_stats.dma_oct_tc;
667 stats->rx_nombuf = swstats->rx_nombuf;
669 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
670 stats->q_ipackets[i] = swstats->q_ipackets[i];
671 stats->q_opackets[i] = swstats->q_opackets[i];
672 stats->q_ibytes[i] = swstats->q_ibytes[i];
673 stats->q_obytes[i] = swstats->q_obytes[i];
674 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used as xstats_reset): latch current FW counters,
 * then zero both the cached HW snapshot and the software counters.
 */
680 atl_dev_stats_reset(struct rte_eth_dev *dev)
682 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
683 struct aq_hw_s *hw = &adapter->hw;
685 hw->aq_fw_ops->update_stats(hw);
687 /* Reset software totals */
688 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
690 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names: copy table names into the caller's array; when called
 * with no output buffer (per ethdev convention) just report the count.
 */
694 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
695 struct rte_eth_xstat_name *xstats_names,
701 return RTE_DIM(atl_xstats_tbl);
703 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
704 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
705 atl_xstats_tbl[i].name);
/* xstats_get: read each counter out of the cached aq_stats_s snapshot by
 * the byte offset recorded in atl_xstats_tbl.
 */
711 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
714 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
715 struct aq_hw_s *hw = &adapter->hw;
721 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
723 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
724 atl_xstats_tbl[i].offset);
/* fw_version_get: format "major.minor.build" (8/8/16-bit fields) into the
 * caller's buffer; the returned length includes the NUL terminator so the
 * ethdev layer can detect truncation.
 */
731 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
733 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
735 unsigned int ret = 0;
737 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
741 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
742 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
744 ret += 1; /* add string null-terminator */
/* dev_infos_get: advertise queue/MAC limits, offload capabilities,
 * descriptor limits, RSS parameters and the supported speed set.
 */
753 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
755 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
757 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
758 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
760 dev_info->min_rx_bufsize = 1024;
761 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
762 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
763 dev_info->max_vfs = pci_dev->max_vfs;
765 dev_info->max_hash_mac_addrs = 0;
766 dev_info->max_vmdq_pools = 0;
767 dev_info->vmdq_queue_num = 0;
769 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
771 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
774 dev_info->default_rxconf = (struct rte_eth_rxconf) {
775 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
778 dev_info->default_txconf = (struct rte_eth_txconf) {
779 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
782 dev_info->rx_desc_lim = rx_desc_lim;
783 dev_info->tx_desc_lim = tx_desc_lim;
785 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
786 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
787 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
789 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
790 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
791 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
792 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Report the packet types the Rx burst routine can classify; only valid
 * when the default (non-replaced) burst function is in use.
 */
795 static const uint32_t *
796 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
798 static const uint32_t ptypes[] = {
800 RTE_PTYPE_L2_ETHER_ARP,
801 RTE_PTYPE_L2_ETHER_VLAN,
811 if (dev->rx_pkt_burst == atl_recv_pkts)
817 /* return 0 means link status changed, -1 means not changed */
819 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
821 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
822 struct atl_interrupt *intr =
823 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
824 struct rte_eth_link link, old;
/* Start from a link-down template; overwritten below if FW reports up. */
827 link.link_status = ETH_LINK_DOWN;
829 link.link_duplex = ETH_LINK_FULL_DUPLEX;
830 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
831 memset(&old, 0, sizeof(old));
833 /* load old link status */
834 rte_eth_linkstatus_get(dev, &old);
836 /* read current link status */
837 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 from firmware means the link is down. */
842 if (hw->aq_link_status.mbps == 0) {
843 /* write default (down) link status */
844 rte_eth_linkstatus_set(dev, &link);
845 if (link.link_status == old.link_status)
850 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
852 link.link_status = ETH_LINK_UP;
853 link.link_duplex = ETH_LINK_FULL_DUPLEX;
854 link.link_speed = hw->aq_link_status.mbps;
856 rte_eth_linkstatus_set(dev, &link);
858 if (link.link_status == old.link_status)
/* Enable L2 promiscuous mode in the Rx packet filter. */
865 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
867 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
869 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
/* Disable L2 promiscuous mode in the Rx packet filter. */
873 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
875 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
877 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* Accept all multicast traffic regardless of the multicast filter. */
881 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
883 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
885 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/* Stop accepting all multicast — unless promiscuous mode is active,
 * which implies all-multicast and must not be silently narrowed.
 */
889 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
891 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
893 if (dev->data->promiscuous == 1)
894 return; /* must remain in all_multicast mode */
896 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
900 * It clears the interrupt causes and enables the interrupt.
901 * It will be called once only during nic initialized.
904 * Pointer to struct rte_eth_dev.
909 * - On success, zero.
910 * - On failure, a negative value.
/* NOTE(review): 'on' is unused here; only prints current link status. */
914 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
916 atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup hook; body elided in this excerpt (appears to
 * be a no-op stub — TODO confirm against full file).
 */
921 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and clear the pending interrupt cause, mask further interrupts,
 * and record whether a link-change needs servicing in intr->flags.
 */
928 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
930 struct atl_interrupt *intr =
931 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
932 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
935 hw_atl_b0_hw_irq_read(hw, &cause);
937 atl_disable_intr(hw);
938 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
939 ATL_FLAG_NEED_LINK_UPDATE : 0;
945 * It gets and then prints the link status.
948 * Pointer to struct rte_eth_dev.
951 * - On success, zero.
952 * - On failure, a negative value.
955 atl_dev_link_status_print(struct rte_eth_dev *dev)
957 struct rte_eth_link link;
959 memset(&link, 0, sizeof(link));
960 rte_eth_linkstatus_get(dev, &link);
961 if (link.link_status) {
962 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
963 (int)(dev->data->port_id),
964 (unsigned int)link.link_speed,
965 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
966 "full-duplex" : "half-duplex");
968 PMD_DRV_LOG(INFO, " Port %d: Link Down",
969 (int)(dev->data->port_id));
/* Also log the PCI address of the port for correlation in logs. */
975 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
977 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
978 pci_dev->addr.domain,
981 pci_dev->addr.function);
985 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
989 * It executes link_update after knowing an interrupt occurred.
992 * Pointer to struct rte_eth_dev.
995 * - On success, zero.
996 * - On failure, a negative value.
999 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1000 struct rte_intr_handle *intr_handle)
1002 struct atl_interrupt *intr =
1003 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
/* Service a link-change: refresh status, log it, notify LSC callbacks. */
1005 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1006 atl_dev_link_update(dev, 0);
1007 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1008 atl_dev_link_status_print(dev);
1009 _rte_eth_dev_callback_process(dev,
1010 RTE_ETH_EVENT_INTR_LSC, NULL);
/* Re-arm interrupts masked by atl_dev_interrupt_get_status(). */
1013 atl_enable_intr(dev);
1014 rte_intr_enable(intr_handle);
1020 * Interrupt handler triggered by NIC for handling
1021 * specific interrupt.
1024 * Pointer to interrupt handle.
1026 * The address of parameter (struct rte_eth_dev *) regsitered before.
1032 atl_dev_interrupt_handler(void *param)
1034 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1036 atl_dev_interrupt_get_status(dev);
1037 atl_dev_interrupt_action(dev, dev->intr_handle);
/* flow_ctrl_get: translate the cached NIC flow-control bits into the
 * ethdev rte_eth_fc_conf mode.
 *
 * Fixes two defects in the original branch chain:
 *  - FULL was tested with '&' over both bits, which also matched RX-only
 *    or TX-only configurations and made the later branches unreachable;
 *    it now requires both bits to be set.
 *  - the TX_PAUSE branch tested AQ_NIC_FC_RX a second time (copy-paste
 *    typo), so RTE_FC_TX_PAUSE could never be reported; it now tests
 *    AQ_NIC_FC_TX.
 */
1042 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1044 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1046 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1047 fc_conf->mode = RTE_FC_NONE;
1048 else if ((hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX)) ==
(AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1049 fc_conf->mode = RTE_FC_FULL;
1050 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1051 fc_conf->mode = RTE_FC_RX_PAUSE;
1052 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
1053 fc_conf->mode = RTE_FC_TX_PAUSE;
/* flow_ctrl_set: map the requested ethdev mode onto the NIC config bits
 * and push the change to firmware only when it actually differs.
 */
1059 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1061 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1062 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
/* Firmware flavour may not support flow-control updates at all. */
1065 if (hw->aq_fw_ops->set_flow_control == NULL)
1068 if (fc_conf->mode == RTE_FC_NONE)
1069 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1070 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1071 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1072 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1073 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1074 else if (fc_conf->mode == RTE_FC_FULL)
1075 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1077 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1078 return hw->aq_fw_ops->set_flow_control(hw);
/* reta_update: copy the caller's redirection entries into the cached RSS
 * config (clamped to valid queue indices) and program the hardware.
 * NOTE(review): only reta_conf[0] is read and the per-entry mask is not
 * honored — verify against callers using reta_size > RTE_RETA_GROUP_SIZE.
 */
1084 atl_reta_update(struct rte_eth_dev *dev,
1085 struct rte_eth_rss_reta_entry64 *reta_conf,
1089 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1090 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1092 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1093 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1094 dev->data->nb_rx_queues - 1);
1096 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
/* reta_query: return the cached indirection table; all entries reported
 * valid via a full mask (table is never partially programmed here).
 */
1101 atl_reta_query(struct rte_eth_dev *dev,
1102 struct rte_eth_rss_reta_entry64 *reta_conf,
1106 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1108 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1109 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1110 reta_conf->mask = ~0U;
/* rss_hash_update: enable/disable RSS per rss_hf and install either the
 * caller-supplied hash key or the built-in default, then program HW.
 * NOTE(review): rss_key_len is copied without being checked against the
 * hash_secret_key buffer size — confirm callers respect hash_key_size
 * from dev_infos_get.
 */
1115 atl_rss_hash_update(struct rte_eth_dev *dev,
1116 struct rte_eth_rss_conf *rss_conf)
1118 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1119 struct aq_hw_cfg_s *cfg =
1120 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1121 static u8 def_rss_key[40] = {
1122 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1123 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1124 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1125 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1126 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1129 cfg->is_rss = !!rss_conf->rss_hf;
1130 if (rss_conf->rss_key) {
1131 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1132 rss_conf->rss_key_len);
1133 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1135 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1136 sizeof(def_rss_key));
1137 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1140 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1141 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/* rss_hash_conf_get: report the cached RSS state; the key is copied out
 * only when the caller provided a buffer.
 */
1146 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1147 struct rte_eth_rss_conf *rss_conf)
1149 struct aq_hw_cfg_s *cfg =
1150 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1152 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1153 if (rss_conf->rss_key) {
1154 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1155 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1156 rss_conf->rss_key_len);
/* Register the PMD, its PCI id table, and kernel-module dependencies. */
1162 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1163 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1164 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the init/driver log types with NOTICE default. */
1166 RTE_INIT(atl_init_log)
1168 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1169 if (atl_logtype_init >= 0)
1170 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1171 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1172 if (atl_logtype_driver >= 0)
1173 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);