/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations for the eth_dev_ops callbacks implemented below.
 * NOTE(review): several prototypes in this extract appear truncated
 * (missing final parameter lines, e.g. the 'unsigned int size);' of
 * atl_dev_xstats_get_names and the 'size_t fw_size);' of
 * atl_fw_version_get) — confirm against the full file.
 */

/* Device lifecycle */
static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);

/* Rx filtering mode */
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);

static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

/* Statistics */
static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_xstat_name *xstats_names,

static int atl_dev_stats_get(struct rte_eth_dev *dev,
	struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
	struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,

static void atl_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
	struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
	struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
	struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

/* MAC filter management */
static int atl_add_mac_addr(struct rte_eth_dev *dev,
	struct ether_addr *mac_addr,
	uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
	struct ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
	struct ether_addr *mc_addr_set,

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int atl_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int atl_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);

/* PCI bus glue */
static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

/* NOTE(review): duplicate forward declaration of atl_dev_info_get —
 * harmless in C, but could be dropped in a cleanup pass.
 */
static void atl_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info);

/* Dynamic log type IDs registered in RTE_INIT(atl_init_log) below. */
int atl_logtype_init;
int atl_logtype_driver;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Early / OEM device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC1xx family */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* "S" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* "E" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
/* NOTE(review): closing '};' of pci_id_atl_map appears to be missing
 * from this extract — confirm against the full file.
 */

/* PCI driver descriptor registered via RTE_PMD_REGISTER_PCI below. */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
/* Rx/Tx offload capabilities advertised in atl_dev_info_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Descriptor ring size/alignment limits reported to applications. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,

/* Maps an xstat name to its offset inside struct aq_stats_s.
 * NOTE(review): the macro body and struct atl_xstats_tbl_s appear
 * truncated here (missing name-string line, member declarations and
 * closing braces) — confirm against the full file.
 */
#define ATL_XSTATS_FIELD(name) { \
	offsetof(struct aq_stats_s, name) \

struct atl_xstats_tbl_s {

/* Extended-statistics table consumed by atl_dev_xstats_get[_names]. */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
/* ethdev callback table wired into each port in eth_atl_dev_init(). */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* Rx filtering mode */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	/* Statistics */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get	      = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set	      = atl_dev_mtu_set,

	/* Queue control (implemented in atl_rxtx.c) */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* Flow control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC filtering */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	/* RSS */
	.reta_update	      = atl_reta_update,
	.reta_query	      = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
262 static inline int32_t
263 atl_reset_hw(struct aq_hw_s *hw)
265 return hw_atl_b0_hw_reset(hw);
/* Unmask (enable) all interrupt sources in the low status word.
 * NOTE(review): the 'static void' return-type line and braces appear
 * missing from this extract — confirm against the full file.
 */
atl_enable_intr(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);

/* Mask (disable) all interrupt sources; takes the hw handle directly
 * so it can be called before the ethdev is fully set up.
 */
atl_disable_intr(struct aq_hw_s *hw)
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port initializer: wires up the ops/burst functions, reads PCI IDs
 * and BAR0, applies the hardcoded HW configuration, loads the firmware
 * ops table, fetches the permanent MAC and enables interrupts.
 * NOTE(review): the 'static int' line, braces, 'int err' declaration,
 * error-path returns and the final 'return 0;' appear missing from this
 * extract — confirm against the full file.
 */
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	/* NOTE(review): continuation rate flags of link_speed_msk are
	 * missing here — confirm against the full file.
	 */
	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");

	/* Detect the FW interface (1.x/2.x/3.x) and bind its ops table. */
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);
/* Per-port teardown: closes the port if still running, clears the ops
 * pointers, unregisters the interrupt callback and frees the MAC table.
 * NOTE(review): 'static int' line, braces, the 'struct aq_hw_s *hw;'
 * declaration and return statements appear missing from this extract.
 */
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Close performs stop + queue free if not already stopped. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
394 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
395 struct rte_pci_device *pci_dev)
397 return rte_eth_dev_pci_generic_probe(pci_dev,
398 sizeof(struct atl_adapter), eth_atl_dev_init);
402 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
404 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure callback: nothing to program yet, just request a link
 * status refresh once the device starts.
 * NOTE(review): 'static int' line, braces and 'return 0;' appear
 * missing from this extract — confirm against the full file.
 */
atl_dev_configure(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 *
 * NOTE(review): this extract is missing the 'static int' line, braces,
 * local declarations (err/status/speed), the error-check branches after
 * atl_reset_hw / hw_atl_b0_hw_init / update_link_status / set_link_speed,
 * the goto error labels and 'return 0;' — confirm against the full file.
 */
atl_dev_start(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	/* Fixed-speed configuration is rejected: FW autonegotiates. */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		"Invalid link_speeds for port %u, fix speed not supported",

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	status = atl_reset_hw(hw);

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
				     ATL_MAX_INTR_QUEUE_NUM);

		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);

	/* initialize transmission unit */

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		     hw->fw_ver_actual >> 24,
		     (hw->fw_ver_actual >> 16) & 0xFF,
		     hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");

	err = hw->aq_fw_ops->update_link_status(hw);

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	link_speeds = &dev->data->dev_conf.link_speeds;

	/* Translate ethdev ETH_LINK_SPEED_* bits into AQ_NIC_RATE_* mask. */
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = hw->aq_nic_cfg->link_speed_msk;
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= AQ_NIC_RATE_10G;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= AQ_NIC_RATE_5G;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= AQ_NIC_RATE_1G;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= AQ_NIC_RATE_2G5;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= AQ_NIC_RATE_100M;

	err = hw->aq_fw_ops->set_link_speed(hw, speed);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
			atl_dev_lsc_interrupt_setup(dev, false);
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	/* NOTE(review): this call is likely under an error label
	 * (queue cleanup path) — confirm against the full file.
	 */
	atl_stop_queues(dev);
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 *
 * NOTE(review): the 'static void' line, braces and the declaration
 * prefix 'struct aq_hw_s *hw =' (before ATL_DEV_PRIVATE_TO_HW) appear
 * missing from this extract — confirm against the full file.
 */
atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
618 * Set device link up: enable tx.
621 atl_dev_set_link_up(struct rte_eth_dev *dev)
623 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625 return hw->aq_fw_ops->set_link_speed(hw,
626 hw->aq_nic_cfg->link_speed_msk);
630 * Set device link down: disable tx.
633 atl_dev_set_link_down(struct rte_eth_dev *dev)
635 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
637 return hw->aq_fw_ops->set_link_speed(hw, 0);
/*
 * Reset and stop device.
 *
 * NOTE(review): the 'static void' line, braces and (presumably) a call
 * to atl_dev_stop() appear missing from this extract — confirm.
 */
atl_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	atl_free_queues(dev);
/* dev_reset callback: full uninit followed by re-init of the port.
 * NOTE(review): 'static int' line, braces, the 'int ret' declaration,
 * intermediate error checks and final 'return ret;' appear missing.
 */
atl_dev_reset(struct rte_eth_dev *dev)
	ret = eth_atl_dev_uninit(dev);

	ret = eth_atl_dev_init(dev);
/* stats_get callback: refresh FW counters, then fill rte_eth_stats from
 * the hardware DMA counters plus the software per-queue counters.
 * NOTE(review): 'static int' line, braces, loop-index declaration and
 * 'return 0;' appear missing from this extract.
 */
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are maintained in software by the PMD. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset / xstats_reset callback: pull the latest FW counters,
 * then zero both the hardware snapshot and the software totals.
 * NOTE(review): 'static void' line and braces appear missing.
 */
atl_dev_stats_reset(struct rte_eth_dev *dev)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names callback: copy table names into the caller's array,
 * or report the table size when no output buffer is supplied.
 * NOTE(review): the 'size' parameter line, the NULL-buffer guard before
 * the early return, braces and the final 'return i;' appear missing.
 */
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
		return RTE_DIM(atl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
			 atl_xstats_tbl[i].name);
/* xstats_get callback: read each counter out of hw->curr_stats via the
 * byte offsets recorded in atl_xstats_tbl.
 * NOTE(review): braces, the NULL-stats guard, the id assignment and the
 * final 'return i;' appear missing from this extract.
 */
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		/* Offset-based access into the aq_stats_s snapshot. */
		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					  atl_xstats_tbl[i].offset);
/* fw_version_get callback: format "major.minor.build" into fw_version
 * and return 0 on success (or the required buffer size per ethdev
 * convention).
 * NOTE(review): braces, the 'uint32_t fw_ver' declaration, error checks
 * and the truncation-handling return appear missing from this extract.
 */
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);

	/* Version is packed as major:8 | minor:8 | build:16. */
	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */
/* dev_infos_get callback: advertise queue counts, buffer/packet limits,
 * offload capabilities, descriptor limits, RSS parameters and the
 * supported link speeds.
 * NOTE(review): 'static void' line and braces appear missing.
 */
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* No VMDq / hash-MAC filtering support. */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* dev_supported_ptypes_get callback: report the packet types the Rx
 * burst function can classify, only when the default burst is in use.
 * NOTE(review): most ptype table entries, the RTE_PTYPE_UNKNOWN
 * terminator, braces and both return statements appear missing from
 * this extract — confirm against the full file.
 */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,

	if (dev->rx_pkt_burst == atl_recv_pkts)
/* return 0 means link status changed, -1 means not changed */
/* NOTE(review): braces, the 'int err' declaration, the link.link_speed
 * initialization and the four return statements appear missing from
 * this extract — confirm against the full file.
 */
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;

	/* Start from a "down" template; overwritten below if FW reports
	 * a non-zero speed.
	 */
	link.link_status = ETH_LINK_DOWN;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)

	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
886 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
888 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
890 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
894 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
896 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
898 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
902 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
904 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
906 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
910 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
912 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
914 if (dev->data->promiscuous == 1)
915 return; /* must remain in all_multicast mode */
917 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 *
 * NOTE(review): braces and 'return 0;' of both functions below appear
 * missing from this extract — confirm against the full file.
 */
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	atl_dev_link_status_print(dev);

/* Rx-queue interrupt setup: nothing to program here; vectors are wired
 * in atl_dev_start().
 */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and clear the pending interrupt cause, masking further
 * interrupts until atl_dev_interrupt_action() re-enables them; records
 * whether a link event needs servicing in intr->flags.
 * NOTE(review): 'static int' line, braces, the 'u64 cause' declaration
 * and 'return 0;' appear missing from this extract.
 */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
		ATL_FLAG_NEED_LINK_UPDATE : 0;
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 *
 * NOTE(review): braces and (presumably) the '#ifdef DEBUG' guard around
 * the PCI-address block appear missing from this extract — confirm.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			    "full-duplex" : "half-duplex");
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));

	/* Debug dump of the PCI address backing this port. */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		    pci_dev->addr.domain,
		    pci_dev->addr.devid,
		    pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 *
 * NOTE(review): braces and 'return 0;' appear missing from this
 * extract — confirm against the full file.
 */
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
		atl_dev_link_update(dev, 0);
		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
		atl_dev_link_status_print(dev);
		/* Notify registered applications of the link change. */
		_rte_eth_dev_callback_process(dev,
					      RTE_ETH_EVENT_INTR_LSC, NULL);

	/* Re-arm interrupts masked by atl_dev_interrupt_get_status(). */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * NOTE(review): 'static void' line and braces appear missing from this
 * extract — confirm against the full file.
 */
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
1063 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1065 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1067 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1068 fc_conf->mode = RTE_FC_NONE;
1069 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1070 fc_conf->mode = RTE_FC_FULL;
1071 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1072 fc_conf->mode = RTE_FC_RX_PAUSE;
1073 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1074 fc_conf->mode = RTE_FC_TX_PAUSE;
/* flow_ctrl_set callback: map the requested rte_eth_fc mode onto the
 * internal RX/TX pause bits and push the change to firmware only when
 * it differs from the current setting.
 * NOTE(review): braces, the -ENOTSUP return after the NULL-ops check
 * and the final 'return 0;' appear missing from this extract.
 */
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	/* Old 1.x firmware has no flow-control op. */
	if (hw->aq_fw_ops->set_flow_control == NULL)

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);
/* Program or disable the unicast L2 filter at 'index'. The 48-bit MAC
 * is split into a 16-bit high word (h) and 32-bit low word (l) as the
 * filter registers expect.
 * NOTE(review): the 'static int' line, braces, the guard around the
 * h/l computation (mac_addr may be NULL when disabling), the enable
 * conditional and the 'return err;' appear missing from this extract.
 */
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	/* Disable the filter while rewriting the address words. */
	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);
/* mac_addr_add callback: validate and program a unicast MAC filter.
 * NOTE(review): the 'static int' line, braces and the error return
 * after the invalid-address log appear missing from this extract.
 */
atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
	if (is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1144 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1146 atl_update_mac_addr(dev, index, NULL, false);
/* mac_addr_set callback: replace the default (index 0) MAC filter.
 * NOTE(review): the 'static int' line, braces and a return statement
 * appear missing from this extract — confirm against the full file.
 */
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
/* mtu_set callback: validate the requested MTU against the device's
 * maximum Rx packet length and record the new max frame size.
 * NOTE(review): 'static int' line, braces, the -EINVAL return on the
 * range check and the final 'return 0;' appear missing.
 */
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct rte_eth_dev_info dev_info;
	/* Frame = MTU plus L2 header and CRC. */
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	atl_dev_info_get(dev, &dev_info);

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* set_mc_addr_list callback: program the multicast set into the unicast
 * filter slots above HW_ATL_B0_MAC_MIN; slots beyond nb_mc_addr are
 * disabled (mac_addr stays NULL so the enable flag is 0).
 * NOTE(review): 'static int' line, braces, the u32 h/l declarations,
 * the -EINVAL return on overflow and the final return appear missing.
 */
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			    (mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		/* Enable the slot only when a real address was written. */
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
/* reta_update callback: copy the indirection entries into the cached
 * RSS config, clamped to the configured Rx queue range, and push the
 * table to hardware.
 * NOTE(review): 'static int' line, the 'uint16_t reta_size' parameter
 * line, braces, the loop-index declaration and 'return 0;' appear
 * missing from this extract.
 */
atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
						      dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
/* reta_query callback: return the cached indirection table and mark all
 * entries valid via the mask.
 * NOTE(review): 'static int' line, the reta_size parameter line, braces
 * and 'return 0;' appear missing from this extract.
 */
atl_reta_query(struct rte_eth_dev *dev,
	       struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
/* rss_hash_update callback: enable/disable RSS per rss_hf and install
 * either the caller's hash key or a built-in default key, then program
 * both the redirection table and the hash key into hardware.
 * NOTE(review): 'static int' line, braces, the else keyword before the
 * default-key branch and 'return 0;' appear missing from this extract.
 */
atl_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	/* Default 40-byte Toeplitz key used when the caller supplies none. */
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/* rss_hash_conf_get callback: report the cached RSS state; the key is
 * copied out only when the caller provides a buffer.
 * NOTE(review): 'static int' line, braces and 'return 0;' appear
 * missing from this extract — confirm against the full file.
 */
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
/* Register the PMD with the PCI bus, export its ID table and declare
 * the kernel-module dependency for binding.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

/* Constructor: register the dynamic log types and default them to
 * NOTICE level.
 * NOTE(review): braces of the RTE_INIT body appear missing from this
 * extract — confirm against the full file.
 */
RTE_INIT(atl_init_log)
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);