/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
53 struct rte_eth_fc_conf *fc_conf);
54 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
55 struct rte_eth_fc_conf *fc_conf);
57 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
60 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
61 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
62 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
63 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
64 struct rte_intr_handle *handle);
65 static void atl_dev_interrupt_handler(void *param);
68 static int atl_add_mac_addr(struct rte_eth_dev *dev,
69 struct ether_addr *mac_addr,
70 uint32_t index, uint32_t pool);
71 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
72 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
73 struct ether_addr *mac_addr);
75 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
76 struct ether_addr *mc_addr_set,
80 static int atl_reta_update(struct rte_eth_dev *dev,
81 struct rte_eth_rss_reta_entry64 *reta_conf,
83 static int atl_reta_query(struct rte_eth_dev *dev,
84 struct rte_eth_rss_reta_entry64 *reta_conf,
86 static int atl_rss_hash_update(struct rte_eth_dev *dev,
87 struct rte_eth_rss_conf *rss_conf);
88 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
89 struct rte_eth_rss_conf *rss_conf);
92 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
93 struct rte_pci_device *pci_dev);
94 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
96 static void atl_dev_info_get(struct rte_eth_dev *dev,
97 struct rte_eth_dev_info *dev_info);
100 int atl_logtype_driver;
103 * The set of PCI devices this driver supports
105 static const struct rte_pci_id pci_id_atl_map[] = {
106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
122 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
123 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
124 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
128 { .vendor_id = 0, /* sentinel */ },
131 static struct rte_pci_driver rte_atl_pmd = {
132 .id_table = pci_id_atl_map,
133 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
134 RTE_PCI_DRV_IOVA_AS_VA,
135 .probe = eth_atl_pci_probe,
136 .remove = eth_atl_pci_remove,
/* Rx offloads supported by the hardware and advertised in dev_info. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Tx offloads supported by the hardware and advertised in dev_info. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
152 static const struct rte_eth_desc_lim rx_desc_lim = {
153 .nb_max = ATL_MAX_RING_DESC,
154 .nb_min = ATL_MIN_RING_DESC,
155 .nb_align = ATL_RXD_ALIGN,
158 static const struct rte_eth_desc_lim tx_desc_lim = {
159 .nb_max = ATL_MAX_RING_DESC,
160 .nb_min = ATL_MIN_RING_DESC,
161 .nb_align = ATL_TXD_ALIGN,
162 .nb_seg_max = ATL_TX_MAX_SEG,
163 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
166 #define ATL_XSTATS_FIELD(name) { \
168 offsetof(struct aq_stats_s, name) \
171 struct atl_xstats_tbl_s {
176 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
177 ATL_XSTATS_FIELD(uprc),
178 ATL_XSTATS_FIELD(mprc),
179 ATL_XSTATS_FIELD(bprc),
180 ATL_XSTATS_FIELD(erpt),
181 ATL_XSTATS_FIELD(uptc),
182 ATL_XSTATS_FIELD(mptc),
183 ATL_XSTATS_FIELD(bptc),
184 ATL_XSTATS_FIELD(erpr),
185 ATL_XSTATS_FIELD(ubrc),
186 ATL_XSTATS_FIELD(ubtc),
187 ATL_XSTATS_FIELD(mbrc),
188 ATL_XSTATS_FIELD(mbtc),
189 ATL_XSTATS_FIELD(bbrc),
190 ATL_XSTATS_FIELD(bbtc),
193 static const struct eth_dev_ops atl_eth_dev_ops = {
194 .dev_configure = atl_dev_configure,
195 .dev_start = atl_dev_start,
196 .dev_stop = atl_dev_stop,
197 .dev_set_link_up = atl_dev_set_link_up,
198 .dev_set_link_down = atl_dev_set_link_down,
199 .dev_close = atl_dev_close,
200 .dev_reset = atl_dev_reset,
203 .promiscuous_enable = atl_dev_promiscuous_enable,
204 .promiscuous_disable = atl_dev_promiscuous_disable,
205 .allmulticast_enable = atl_dev_allmulticast_enable,
206 .allmulticast_disable = atl_dev_allmulticast_disable,
209 .link_update = atl_dev_link_update,
212 .stats_get = atl_dev_stats_get,
213 .xstats_get = atl_dev_xstats_get,
214 .xstats_get_names = atl_dev_xstats_get_names,
215 .stats_reset = atl_dev_stats_reset,
216 .xstats_reset = atl_dev_stats_reset,
218 .fw_version_get = atl_fw_version_get,
219 .dev_infos_get = atl_dev_info_get,
220 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
223 .rx_queue_start = atl_rx_queue_start,
224 .rx_queue_stop = atl_rx_queue_stop,
225 .rx_queue_setup = atl_rx_queue_setup,
226 .rx_queue_release = atl_rx_queue_release,
228 .tx_queue_start = atl_tx_queue_start,
229 .tx_queue_stop = atl_tx_queue_stop,
230 .tx_queue_setup = atl_tx_queue_setup,
231 .tx_queue_release = atl_tx_queue_release,
233 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
234 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
236 .rx_queue_count = atl_rx_queue_count,
237 .rx_descriptor_status = atl_dev_rx_descriptor_status,
238 .tx_descriptor_status = atl_dev_tx_descriptor_status,
241 .flow_ctrl_get = atl_flow_ctrl_get,
242 .flow_ctrl_set = atl_flow_ctrl_set,
245 .mac_addr_add = atl_add_mac_addr,
246 .mac_addr_remove = atl_remove_mac_addr,
247 .mac_addr_set = atl_set_default_mac_addr,
248 .set_mc_addr_list = atl_dev_set_mc_addr_list,
249 .rxq_info_get = atl_rxq_info_get,
250 .txq_info_get = atl_txq_info_get,
252 .reta_update = atl_reta_update,
253 .reta_query = atl_reta_query,
254 .rss_hash_update = atl_rss_hash_update,
255 .rss_hash_conf_get = atl_rss_hash_conf_get,
258 static inline int32_t
259 atl_reset_hw(struct aq_hw_s *hw)
261 return hw_atl_b0_hw_reset(hw);
265 atl_enable_intr(struct rte_eth_dev *dev)
267 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
269 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt sources on the adapter. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
280 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
282 struct atl_adapter *adapter =
283 (struct atl_adapter *)eth_dev->data->dev_private;
284 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
285 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
286 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
289 PMD_INIT_FUNC_TRACE();
291 eth_dev->dev_ops = &atl_eth_dev_ops;
292 eth_dev->rx_pkt_burst = &atl_recv_pkts;
293 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
294 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
296 /* For secondary processes, the primary process has done all the work */
297 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
300 /* Vendor and Device ID need to be set before init of shared code */
301 hw->device_id = pci_dev->id.device_id;
302 hw->vendor_id = pci_dev->id.vendor_id;
303 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
305 /* Hardware configuration - hardcode */
306 adapter->hw_cfg.is_lro = false;
307 adapter->hw_cfg.wol = false;
308 adapter->hw_cfg.is_rss = false;
309 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
311 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
317 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
318 adapter->hw_cfg.aq_rss.indirection_table_size =
319 HW_ATL_B0_RSS_REDIRECTION_MAX;
321 hw->aq_nic_cfg = &adapter->hw_cfg;
323 /* disable interrupt */
324 atl_disable_intr(hw);
326 /* Allocate memory for storing MAC addresses */
327 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
328 if (eth_dev->data->mac_addrs == NULL) {
329 PMD_INIT_LOG(ERR, "MAC Malloc failed");
333 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
337 /* Copy the permanent MAC address */
338 if (hw->aq_fw_ops->get_mac_permanent(hw,
339 eth_dev->data->mac_addrs->addr_bytes) != 0)
342 /* Reset the hw statistics */
343 atl_dev_stats_reset(eth_dev);
345 rte_intr_callback_register(intr_handle,
346 atl_dev_interrupt_handler, eth_dev);
348 /* enable uio/vfio intr/eventfd mapping */
349 rte_intr_enable(intr_handle);
351 /* enable support intr */
352 atl_enable_intr(eth_dev);
358 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
360 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
361 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364 PMD_INIT_FUNC_TRACE();
366 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
369 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
371 if (hw->adapter_stopped == 0)
372 atl_dev_close(eth_dev);
374 eth_dev->dev_ops = NULL;
375 eth_dev->rx_pkt_burst = NULL;
376 eth_dev->tx_pkt_burst = NULL;
378 /* disable uio intr before callback unregister */
379 rte_intr_disable(intr_handle);
380 rte_intr_callback_unregister(intr_handle,
381 atl_dev_interrupt_handler, eth_dev);
383 rte_free(eth_dev->data->mac_addrs);
384 eth_dev->data->mac_addrs = NULL;
390 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
391 struct rte_pci_device *pci_dev)
393 return rte_eth_dev_pci_generic_probe(pci_dev,
394 sizeof(struct atl_adapter), eth_atl_dev_init);
398 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
400 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
404 atl_dev_configure(struct rte_eth_dev *dev)
406 struct atl_interrupt *intr =
407 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
409 PMD_INIT_FUNC_TRACE();
411 /* set flag to update link status after init */
412 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
418 * Configure device link speed and setup link.
419 * It returns 0 on success.
422 atl_dev_start(struct rte_eth_dev *dev)
424 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
425 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
426 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
427 uint32_t intr_vector = 0;
428 uint32_t *link_speeds;
433 PMD_INIT_FUNC_TRACE();
435 /* set adapter started */
436 hw->adapter_stopped = 0;
438 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
440 "Invalid link_speeds for port %u, fix speed not supported",
445 /* disable uio/vfio intr/eventfd mapping */
446 rte_intr_disable(intr_handle);
448 /* reinitialize adapter
449 * this calls reset and start
451 status = atl_reset_hw(hw);
455 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
457 hw_atl_b0_hw_start(hw);
458 /* check and configure queue intr-vector mapping */
459 if ((rte_intr_cap_multiple(intr_handle) ||
460 !RTE_ETH_DEV_SRIOV(dev).active) &&
461 dev->data->dev_conf.intr_conf.rxq != 0) {
462 intr_vector = dev->data->nb_rx_queues;
463 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
464 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
465 ATL_MAX_INTR_QUEUE_NUM);
468 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
469 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
474 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
475 intr_handle->intr_vec = rte_zmalloc("intr_vec",
476 dev->data->nb_rx_queues * sizeof(int), 0);
477 if (intr_handle->intr_vec == NULL) {
478 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
479 " intr_vec", dev->data->nb_rx_queues);
484 /* initialize transmission unit */
487 /* This can fail when allocating mbufs for descriptor rings */
488 err = atl_rx_init(dev);
490 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
494 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
495 hw->fw_ver_actual >> 24,
496 (hw->fw_ver_actual >> 16) & 0xFF,
497 hw->fw_ver_actual & 0xFFFF);
498 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
500 err = atl_start_queues(dev);
502 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
506 err = hw->aq_fw_ops->update_link_status(hw);
511 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
513 link_speeds = &dev->data->dev_conf.link_speeds;
517 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
518 speed = hw->aq_nic_cfg->link_speed_msk;
520 if (*link_speeds & ETH_LINK_SPEED_10G)
521 speed |= AQ_NIC_RATE_10G;
522 if (*link_speeds & ETH_LINK_SPEED_5G)
523 speed |= AQ_NIC_RATE_5G;
524 if (*link_speeds & ETH_LINK_SPEED_1G)
525 speed |= AQ_NIC_RATE_1G;
526 if (*link_speeds & ETH_LINK_SPEED_2_5G)
527 speed |= AQ_NIC_RATE_2G5;
528 if (*link_speeds & ETH_LINK_SPEED_100M)
529 speed |= AQ_NIC_RATE_100M;
532 err = hw->aq_fw_ops->set_link_speed(hw, speed);
536 if (rte_intr_allow_others(intr_handle)) {
537 /* check if lsc interrupt is enabled */
538 if (dev->data->dev_conf.intr_conf.lsc != 0)
539 atl_dev_lsc_interrupt_setup(dev, true);
541 atl_dev_lsc_interrupt_setup(dev, false);
543 rte_intr_callback_unregister(intr_handle,
544 atl_dev_interrupt_handler, dev);
545 if (dev->data->dev_conf.intr_conf.lsc != 0)
546 PMD_INIT_LOG(INFO, "lsc won't enable because of"
547 " no intr multiplex");
550 /* check if rxq interrupt is enabled */
551 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
552 rte_intr_dp_is_en(intr_handle))
553 atl_dev_rxq_interrupt_setup(dev);
555 /* enable uio/vfio intr/eventfd mapping */
556 rte_intr_enable(intr_handle);
558 /* resume enabled intr since hw reset */
559 atl_enable_intr(dev);
564 atl_stop_queues(dev);
569 * Stop device: disable rx and tx functions to allow for reconfiguring.
572 atl_dev_stop(struct rte_eth_dev *dev)
574 struct rte_eth_link link;
576 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
577 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
578 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
580 PMD_INIT_FUNC_TRACE();
582 /* disable interrupts */
583 atl_disable_intr(hw);
587 hw->adapter_stopped = 1;
589 atl_stop_queues(dev);
591 /* Clear stored conf */
592 dev->data->scattered_rx = 0;
595 /* Clear recorded link status */
596 memset(&link, 0, sizeof(link));
597 rte_eth_linkstatus_set(dev, &link);
599 if (!rte_intr_allow_others(intr_handle))
600 /* resume to the default handler */
601 rte_intr_callback_register(intr_handle,
602 atl_dev_interrupt_handler,
605 /* Clean datapath event and queue/vec mapping */
606 rte_intr_efd_disable(intr_handle);
607 if (intr_handle->intr_vec != NULL) {
608 rte_free(intr_handle->intr_vec);
609 intr_handle->intr_vec = NULL;
614 * Set device link up: enable tx.
617 atl_dev_set_link_up(struct rte_eth_dev *dev)
619 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
621 return hw->aq_fw_ops->set_link_speed(hw,
622 hw->aq_nic_cfg->link_speed_msk);
626 * Set device link down: disable tx.
629 atl_dev_set_link_down(struct rte_eth_dev *dev)
631 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
633 return hw->aq_fw_ops->set_link_speed(hw, 0);
/*
 * Reset and stop device: stop the port, then release its queues.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
/* dev_reset: full uninit followed by re-init of the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}
665 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
667 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
668 struct aq_hw_s *hw = &adapter->hw;
669 struct atl_sw_stats *swstats = &adapter->sw_stats;
672 hw->aq_fw_ops->update_stats(hw);
674 /* Fill out the rte_eth_stats statistics structure */
675 stats->ipackets = hw->curr_stats.dma_pkt_rc;
676 stats->ibytes = hw->curr_stats.dma_oct_rc;
677 stats->imissed = hw->curr_stats.dpc;
678 stats->ierrors = hw->curr_stats.erpt;
680 stats->opackets = hw->curr_stats.dma_pkt_tc;
681 stats->obytes = hw->curr_stats.dma_oct_tc;
684 stats->rx_nombuf = swstats->rx_nombuf;
686 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
687 stats->q_ipackets[i] = swstats->q_ipackets[i];
688 stats->q_opackets[i] = swstats->q_opackets[i];
689 stats->q_ibytes[i] = swstats->q_ibytes[i];
690 stats->q_obytes[i] = swstats->q_obytes[i];
691 stats->q_errors[i] = swstats->q_errors[i];
697 atl_dev_stats_reset(struct rte_eth_dev *dev)
699 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
700 struct aq_hw_s *hw = &adapter->hw;
702 hw->aq_fw_ops->update_stats(hw);
704 /* Reset software totals */
705 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
707 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
711 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
712 struct rte_eth_xstat_name *xstats_names,
718 return RTE_DIM(atl_xstats_tbl);
720 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
721 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
722 atl_xstats_tbl[i].name);
728 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
731 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
732 struct aq_hw_s *hw = &adapter->hw;
738 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
740 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
741 atl_xstats_tbl[i].offset);
748 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
750 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
752 unsigned int ret = 0;
754 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
758 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
759 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
761 ret += 1; /* add string null-terminator */
770 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
772 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
774 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
775 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
777 dev_info->min_rx_bufsize = 1024;
778 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
779 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
780 dev_info->max_vfs = pci_dev->max_vfs;
782 dev_info->max_hash_mac_addrs = 0;
783 dev_info->max_vmdq_pools = 0;
784 dev_info->vmdq_queue_num = 0;
786 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
788 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
791 dev_info->default_rxconf = (struct rte_eth_rxconf) {
792 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
795 dev_info->default_txconf = (struct rte_eth_txconf) {
796 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
799 dev_info->rx_desc_lim = rx_desc_lim;
800 dev_info->tx_desc_lim = tx_desc_lim;
802 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
803 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
804 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
806 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
807 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
808 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
809 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
812 static const uint32_t *
813 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
815 static const uint32_t ptypes[] = {
817 RTE_PTYPE_L2_ETHER_ARP,
818 RTE_PTYPE_L2_ETHER_VLAN,
828 if (dev->rx_pkt_burst == atl_recv_pkts)
834 /* return 0 means link status changed, -1 means not changed */
836 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
838 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
839 struct atl_interrupt *intr =
840 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
841 struct rte_eth_link link, old;
844 link.link_status = ETH_LINK_DOWN;
846 link.link_duplex = ETH_LINK_FULL_DUPLEX;
847 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
848 memset(&old, 0, sizeof(old));
850 /* load old link status */
851 rte_eth_linkstatus_get(dev, &old);
853 /* read current link status */
854 err = hw->aq_fw_ops->update_link_status(hw);
859 if (hw->aq_link_status.mbps == 0) {
860 /* write default (down) link status */
861 rte_eth_linkstatus_set(dev, &link);
862 if (link.link_status == old.link_status)
867 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
869 link.link_status = ETH_LINK_UP;
870 link.link_duplex = ETH_LINK_FULL_DUPLEX;
871 link.link_speed = hw->aq_link_status.mbps;
873 rte_eth_linkstatus_set(dev, &link);
875 if (link.link_status == old.link_status)
882 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
884 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
886 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
890 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
892 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
894 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
898 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
900 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
902 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
906 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
908 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
910 if (dev->data->promiscuous == 1)
911 return; /* must remain in all_multicast mode */
913 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
917 * It clears the interrupt causes and enables the interrupt.
918 * It will be called once only during nic initialized.
921 * Pointer to struct rte_eth_dev.
926 * - On success, zero.
927 * - On failure, a negative value.
931 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
933 atl_dev_link_status_print(dev);
938 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
945 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
947 struct atl_interrupt *intr =
948 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
949 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
952 hw_atl_b0_hw_irq_read(hw, &cause);
954 atl_disable_intr(hw);
955 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
956 ATL_FLAG_NEED_LINK_UPDATE : 0;
962 * It gets and then prints the link status.
965 * Pointer to struct rte_eth_dev.
968 * - On success, zero.
969 * - On failure, a negative value.
972 atl_dev_link_status_print(struct rte_eth_dev *dev)
974 struct rte_eth_link link;
976 memset(&link, 0, sizeof(link));
977 rte_eth_linkstatus_get(dev, &link);
978 if (link.link_status) {
979 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
980 (int)(dev->data->port_id),
981 (unsigned int)link.link_speed,
982 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
983 "full-duplex" : "half-duplex");
985 PMD_DRV_LOG(INFO, " Port %d: Link Down",
986 (int)(dev->data->port_id));
992 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
994 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
995 pci_dev->addr.domain,
998 pci_dev->addr.function);
1002 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1006 * It executes link_update after knowing an interrupt occurred.
1009 * Pointer to struct rte_eth_dev.
1012 * - On success, zero.
1013 * - On failure, a negative value.
1016 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1017 struct rte_intr_handle *intr_handle)
1019 struct atl_interrupt *intr =
1020 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1022 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1023 atl_dev_link_update(dev, 0);
1024 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1025 atl_dev_link_status_print(dev);
1026 _rte_eth_dev_callback_process(dev,
1027 RTE_ETH_EVENT_INTR_LSC, NULL);
1030 atl_enable_intr(dev);
1031 rte_intr_enable(intr_handle);
1037 * Interrupt handler triggered by NIC for handling
1038 * specific interrupt.
1041 * Pointer to interrupt handle.
1043 * The address of parameter (struct rte_eth_dev *) regsitered before.
1049 atl_dev_interrupt_handler(void *param)
1051 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1053 atl_dev_interrupt_get_status(dev);
1054 atl_dev_interrupt_action(dev, dev->intr_handle);
1059 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1061 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1064 fc_conf->mode = RTE_FC_NONE;
1065 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1066 fc_conf->mode = RTE_FC_FULL;
1067 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1068 fc_conf->mode = RTE_FC_RX_PAUSE;
1069 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1070 fc_conf->mode = RTE_FC_TX_PAUSE;
1076 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1078 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1079 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1082 if (hw->aq_fw_ops->set_flow_control == NULL)
1085 if (fc_conf->mode == RTE_FC_NONE)
1086 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1087 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1088 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1089 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1090 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1091 else if (fc_conf->mode == RTE_FC_FULL)
1092 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1094 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1095 return hw->aq_fw_ops->set_flow_control(hw);
1101 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1102 u8 *mac_addr, bool enable)
1104 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1105 unsigned int h = 0U;
1106 unsigned int l = 0U;
1110 h = (mac_addr[0] << 8) | (mac_addr[1]);
1111 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1112 (mac_addr[4] << 8) | mac_addr[5];
1115 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1116 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1117 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1120 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1122 err = aq_hw_err_from_flags(hw);
1128 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1129 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1131 if (is_zero_ether_addr(mac_addr)) {
1132 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1136 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1140 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1142 atl_update_mac_addr(dev, index, NULL, false);
/* mac_addr_set callback: replace filter slot 0 with the new default MAC.
 * FIX: propagate atl_add_mac_addr()'s result instead of discarding it —
 * the function is declared int and callers expect programming failures
 * (e.g. zero address, hardware error) to be reported.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1154 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1155 struct ether_addr *mc_addr_set,
1156 uint32_t nb_mc_addr)
1158 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1161 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1164 /* Update whole uc filters table */
1165 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1166 u8 *mac_addr = NULL;
1169 if (i < nb_mc_addr) {
1170 mac_addr = mc_addr_set[i].addr_bytes;
1171 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1172 (mac_addr[4] << 8) | mac_addr[5];
1173 h = (mac_addr[0] << 8) | mac_addr[1];
1176 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1177 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1178 HW_ATL_B0_MAC_MIN + i);
1179 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1180 HW_ATL_B0_MAC_MIN + i);
1181 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1182 HW_ATL_B0_MAC_MIN + i);
1189 atl_reta_update(struct rte_eth_dev *dev,
1190 struct rte_eth_rss_reta_entry64 *reta_conf,
1194 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1195 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1197 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1198 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1199 dev->data->nb_rx_queues - 1);
1201 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1206 atl_reta_query(struct rte_eth_dev *dev,
1207 struct rte_eth_rss_reta_entry64 *reta_conf,
1211 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1213 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1214 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1215 reta_conf->mask = ~0U;
1220 atl_rss_hash_update(struct rte_eth_dev *dev,
1221 struct rte_eth_rss_conf *rss_conf)
1223 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1224 struct aq_hw_cfg_s *cfg =
1225 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1226 static u8 def_rss_key[40] = {
1227 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1228 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1229 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1230 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1231 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1234 cfg->is_rss = !!rss_conf->rss_hf;
1235 if (rss_conf->rss_key) {
1236 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1237 rss_conf->rss_key_len);
1238 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1240 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1241 sizeof(def_rss_key));
1242 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1245 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1246 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1251 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1252 struct rte_eth_rss_conf *rss_conf)
1254 struct aq_hw_cfg_s *cfg =
1255 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1257 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1258 if (rss_conf->rss_key) {
1259 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1260 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1261 rss_conf->rss_key_len);
1267 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1268 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1269 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1271 RTE_INIT(atl_init_log)
1273 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1274 if (atl_logtype_init >= 0)
1275 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1276 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1277 if (atl_logtype_driver >= 0)
1278 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);