1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
/* Adapter init/teardown entry points (invoked from the PCI probe/remove). */
static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

/* Device lifecycle callbacks wired into atl_eth_dev_ops below. */
static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);

/* Rx filtering mode callbacks. */
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);

static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

/* Statistics callbacks.
 * NOTE(review): this declaration is truncated in the listing (trailing
 * size parameter elided) — confirm against the full source.
 */
static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,

static int atl_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

/* NOTE(review): trailing "size_t fw_size);" elided in this listing. */
static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,

static void atl_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN offload/filtering callbacks. */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM access callbacks. */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Register dump callback. */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control callbacks. */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			     struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupt helpers (LSC + Rx queue interrupts). */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

/* MAC address management callbacks. */
static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
				    struct ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS callbacks.
 * NOTE(review): trailing "uint16_t reta_size);" elided in this listing for
 * both reta declarations.
 */
static int atl_reta_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,

static int atl_reta_query(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,

static int atl_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);

/* PCI bus glue. */
static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

/* NOTE(review): duplicate of the atl_dev_info_get declaration above —
 * harmless, but could be removed.
 */
static void atl_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);

/* Log type IDs for init-time and runtime driver logging. */
int atl_logtype_init;
int atl_logtype_driver;
130 * The set of PCI devices this driver supports
132 static const struct rte_pci_id pci_id_atl_map[] = {
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
150 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
151 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
153 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
154 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
155 { .vendor_id = 0, /* sentinel */ },
158 static struct rte_pci_driver rte_atl_pmd = {
159 .id_table = pci_id_atl_map,
160 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
161 RTE_PCI_DRV_IOVA_AS_VA,
162 .probe = eth_atl_pci_probe,
163 .remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised through dev_infos_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised through dev_infos_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
182 static const struct rte_eth_desc_lim rx_desc_lim = {
183 .nb_max = ATL_MAX_RING_DESC,
184 .nb_min = ATL_MIN_RING_DESC,
185 .nb_align = ATL_RXD_ALIGN,
188 static const struct rte_eth_desc_lim tx_desc_lim = {
189 .nb_max = ATL_MAX_RING_DESC,
190 .nb_min = ATL_MIN_RING_DESC,
191 .nb_align = ATL_TXD_ALIGN,
192 .nb_seg_max = ATL_TX_MAX_SEG,
193 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
196 #define ATL_XSTATS_FIELD(name) { \
198 offsetof(struct aq_stats_s, name) \
201 struct atl_xstats_tbl_s {
206 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
207 ATL_XSTATS_FIELD(uprc),
208 ATL_XSTATS_FIELD(mprc),
209 ATL_XSTATS_FIELD(bprc),
210 ATL_XSTATS_FIELD(erpt),
211 ATL_XSTATS_FIELD(uptc),
212 ATL_XSTATS_FIELD(mptc),
213 ATL_XSTATS_FIELD(bptc),
214 ATL_XSTATS_FIELD(erpr),
215 ATL_XSTATS_FIELD(ubrc),
216 ATL_XSTATS_FIELD(ubtc),
217 ATL_XSTATS_FIELD(mbrc),
218 ATL_XSTATS_FIELD(mbtc),
219 ATL_XSTATS_FIELD(bbrc),
220 ATL_XSTATS_FIELD(bbtc),
223 static const struct eth_dev_ops atl_eth_dev_ops = {
224 .dev_configure = atl_dev_configure,
225 .dev_start = atl_dev_start,
226 .dev_stop = atl_dev_stop,
227 .dev_set_link_up = atl_dev_set_link_up,
228 .dev_set_link_down = atl_dev_set_link_down,
229 .dev_close = atl_dev_close,
230 .dev_reset = atl_dev_reset,
233 .promiscuous_enable = atl_dev_promiscuous_enable,
234 .promiscuous_disable = atl_dev_promiscuous_disable,
235 .allmulticast_enable = atl_dev_allmulticast_enable,
236 .allmulticast_disable = atl_dev_allmulticast_disable,
239 .link_update = atl_dev_link_update,
241 .get_reg = atl_dev_get_regs,
244 .stats_get = atl_dev_stats_get,
245 .xstats_get = atl_dev_xstats_get,
246 .xstats_get_names = atl_dev_xstats_get_names,
247 .stats_reset = atl_dev_stats_reset,
248 .xstats_reset = atl_dev_stats_reset,
250 .fw_version_get = atl_fw_version_get,
251 .dev_infos_get = atl_dev_info_get,
252 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
254 .mtu_set = atl_dev_mtu_set,
257 .vlan_filter_set = atl_vlan_filter_set,
258 .vlan_offload_set = atl_vlan_offload_set,
259 .vlan_tpid_set = atl_vlan_tpid_set,
260 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
263 .rx_queue_start = atl_rx_queue_start,
264 .rx_queue_stop = atl_rx_queue_stop,
265 .rx_queue_setup = atl_rx_queue_setup,
266 .rx_queue_release = atl_rx_queue_release,
268 .tx_queue_start = atl_tx_queue_start,
269 .tx_queue_stop = atl_tx_queue_stop,
270 .tx_queue_setup = atl_tx_queue_setup,
271 .tx_queue_release = atl_tx_queue_release,
273 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
274 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
276 .rx_queue_count = atl_rx_queue_count,
277 .rx_descriptor_status = atl_dev_rx_descriptor_status,
278 .tx_descriptor_status = atl_dev_tx_descriptor_status,
281 .get_eeprom_length = atl_dev_get_eeprom_length,
282 .get_eeprom = atl_dev_get_eeprom,
283 .set_eeprom = atl_dev_set_eeprom,
286 .flow_ctrl_get = atl_flow_ctrl_get,
287 .flow_ctrl_set = atl_flow_ctrl_set,
290 .mac_addr_add = atl_add_mac_addr,
291 .mac_addr_remove = atl_remove_mac_addr,
292 .mac_addr_set = atl_set_default_mac_addr,
293 .set_mc_addr_list = atl_dev_set_mc_addr_list,
294 .rxq_info_get = atl_rxq_info_get,
295 .txq_info_get = atl_txq_info_get,
297 .reta_update = atl_reta_update,
298 .reta_query = atl_reta_query,
299 .rss_hash_update = atl_rss_hash_update,
300 .rss_hash_conf_get = atl_rss_hash_conf_get,
303 static inline int32_t
304 atl_reset_hw(struct aq_hw_s *hw)
306 return hw_atl_b0_hw_reset(hw);
310 atl_enable_intr(struct rte_eth_dev *dev)
312 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
314 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt causes (lower status word) for the device. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
/*
 * Per-device init: install ops and burst functions, hard-code the adapter
 * configuration, initialize FW ops, read the permanent MAC address and
 * enable interrupts.
 * NOTE(review): several lines (braces, error returns) are elided in this
 * listing — confirm error paths against the full source.
 */
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	/* NOTE(review): remaining rate bits of this mask elided in listing. */
	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");

	/* Bind the FW-interface ops table matching the running firmware. */
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);
/*
 * Per-device teardown: close the port if still running, detach ops and
 * burst functions, unregister the interrupt callback and free the MAC
 * address storage.  Primary process only.
 * NOTE(review): braces and return statements elided in this listing.
 */
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Close the device first if the adapter is still running. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
435 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
436 struct rte_pci_device *pci_dev)
438 return rte_eth_dev_pci_generic_probe(pci_dev,
439 sizeof(struct atl_adapter), eth_atl_dev_init);
443 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
445 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
449 atl_dev_configure(struct rte_eth_dev *dev)
451 struct atl_interrupt *intr =
452 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
454 PMD_INIT_FUNC_TRACE();
456 /* set flag to update link status after init */
457 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 *
 * NOTE(review): error-handling lines (returns/gotos) are elided in this
 * listing — confirm the failure paths against the full source.
 */
atl_dev_start(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	/* Fixed (non-autoneg) link speed is not supported by this HW/FW. */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		"Invalid link_speeds for port %u, fix speed not supported",

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);

		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");

	/* Allocate per-queue interrupt vector map if eventfds are in use. */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);

	/* initialize transmission unit */

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		     hw->fw_ver_actual >> 24,
		     (hw->fw_ver_actual >> 16) & 0xFF,
		     hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");

	/* Apply the requested speed mask and read back link state. */
	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	/* LSC interrupt is only usable when interrupt multiplexing works. */
	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
			atl_dev_lsc_interrupt_setup(dev, false);

		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	/* error-path cleanup (label elided in this listing) */
	atl_stop_queues(dev);
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * NOTE(review): the hw-pointer initializer head and the hardware reset
 * call are elided in this listing — confirm against the full source.
 */
atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
639 * Set device link up: enable tx.
642 atl_dev_set_link_up(struct rte_eth_dev *dev)
644 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
645 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
646 uint32_t speed_mask = 0;
648 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
649 speed_mask = hw->aq_nic_cfg->link_speed_msk;
651 if (link_speeds & ETH_LINK_SPEED_10G)
652 speed_mask |= AQ_NIC_RATE_10G;
653 if (link_speeds & ETH_LINK_SPEED_5G)
654 speed_mask |= AQ_NIC_RATE_5G;
655 if (link_speeds & ETH_LINK_SPEED_1G)
656 speed_mask |= AQ_NIC_RATE_1G;
657 if (link_speeds & ETH_LINK_SPEED_2_5G)
658 speed_mask |= AQ_NIC_RATE_2G5;
659 if (link_speeds & ETH_LINK_SPEED_100M)
660 speed_mask |= AQ_NIC_RATE_100M;
663 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
667 * Set device link down: disable tx.
670 atl_dev_set_link_down(struct rte_eth_dev *dev)
672 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
674 return hw->aq_fw_ops->set_link_speed(hw, 0);
/*
 * Reset and stop device.
 */
atl_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	/* NOTE(review): a statement between the trace and the queue free is
	 * elided in this listing (upstream stops the port here) — confirm.
	 */
	atl_free_queues(dev);
/* dev_reset callback: full uninit followed by re-init of the port.
 * NOTE(review): the error checks between/after the two calls are elided
 * in this listing — confirm against the full source.
 */
atl_dev_reset(struct rte_eth_dev *dev)
	ret = eth_atl_dev_uninit(dev);

	ret = eth_atl_dev_init(dev);
/*
 * Push the cached macsec configuration (aq_macsec in the adapter cfg) to
 * firmware as a sequence of request messages: global cfg, Tx SC, Rx SC,
 * Tx SA, Rx SA.  No-op when macsec is disabled or the firmware does not
 * implement send_macsec_req.
 * NOTE(review): the response checks after each request are elided in
 * this listing — confirm error handling against the full source.
 */
atl_dev_configure_macsec(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA (key installed most-significant word first) */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
827 int atl_macsec_enable(struct rte_eth_dev *dev,
828 uint8_t encr, uint8_t repl_prot)
830 struct aq_hw_cfg_s *cfg =
831 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
833 cfg->aq_macsec.common.macsec_enabled = 1;
834 cfg->aq_macsec.common.encryption_enabled = encr;
835 cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
840 int atl_macsec_disable(struct rte_eth_dev *dev)
842 struct aq_hw_cfg_s *cfg =
843 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
845 cfg->aq_macsec.common.macsec_enabled = 0;
850 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
852 struct aq_hw_cfg_s *cfg =
853 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
855 memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
856 memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
861 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
862 uint8_t *mac, uint16_t pi)
864 struct aq_hw_cfg_s *cfg =
865 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
867 memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
868 memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
869 cfg->aq_macsec.rxsc.pi = pi;
874 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
875 uint8_t idx, uint8_t an,
876 uint32_t pn, uint8_t *key)
878 struct aq_hw_cfg_s *cfg =
879 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
881 cfg->aq_macsec.txsa.idx = idx;
882 cfg->aq_macsec.txsa.pn = pn;
883 cfg->aq_macsec.txsa.an = an;
885 memcpy(&cfg->aq_macsec.txsa.key, key, 16);
889 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
890 uint8_t idx, uint8_t an,
891 uint32_t pn, uint8_t *key)
893 struct aq_hw_cfg_s *cfg =
894 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
896 cfg->aq_macsec.rxsa.idx = idx;
897 cfg->aq_macsec.rxsa.pn = pn;
898 cfg->aq_macsec.rxsa.an = an;
900 memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
/* stats_get callback: refresh firmware counters, then map them plus the
 * software per-queue counters into rte_eth_stats.
 * NOTE(review): loop index declaration, loop close and return are elided
 * in this listing.
 */
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;

	/* Pull fresh counters from firmware into hw->curr_stats. */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are maintained in software by the datapath. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
937 atl_dev_stats_reset(struct rte_eth_dev *dev)
939 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
940 struct aq_hw_s *hw = &adapter->hw;
942 hw->aq_fw_ops->update_stats(hw);
944 /* Reset software totals */
945 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
947 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names callback: copy the static stat names out of
 * atl_xstats_tbl; with a NULL array, just report the table size.
 * NOTE(review): size parameter, NULL guard and final return are elided
 * in this listing.
 */
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
		return RTE_DIM(atl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);
/* xstats_get callback: read each stat by its recorded byte offset inside
 * hw->curr_stats (offsets built by ATL_XSTATS_FIELD).
 * NOTE(review): loop index/guard lines and the return are elided in this
 * listing.
 */
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					atl_xstats_tbl[i].offset);
/* fw_version_get callback: format the firmware version as
 * "major.minor.build" (8/8/16-bit fields of the 32-bit version word).
 * NOTE(review): the fw_ver declaration, error check after the firmware
 * read and the truncation check/return are elided in this listing —
 * confirm against the full source.
 */
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */
/* dev_infos_get callback: advertise queue counts, buffer limits, offload
 * capabilities, descriptor limits, RSS parameters and speed capabilities.
 * NOTE(review): compound-literal closers and some blank/struct lines are
 * elided in this listing.
 */
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* No MTA hash filtering / VMDq support. */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	/* Multi-gig PHY: 100M / 1G / 2.5G / 5G / 10G. */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* dev_supported_ptypes_get callback: expose the packet types the scalar
 * Rx burst function can classify.
 * NOTE(review): most of the ptype list, its terminator and the return
 * statements are elided in this listing.
 */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,

	/* Only valid when the default burst function is in use. */
	if (dev->rx_pkt_burst == atl_recv_pkts)
/* Alarm callback scheduled from link_update: (re)apply the cached macsec
 * configuration once the link has settled.
 */
static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}
/* return 0 means link status changed, -1 means not changed */
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;

	/* Pre-fill a "down" link record; overwritten if the PHY reports up. */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): the error return and the down-path returns are
	 * elided in this listing — confirm against the full source.
	 */
	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)

	/* Defer macsec re-configuration by 1s via the EAL alarm API. */
	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1131 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1133 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1135 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1139 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1141 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1143 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1147 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1149 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1151 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1155 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1157 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1159 if (dev->data->promiscuous == 1)
1160 return; /* must remain in all_multicast mode */
1162 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1166 * It clears the interrupt causes and enables the interrupt.
1167 * It will be called once only during nic initialized.
1170 * Pointer to struct rte_eth_dev.
1172 * Enable or Disable.
1175 * - On success, zero.
1176 * - On failure, a negative value.
1180 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1182 atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup hook.
 * NOTE(review): the body is elided in this listing — confirm against the
 * full source.
 */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and mask the pending interrupt causes, recording a needed link
 * update when the link cause bit is set.
 * NOTE(review): the 'cause' variable declaration and the return are
 * elided in this listing.
 */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask further interrupts until the action handler re-enables them. */
	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));

	/* NOTE(review): debug-only PCI address dump; conditional
	 * compilation directives are elided in this listing.
	 */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		    pci_dev->addr.domain,
		    pci_dev->addr.devid,
		    pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1256 * It executes link_update after knowing an interrupt occurred.
1259 * Pointer to struct rte_eth_dev.
1262 * - On success, zero.
1263 * - On failure, a negative value.
1266 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1267 struct rte_intr_handle *intr_handle)
1269 struct atl_interrupt *intr =
1270 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1271 struct atl_adapter *adapter =
1272 (struct atl_adapter *)dev->data->dev_private;
1273 struct aq_hw_s *hw = &adapter->hw;
/* Only act on the flag set by atl_dev_interrupt_get_status() */
1275 if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1278 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1280 /* Notify userapp if link status changed */
1281 if (!atl_dev_link_update(dev, 0)) {
1282 atl_dev_link_status_print(dev);
1283 _rte_eth_dev_callback_process(dev,
1284 RTE_ETH_EVENT_INTR_LSC, NULL);
/* MACsec servicing is optional: skip when FW lacks the mailbox op */
1286 if (hw->aq_fw_ops->send_macsec_req == NULL)
1289 /* Check macsec Keys expired */
1290 struct get_stats req = { 0 };
1291 struct macsec_msg_fw_request msg = { 0 };
1292 struct macsec_msg_fw_response resp = { 0 };
1294 req.ingress_sa_index = 0x0;
1295 req.egress_sc_index = 0x0;
1296 req.egress_sa_index = 0x0;
1297 msg.msg_type = macsec_get_stats_msg;
1300 int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1302 PMD_DRV_LOG(ERR, "send_macsec_req fail");
/* Raise the MACSEC event when any SA/SC threshold or lifetime expired */
1305 if (resp.stats.egress_threshold_expired ||
1306 resp.stats.ingress_threshold_expired ||
1307 resp.stats.egress_expired ||
1308 resp.stats.ingress_expired) {
1309 PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1310 _rte_eth_dev_callback_process(dev,
1311 RTE_ETH_EVENT_MACSEC, NULL);
/* Re-arm both the NIC interrupt mask and the host interrupt line */
1315 atl_enable_intr(dev);
1316 rte_intr_enable(intr_handle);
1322 * Interrupt handler triggered by NIC for handling
1323 * specific interrupt.
1326 * Pointer to interrupt handle.
1328 * The address of parameter (struct rte_eth_dev *) registered before.
1334 atl_dev_interrupt_handler(void *param)
1336 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
/* Read/clear the causes first, then run the deferred service action */
1338 atl_dev_interrupt_get_status(dev);
1339 atl_dev_interrupt_action(dev, dev->intr_handle);
/* Size in bytes of the SFP module EEPROM exposed through the eeprom ops */
1342 #define SFP_EEPROM_SIZE 0xff
/* Report the fixed EEPROM length for rte_eth_dev_get_eeprom_length() */
1345 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1347 return SFP_EEPROM_SIZE;
/* Read the SFP module EEPROM through firmware. A non-zero eeprom->magic
 * overrides the default SMBUS device address.
 */
1350 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1351 struct rte_dev_eeprom_info *eeprom)
1353 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1354 uint32_t dev_addr = SMBUS_DEVICE_ID;
/* Not every firmware flavour exposes EEPROM access */
1356 if (hw->aq_fw_ops->get_eeprom == NULL)
/* Reject reads past the module EEPROM or without a destination buffer */
1359 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1360 eeprom->data == NULL)
1364 dev_addr = eeprom->magic;
1366 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1367 eeprom->length, eeprom->offset);
/* Write the SFP module EEPROM through firmware. Only whole-EEPROM
 * writes are accepted; a non-zero eeprom->magic overrides the default
 * SMBUS device address.
 */
1370 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1371 struct rte_dev_eeprom_info *eeprom)
1373 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1374 uint32_t dev_addr = SMBUS_DEVICE_ID;
1376 if (hw->aq_fw_ops->set_eeprom == NULL)
1379 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1383 dev_addr = eeprom->magic;
1385 return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1386 eeprom->data, eeprom->length);
/* Register dump for rte_eth_dev_get_reg_info(). With data == NULL only
 * the required length and word width are reported; otherwise a full
 * dump is produced and the device version recorded.
 */
1390 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1392 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Query-only mode: caller asks for buffer sizing, no dump performed */
1396 if (regs->data == NULL) {
1397 regs->length = hw_atl_utils_hw_get_reg_length();
1398 regs->width = sizeof(u32);
1402 /* Only full register dump is supported */
1403 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1406 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1408 /* Device version */
1409 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1410 regs->version = mif_id & 0xFFU;
1416 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1418 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1420 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1421 fc_conf->mode = RTE_FC_NONE;
1422 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1423 fc_conf->mode = RTE_FC_FULL;
1424 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1425 fc_conf->mode = RTE_FC_RX_PAUSE;
1426 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1427 fc_conf->mode = RTE_FC_TX_PAUSE;
/* Translate the requested ethdev flow-control mode into the nic config
 * bits and push the change to firmware only when it actually differs
 * from the current setting.
 */
1433 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1435 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1436 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
/* Flow-control programming goes through firmware; bail if unsupported */
1439 if (hw->aq_fw_ops->set_flow_control == NULL)
1442 if (fc_conf->mode == RTE_FC_NONE)
1443 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1444 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1445 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1446 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1447 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1448 else if (fc_conf->mode == RTE_FC_FULL)
1449 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
/* Skip the FW call when nothing changed */
1451 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1452 return hw->aq_fw_ops->set_flow_control(hw);
/* Program (or disable) unicast MAC filter slot @index. The 48-bit MAC
 * is split into a 16-bit high word and a 32-bit low word for the two
 * filter address registers. With enable == false the filter is left
 * disabled and the address words are not rewritten.
 */
1458 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1459 u8 *mac_addr, bool enable)
1461 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1462 unsigned int h = 0U;
1463 unsigned int l = 0U;
/* MSW: bytes 0-1; LSW: bytes 2-5 (network byte order in mac_addr) */
1467 h = (mac_addr[0] << 8) | (mac_addr[1]);
1468 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1469 (mac_addr[4] << 8) | mac_addr[5];
/* Disable the filter while rewriting its address words */
1472 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1473 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1474 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1477 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1479 err = aq_hw_err_from_flags(hw);
1485 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1486 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1488 if (is_zero_ether_addr(mac_addr)) {
1489 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1493 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* .mac_addr_remove callback: disable unicast filter slot @index */
1497 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1499 atl_update_mac_addr(dev, index, NULL, false);
/* .mac_addr_set callback: replace the primary MAC in filter slot 0 by
 * removing the old entry and installing the new address.
 */
1503 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1505 atl_remove_mac_addr(dev, 0);
1506 atl_add_mac_addr(dev, addr, 0, 0);
/* .mtu_set callback: validate the requested MTU against device limits
 * and record the resulting max Rx frame length in the port config.
 */
1511 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1513 struct rte_eth_dev_info dev_info;
/* On-wire frame = MTU payload + L2 header + CRC */
1514 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1516 atl_dev_info_get(dev, &dev_info);
1518 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1521 /* update max frame size */
1522 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* .vlan_filter_set callback: add (on != 0) or remove @vlan_id in the
 * software VLAN filter table, mirroring each change to the HW filters.
 * VLAN promiscuous mode is enabled whenever the table ends up empty.
 */
1528 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1530 struct aq_hw_cfg_s *cfg =
1531 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1532 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1536 PMD_INIT_FUNC_TRACE();
/* First pass: look for an existing entry holding vlan_id */
1538 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1539 if (cfg->vlan_filter[i] == vlan_id) {
1541 /* Disable VLAN filter. */
1542 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1544 /* Clear VLAN filter entry */
1545 cfg->vlan_filter[i] = 0;
1551 /* VLAN_ID was not found. So, nothing to delete. */
1552 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1555 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1556 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1559 /* Try to find a free VLAN filter slot to add the new VLAN_ID */
1560 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1561 if (cfg->vlan_filter[i] == 0)
1565 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1566 /* We have no free VLAN filter to add new VLAN_ID */
/* Program the new entry: match action, VLAN id, then enable */
1571 cfg->vlan_filter[i] = vlan_id;
1572 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1573 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1574 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1577 /* Enable VLAN promisc mode if vlan_filter empty */
1578 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1579 if (cfg->vlan_filter[i] != 0)
1583 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
/* Apply @en to every HW VLAN filter slot that currently holds a VLAN id
 * in the software filter table; empty slots are left untouched.
 */
1589 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1591 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1592 struct aq_hw_cfg_s *cfg =
1593 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1596 PMD_INIT_FUNC_TRACE();
1598 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1599 if (cfg->vlan_filter[i])
1600 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
/* .vlan_offload_set callback: toggle VLAN filtering and per-queue VLAN
 * stripping according to the bits set in @mask.
 */
1606 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1608 struct aq_hw_cfg_s *cfg =
1609 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1610 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1614 PMD_INIT_FUNC_TRACE();
1616 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1618 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
/* Stripping is a per-queue setting on this HW: apply to all Rx queues */
1620 for (i = 0; i < dev->data->nb_rx_queues; i++)
1621 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
/* NOTE(review): the EXTEND (QinQ) branch body is not visible here —
 * presumably it rejects the request as unsupported; confirm.
 */
1623 if (mask & ETH_VLAN_EXTEND_MASK)
/* .vlan_tpid_set callback: program the inner or outer VLAN ethertype
 * used by the HW VLAN parser; other types are rejected with an error.
 */
1630 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1633 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1636 PMD_INIT_FUNC_TRACE();
1638 switch (vlan_type) {
1639 case ETH_VLAN_TYPE_INNER:
1640 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1642 case ETH_VLAN_TYPE_OUTER:
1643 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1646 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1654 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1656 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658 PMD_INIT_FUNC_TRACE();
1660 if (queue_id > dev->data->nb_rx_queues) {
1661 PMD_DRV_LOG(ERR, "Invalid queue id");
1665 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
/* .set_mc_addr_list callback: rewrite the multicast region of the
 * unicast filter table (slots HW_ATL_B0_MAC_MIN and up). Slots beyond
 * nb_mc_addr are disabled.
 */
1669 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1670 struct ether_addr *mc_addr_set,
1671 uint32_t nb_mc_addr)
1673 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Reject lists larger than the available multicast filter slots */
1676 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1679 /* Update whole uc filters table */
1680 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1681 u8 *mac_addr = NULL;
1684 if (i < nb_mc_addr) {
1685 mac_addr = mc_addr_set[i].addr_bytes;
/* Split 48-bit MAC into 32-bit low / 16-bit high register words */
1686 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1687 (mac_addr[4] << 8) | mac_addr[5];
1688 h = (mac_addr[0] << 8) | mac_addr[1];
/* Disable, rewrite address words, then re-enable only when an
 * address was supplied for this slot.
 */
1691 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1692 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1693 HW_ATL_B0_MAC_MIN + i);
1694 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1695 HW_ATL_B0_MAC_MIN + i);
1696 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1697 HW_ATL_B0_MAC_MIN + i);
/* .reta_update callback: copy the new indirection entries into the
 * cached RSS config (clamped to nb_rx_queues-1) and program the HW.
 * NOTE(review): only reta_conf[0].reta is read and the per-group mask
 * is ignored — confirm the table fits one 64-entry group.
 */
1704 atl_reta_update(struct rte_eth_dev *dev,
1705 struct rte_eth_rss_reta_entry64 *reta_conf,
1709 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1710 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1712 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1713 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1714 dev->data->nb_rx_queues - 1);
1716 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
/* .reta_query callback: report the cached RSS indirection table.
 * NOTE(review): only reta_conf[0] is filled and its mask is forced to
 * all ones regardless of reta_size — confirm against callers.
 */
1721 atl_reta_query(struct rte_eth_dev *dev,
1722 struct rte_eth_rss_reta_entry64 *reta_conf,
1726 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1728 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1729 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1730 reta_conf->mask = ~0U;
1735 atl_rss_hash_update(struct rte_eth_dev *dev,
1736 struct rte_eth_rss_conf *rss_conf)
1738 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1739 struct aq_hw_cfg_s *cfg =
1740 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1741 static u8 def_rss_key[40] = {
1742 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1743 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1744 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1745 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1746 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1749 cfg->is_rss = !!rss_conf->rss_hf;
1750 if (rss_conf->rss_key) {
1751 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1752 rss_conf->rss_key_len);
1753 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1755 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1756 sizeof(def_rss_key));
1757 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1760 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1761 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/* .rss_hash_conf_get callback: report the RSS enable state and, when a
 * buffer is provided, the stored hash key.
 * NOTE(review): hash_secret_key_size bytes are copied into the caller's
 * rss_key without checking the caller's buffer length — relies on the
 * ethdev convention that the buffer is large enough; confirm.
 */
1766 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1767 struct rte_eth_rss_conf *rss_conf)
1769 struct aq_hw_cfg_s *cfg =
1770 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1772 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1773 if (rss_conf->rss_key) {
1774 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1775 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1776 rss_conf->rss_key_len);
/* Return whether @dev is bound to the given PCI driver (by name match) */
1783 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1785 if (strcmp(dev->device->driver->name, drv->driver.name))
/* Convenience check: is @dev driven by this (atlantic) PMD? */
1792 is_atlantic_supported(struct rte_eth_dev *dev)
1794 return is_device_supported(dev, &rte_atl_pmd);
1797 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1798 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1799 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1801 RTE_INIT(atl_init_log)
1803 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1804 if (atl_logtype_init >= 0)
1805 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1806 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1807 if (atl_logtype_driver >= 0)
1808 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);