/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"

#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static int atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_xstat_name *xstats_names,

static int atl_dev_stats_get(struct rte_eth_dev *dev,
	struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
	struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int atl_vlan_filter_set(struct rte_eth_dev *dev,
	uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
	uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
	enum rte_vlan_type vlan_type, uint16_t tpid);

static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *eeprom);

static int atl_dev_get_regs(struct rte_eth_dev *dev,
	struct rte_dev_reg_info *regs);

static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
	struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
	struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
	struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

static int atl_add_mac_addr(struct rte_eth_dev *dev,
	struct rte_ether_addr *mac_addr,
	uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
	struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
	struct rte_ether_addr *mc_addr_set,
	uint32_t nb_mc_addr);

static int atl_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int atl_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
static int atl_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);

static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info);

 * The set of PCI devices this driver supports
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
	| DEV_RX_OFFLOAD_IPV4_CKSUM \
	| DEV_RX_OFFLOAD_UDP_CKSUM \
	| DEV_RX_OFFLOAD_TCP_CKSUM \
	| DEV_RX_OFFLOAD_JUMBO_FRAME \
	| DEV_RX_OFFLOAD_MACSEC_STRIP \
	| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
	| DEV_TX_OFFLOAD_IPV4_CKSUM \
	| DEV_TX_OFFLOAD_UDP_CKSUM \
	| DEV_TX_OFFLOAD_TCP_CKSUM \
	| DEV_TX_OFFLOAD_TCP_TSO \
	| DEV_TX_OFFLOAD_MACSEC_INSERT \
	| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,

enum atl_xstats_type {

#define ATL_XSTATS_FIELD(name) { \
	offsetof(struct aq_stats_s, name), \

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	offsetof(struct macsec_stats, name), \
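/*
 * Note: the xstats are table-driven.  Each ATL_XSTATS_FIELD() /
 * ATL_MACSEC_XSTATS_FIELD() entry records (via offsetof) the byte offset of
 * one counter inside struct aq_stats_s or struct macsec_stats together with
 * its type (XSTATS_TYPE_MSM / XSTATS_TYPE_MACSEC), so atl_dev_xstats_get()
 * below can read every counter with one generic pointer computation instead
 * of per-counter code.
 */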
struct atl_xstats_tbl_s {
	enum atl_xstats_type type;

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure = atl_dev_configure,
	.dev_start = atl_dev_start,
	.dev_stop = atl_dev_stop,
	.dev_set_link_up = atl_dev_set_link_up,
	.dev_set_link_down = atl_dev_set_link_down,
	.dev_close = atl_dev_close,
	.dev_reset = atl_dev_reset,

	.promiscuous_enable = atl_dev_promiscuous_enable,
	.promiscuous_disable = atl_dev_promiscuous_disable,
	.allmulticast_enable = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	.link_update = atl_dev_link_update,

	.get_reg = atl_dev_get_regs,

	.stats_get = atl_dev_stats_get,
	.xstats_get = atl_dev_xstats_get,
	.xstats_get_names = atl_dev_xstats_get_names,
	.stats_reset = atl_dev_stats_reset,
	.xstats_reset = atl_dev_stats_reset,

	.fw_version_get = atl_fw_version_get,
	.dev_infos_get = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set = atl_dev_mtu_set,

	.vlan_filter_set = atl_vlan_filter_set,
	.vlan_offload_set = atl_vlan_offload_set,
	.vlan_tpid_set = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	.rx_queue_start = atl_rx_queue_start,
	.rx_queue_stop = atl_rx_queue_stop,
	.rx_queue_setup = atl_rx_queue_setup,
	.rx_queue_release = atl_rx_queue_release,

	.tx_queue_start = atl_tx_queue_start,
	.tx_queue_stop = atl_tx_queue_stop,
	.tx_queue_setup = atl_tx_queue_setup,
	.tx_queue_release = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.get_eeprom_length = atl_dev_get_eeprom_length,
	.get_eeprom = atl_dev_get_eeprom,
	.set_eeprom = atl_dev_set_eeprom,

	.flow_ctrl_get = atl_flow_ctrl_get,
	.flow_ctrl_set = atl_flow_ctrl_set,

	.mac_addr_add = atl_add_mac_addr,
	.mac_addr_remove = atl_remove_mac_addr,
	.mac_addr_set = atl_set_default_mac_addr,
	.set_mc_addr_list = atl_dev_set_mc_addr_list,
	.rxq_info_get = atl_rxq_info_get,
	.txq_info_get = atl_txq_info_get,

	.reta_update = atl_reta_update,
	.reta_query = atl_reta_query,
	.rss_hash_update = atl_rss_hash_update,
	.rss_hash_conf_get = atl_rss_hash_conf_get,

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
	return hw_atl_b0_hw_reset(hw);

atl_enable_intr(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);

atl_disable_intr(struct aq_hw_s *hw)
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);

eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	atl_enable_intr(eth_dev);

eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	pthread_mutex_destroy(&hw->mbox_mutex);

eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);

eth_atl_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);

atl_dev_configure(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

 * Configure device link speed and setup link.
 * It returns 0 on success.
atl_dev_start(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
			"Invalid link_speeds for port %u, fix speed not supported",

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	status = atl_reset_hw(hw);

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
				     ATL_MAX_INTR_QUEUE_NUM);

		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);

	/* initialize transmission unit */

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		     hw->fw_ver_actual >> 24,
		     (hw->fw_ver_actual >> 16) & 0xFF,
		     hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
			atl_dev_lsc_interrupt_setup(dev, false);
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	atl_stop_queues(dev);

 * Stop device: disable rx and tx functions to allow for reconfiguring.
atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;

 * Set device link up: enable tx.
atl_dev_set_link_up(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;

		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
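/*
 * For illustration: in atl_dev_set_link_up(), a request of
 * link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G makes the checks above
 * build speed_mask = AQ_NIC_RATE_1G | AQ_NIC_RATE_10G, while
 * ETH_LINK_SPEED_AUTONEG (i.e. 0) falls back to the full link_speed_msk set
 * up in eth_atl_dev_init().
 */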
 * Set device link down: disable tx.
atl_dev_set_link_down(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);

 * Reset and stop device.
atl_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	atl_free_queues(dev);

atl_dev_reset(struct rte_eth_dev *dev)
	ret = eth_atl_dev_uninit(dev);

	ret = eth_atl_dev_init(dev);

atl_dev_configure_macsec(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the set of SC/SA structures from the parameters provided by DPDK */

	/* Configure MACsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which is currently used */

	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC address and the port identifier.
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
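	/*
	 * Worked example (illustrative values): if the byte swaps above leave
	 * mac_sa[1] = 0x0011 and mac_sa[0] = 0x22334455 for a source MAC of
	 * 00:11:22:33:44:55, then sci_hi_part = 0x00112233,
	 * sci_low_part = 0x44550000 and sci[0] = 0x44550001 - i.e. the 64-bit
	 * SCI is the 6-byte source MAC followed by the 2-byte port identifier.
	 */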
	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

int atl_macsec_disable(struct rte_eth_dev *dev)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
	cfg->aq_macsec.rxsc.pi = pi;

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);

atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];

atl_dev_stats_reset(struct rte_eth_dev *dev)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

atl_dev_xstats_get_count(struct rte_eth_dev *dev)
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
		    ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))

atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
	unsigned int count = atl_dev_xstats_get_count(dev);

	for (i = 0; i < size && i < count; i++) {
		snprintf(xstats_names[i].name,
			 RTE_ETH_XSTATS_NAME_SIZE, "%s",
			 atl_xstats_tbl[i].name);

atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };

	unsigned int count = atl_dev_xstats_get_count(dev);

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);

	for (i = 0; i < n && i < count; i++) {

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					atl_xstats_tbl[i].offset);
		case XSTATS_TYPE_MACSEC:
				*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);

atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */
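	/*
	 * Example (illustrative): fw_ver = 0x0301001a formats as "3.1.26";
	 * snprintf() returns 6 and the +1 accounts for the terminating NUL,
	 * so the resulting length can be checked against fw_size by the
	 * caller.
	 */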
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,

	if (dev->rx_pkt_burst == atl_recv_pkts)

atl_dev_delayed_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);

/* Return 0 if the link status changed, -1 if it did not change */
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 * Pointer to struct rte_eth_dev.
 * Enable or Disable.
 * - On success, zero.
 * - On failure, a negative value.
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	atl_dev_link_status_print(dev);

atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)

atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

 * It gets and then prints the link status.
 * Pointer to struct rte_eth_dev.
 * - On success, zero.
 * - On failure, a negative value.
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			    "full-duplex" : "half-duplex");
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		    pci_dev->addr.domain,
		    pci_dev->addr.devid,
		    pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);

 * It executes link_update after an interrupt has occurred.
 * Pointer to struct rte_eth_dev.
 * - On success, zero.
 * - On failure, a negative value.
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	if (hw->aq_fw_ops->send_macsec_req == NULL)

	/* Check whether MACsec keys have expired */
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };

	req.ingress_sa_index = 0x0;
	req.egress_sc_index = 0x0;
	req.egress_sa_index = 0x0;
	msg.msg_type = macsec_get_stats_msg;

	int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		PMD_DRV_LOG(ERR, "send_macsec_req fail");

	if (resp.stats.egress_threshold_expired ||
	    resp.stats.ingress_threshold_expired ||
	    resp.stats.egress_expired ||
	    resp.stats.ingress_expired) {
		PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
		rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_MACSEC, NULL);

	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 * Pointer to interrupt handle.
 * The address of parameter (struct rte_eth_dev *) registered before.
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);

atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
	return SFP_EEPROM_SIZE;

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)

	if (eeprom->magic > 0x7F)

		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
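/*
 * Usage note (illustrative, based on the checks above): eeprom->magic, when
 * non-zero and not above 0x7F, appears to be used as the SMBus device
 * address of the SFP module in atl_dev_get_eeprom()/atl_dev_set_eeprom(),
 * with SMBUS_DEVICE_ID as the default - e.g. magic = 0x50 would target the
 * standard A0h ID page.  Offsets and lengths are bounded by SFP_EEPROM_SIZE
 * (256 bytes).
 */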
int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)

	if (eeprom->magic > 0x7F)

		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);

atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];
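	/*
	 * Worked example (illustrative): for MAC 00:11:22:33:44:55 the
	 * packing above yields h = 0x0011 (two most significant bytes) and
	 * l = 0x22334455 (remaining four bytes), matching the MSW/LSW split
	 * taken by hw_atl_rpfl2unicast_dest_address{msw,lsw}_set() below.
	 */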
	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);

atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	atl_update_mac_addr(dev, index, NULL, false);

atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);

atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct rte_eth_dev_info dev_info;

	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
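	/*
	 * Example: mtu = 1500 gives frame_size = 1500 + RTE_ETHER_HDR_LEN (14)
	 * + RTE_ETHER_CRC_LEN (4) = 1518 bytes, which is what is stored as
	 * max_rx_pkt_len above; requests exceeding dev_info.max_rx_pktlen are
	 * rejected by the check before it.
	 */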
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)

	/* VLAN_ID already exists, or was already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)

	/* Try to find a free VLAN filter entry for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry to add the new VLAN_ID */

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);

atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)

atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");

atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);

atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)

	/* Update the whole UC filter table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			    (mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
			HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
			HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
			HW_ATL_B0_MAC_MIN + i);

atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
			dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
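	/*
	 * Note: each redirection entry is clamped with min() to the last
	 * valid RX queue before the table is pushed to hardware - e.g.
	 * reta[i] = 7 with only 4 RX queues configured is stored as 3
	 * (illustrative values).
	 */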
atl_reta_query(struct rte_eth_dev *dev,
	       struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;

atl_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);

atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);

is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
	if (strcmp(dev->device->driver->name, drv->driver.name))

is_atlantic_supported(struct rte_eth_dev *dev)
	return is_device_supported(dev, &rte_atl_pmd);

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);