/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"
static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int atl_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			     struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

static int atl_reta_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);

static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },

	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
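
/*
 * Rx/Tx offload capability masks below are reported to applications
 * through atl_dev_info_get() (rx_offload_capa / tx_offload_capa).
 */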
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
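
/*
 * Extended statistics. Each table entry carries the counter name, its byte
 * offset inside the backing structure and a type tag: XSTATS_TYPE_MSM
 * counters are read from hw->curr_stats, while XSTATS_TYPE_MACSEC counters
 * are fetched from firmware with a macsec_get_stats_msg request (see
 * atl_dev_xstats_get() below).
 */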
enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM, \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC, \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
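
/*
 * eth_dev callback table wired into ethdev. The queue setup/start/stop and
 * queue-info callbacks referenced here (atl_rx_queue_setup() and friends)
 * are implemented in the driver's Rx/Tx module (atl_rxtx.c in the upstream
 * DPDK tree).
 */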
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure = atl_dev_configure,
	.dev_start = atl_dev_start,
	.dev_stop = atl_dev_stop,
	.dev_set_link_up = atl_dev_set_link_up,
	.dev_set_link_down = atl_dev_set_link_down,
	.dev_close = atl_dev_close,
	.dev_reset = atl_dev_reset,

	.promiscuous_enable = atl_dev_promiscuous_enable,
	.promiscuous_disable = atl_dev_promiscuous_disable,
	.allmulticast_enable = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	.link_update = atl_dev_link_update,

	.get_reg = atl_dev_get_regs,

	.stats_get = atl_dev_stats_get,
	.xstats_get = atl_dev_xstats_get,
	.xstats_get_names = atl_dev_xstats_get_names,
	.stats_reset = atl_dev_stats_reset,
	.xstats_reset = atl_dev_stats_reset,

	.fw_version_get = atl_fw_version_get,
	.dev_infos_get = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set = atl_dev_mtu_set,

	.vlan_filter_set = atl_vlan_filter_set,
	.vlan_offload_set = atl_vlan_offload_set,
	.vlan_tpid_set = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	.rx_queue_start = atl_rx_queue_start,
	.rx_queue_stop = atl_rx_queue_stop,
	.rx_queue_setup = atl_rx_queue_setup,
	.rx_queue_release = atl_rx_queue_release,

	.tx_queue_start = atl_tx_queue_start,
	.tx_queue_stop = atl_tx_queue_stop,
	.tx_queue_setup = atl_tx_queue_setup,
	.tx_queue_release = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.get_eeprom_length = atl_dev_get_eeprom_length,
	.get_eeprom = atl_dev_get_eeprom,
	.set_eeprom = atl_dev_set_eeprom,

	.flow_ctrl_get = atl_flow_ctrl_get,
	.flow_ctrl_set = atl_flow_ctrl_set,

	.mac_addr_add = atl_add_mac_addr,
	.mac_addr_remove = atl_remove_mac_addr,
	.mac_addr_set = atl_set_default_mac_addr,
	.set_mc_addr_list = atl_dev_set_mc_addr_list,
	.rxq_info_get = atl_rxq_info_get,
	.txq_info_get = atl_txq_info_get,

	.reta_update = atl_reta_update,
	.reta_query = atl_reta_query,
	.rss_hash_update = atl_rss_hash_update,
	.rss_hash_conf_get = atl_rss_hash_conf_get,
};
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
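
/*
 * eth_atl_dev_init() runs once per port at probe time: it wires up the
 * dev_ops and burst functions, initializes the firmware mailbox
 * (hw_atl_utils_initfw), reads the permanent MAC address and registers the
 * interrupt handler. Secondary processes return early because the primary
 * process has already done this work.
 */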
static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	pthread_mutex_destroy(&hw->mbox_mutex);

	return 0;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
			hw->fw_ver_actual >> 24,
			(hw->fw_ver_actual >> 16) & 0xFF,
			hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}
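
/*
 * Pushes the software MACsec configuration to the firmware as a series of
 * mailbox requests: a global enable (macsec_cfg_msg), one TX secure
 * channel, one RX secure channel, then a TX and an RX secure association.
 * The function stops at the first mailbox response that reports an error.
 */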
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which is currently used */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return 0;

	return 0;
}
int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}
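
/*
 * Note: the atl_macsec_* helpers above only record the requested MACsec
 * state in the per-port config; nothing reaches the hardware until
 * atl_dev_configure_macsec() pushes it over the firmware mailbox from the
 * delayed alarm handler scheduled on a link-status change.
 */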
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}
static unsigned int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
		    ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}
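
/*
 * The firmware version is packed into a 32-bit word as major.minor.build:
 * bits 31:24 major, 23:16 minor, 15:0 build. For example, a raw value of
 * 0x03010405 is rendered as "3.1.1029" by the helper below.
 */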
static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */

	if (fw_size < ret)
		return ret;

	return 0;
}
static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;

	return 0;
}
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}
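
/*
 * atl_dev_delayed_handler() runs from an EAL alarm armed at the end of
 * atl_dev_link_update() below, so MACsec gets (re)programmed shortly after
 * every link-status change rather than in interrupt context.
 */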
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}
static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}

	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}
/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether macsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}
static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}
static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}
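
/*
 * The L2 unicast filters take the MAC address split across two registers:
 * the high word holds bytes 0-1 and the low dword bytes 2-5. For example,
 * 00:17:b6:01:02:03 is programmed as h = 0x0017 and l = 0xb6010203.
 */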
static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}
static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot is left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}
static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}
static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}
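
/*
 * RSS redirection table helpers. Entries written through atl_reta_update()
 * are clamped to the number of configured Rx queues, and atl_reta_query()
 * reports every entry as valid by setting the full mask.
 */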
static int
atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
	       struct rte_eth_rss_reta_entry64 *reta_conf,
	       uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}
static int
atl_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);