1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations for the eth_dev_ops callbacks and PCI bus
 * entry points implemented below (and, for some, later in the file
 * beyond this excerpt).
 */
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
20 static int atl_dev_configure(struct rte_eth_dev *dev);
21 static int atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int atl_dev_reset(struct rte_eth_dev *dev);
27 static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 struct rte_eth_xstat_name *xstats_names,
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 struct rte_eth_stats *stats);
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 struct rte_eth_xstat *stats, unsigned int n);
43 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
48 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
53 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
54 uint16_t vlan_id, int on);
56 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
59 uint16_t queue_id, int on);
61 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
62 enum rte_vlan_type vlan_type, uint16_t tpid);
65 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
66 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
67 struct rte_dev_eeprom_info *eeprom);
68 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
69 struct rte_dev_eeprom_info *eeprom);
72 static int atl_dev_get_regs(struct rte_eth_dev *dev,
73 struct rte_dev_reg_info *regs);
76 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
77 struct rte_eth_fc_conf *fc_conf);
78 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
79 struct rte_eth_fc_conf *fc_conf);
81 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
/* Interrupt plumbing: LSC/RXQ setup, status read, action and the
 * top-level handler registered with the EAL interrupt thread.
 */
84 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
85 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
86 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
87 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
88 struct rte_intr_handle *handle);
89 static void atl_dev_interrupt_handler(void *param);
92 static int atl_add_mac_addr(struct rte_eth_dev *dev,
93 struct rte_ether_addr *mac_addr,
94 uint32_t index, uint32_t pool);
95 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
96 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
97 struct rte_ether_addr *mac_addr);
99 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
100 struct rte_ether_addr *mc_addr_set,
101 uint32_t nb_mc_addr);
104 static int atl_reta_update(struct rte_eth_dev *dev,
105 struct rte_eth_rss_reta_entry64 *reta_conf,
107 static int atl_reta_query(struct rte_eth_dev *dev,
108 struct rte_eth_rss_reta_entry64 *reta_conf,
110 static int atl_rss_hash_update(struct rte_eth_dev *dev,
111 struct rte_eth_rss_conf *rss_conf);
112 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
113 struct rte_eth_rss_conf *rss_conf);
116 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
117 struct rte_pci_device *pci_dev);
118 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
120 static int atl_dev_info_get(struct rte_eth_dev *dev,
121 struct rte_eth_dev_info *dev_info);
/* Dynamic log type ids for init-time and driver-runtime logging. */
123 int atl_logtype_init;
124 int atl_logtype_driver;
127 * The set of PCI devices this driver supports
/* PCI (vendor, device) id table; matched by the EAL PCI bus at probe
 * time. Terminated by a zeroed sentinel entry.
 */
129 static const struct rte_pci_id pci_id_atl_map[] = {
130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
150 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
151 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
152 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping and supports link
 * status change (LSC) interrupts.
 */
155 static struct rte_pci_driver rte_atl_pmd = {
156 .id_table = pci_id_atl_map,
157 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
158 .probe = eth_atl_pci_probe,
159 .remove = eth_atl_pci_remove,
/* RX offload capabilities advertised via atl_dev_info_get(). */
162 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
163 | DEV_RX_OFFLOAD_IPV4_CKSUM \
164 | DEV_RX_OFFLOAD_UDP_CKSUM \
165 | DEV_RX_OFFLOAD_TCP_CKSUM \
166 | DEV_RX_OFFLOAD_JUMBO_FRAME \
167 | DEV_RX_OFFLOAD_MACSEC_STRIP \
168 | DEV_RX_OFFLOAD_VLAN_FILTER)
/* TX offload capabilities advertised via atl_dev_info_get(). */
170 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
171 | DEV_TX_OFFLOAD_IPV4_CKSUM \
172 | DEV_TX_OFFLOAD_UDP_CKSUM \
173 | DEV_TX_OFFLOAD_TCP_CKSUM \
174 | DEV_TX_OFFLOAD_TCP_TSO \
175 | DEV_TX_OFFLOAD_MACSEC_INSERT \
176 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* SFP module EEPROM size in bytes (256) exposed by get/set_eeprom. */
178 #define SFP_EEPROM_SIZE 0x100
/* Descriptor-ring limits reported to applications in dev_info. */
180 static const struct rte_eth_desc_lim rx_desc_lim = {
181 .nb_max = ATL_MAX_RING_DESC,
182 .nb_min = ATL_MIN_RING_DESC,
183 .nb_align = ATL_RXD_ALIGN,
186 static const struct rte_eth_desc_lim tx_desc_lim = {
187 .nb_max = ATL_MAX_RING_DESC,
188 .nb_min = ATL_MIN_RING_DESC,
189 .nb_align = ATL_TXD_ALIGN,
190 .nb_seg_max = ATL_TX_MAX_SEG,
191 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Distinguishes which backing structure an xstat entry reads from:
 * MSM MAC counters (struct aq_stats_s) vs MACSEC counters
 * (struct macsec_stats). Enumerators not visible in this excerpt.
 */
194 enum atl_xstats_type {
/* Table-entry helpers: record the stat's name and its byte offset
 * inside the corresponding stats structure.
 */
199 #define ATL_XSTATS_FIELD(name) { \
201 offsetof(struct aq_stats_s, name), \
205 #define ATL_MACSEC_XSTATS_FIELD(name) { \
207 offsetof(struct macsec_stats, name), \
/* One row per extended statistic: name, offset and source type. */
211 struct atl_xstats_tbl_s {
214 enum atl_xstats_type type;
217 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
218 ATL_XSTATS_FIELD(uprc),
219 ATL_XSTATS_FIELD(mprc),
220 ATL_XSTATS_FIELD(bprc),
221 ATL_XSTATS_FIELD(erpt),
222 ATL_XSTATS_FIELD(uptc),
223 ATL_XSTATS_FIELD(mptc),
224 ATL_XSTATS_FIELD(bptc),
225 ATL_XSTATS_FIELD(erpr),
226 ATL_XSTATS_FIELD(ubrc),
227 ATL_XSTATS_FIELD(ubtc),
228 ATL_XSTATS_FIELD(mbrc),
229 ATL_XSTATS_FIELD(mbtc),
230 ATL_XSTATS_FIELD(bbrc),
231 ATL_XSTATS_FIELD(bbtc),
232 /* Ingress Common Counters */
233 ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
234 ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
235 ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
236 ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
237 ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
238 ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
239 ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
240 ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
241 /* Ingress SA Counters */
242 ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
243 ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
244 ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
245 ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
246 ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
247 ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
248 ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
249 ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
250 ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
251 /* Egress Common Counters */
252 ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
253 ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
254 ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
255 ATL_MACSEC_XSTATS_FIELD(out_too_long),
256 /* Egress SC Counters */
257 ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
258 ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
259 /* Egress SA Counters */
260 ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
261 ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
262 ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
263 ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
/* ethdev callback table wired into each atl port at init time.
 * Queue-level callbacks (atl_rx_queue_* / atl_tx_queue_*) are
 * implemented in the companion rxtx source file.
 */
266 static const struct eth_dev_ops atl_eth_dev_ops = {
267 .dev_configure = atl_dev_configure,
268 .dev_start = atl_dev_start,
269 .dev_stop = atl_dev_stop,
270 .dev_set_link_up = atl_dev_set_link_up,
271 .dev_set_link_down = atl_dev_set_link_down,
272 .dev_close = atl_dev_close,
273 .dev_reset = atl_dev_reset,
276 .promiscuous_enable = atl_dev_promiscuous_enable,
277 .promiscuous_disable = atl_dev_promiscuous_disable,
278 .allmulticast_enable = atl_dev_allmulticast_enable,
279 .allmulticast_disable = atl_dev_allmulticast_disable,
282 .link_update = atl_dev_link_update,
284 .get_reg = atl_dev_get_regs,
287 .stats_get = atl_dev_stats_get,
288 .xstats_get = atl_dev_xstats_get,
289 .xstats_get_names = atl_dev_xstats_get_names,
290 .stats_reset = atl_dev_stats_reset,
/* xstats share the basic-stats reset: both clear the same HW totals */
291 .xstats_reset = atl_dev_stats_reset,
293 .fw_version_get = atl_fw_version_get,
294 .dev_infos_get = atl_dev_info_get,
295 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
297 .mtu_set = atl_dev_mtu_set,
300 .vlan_filter_set = atl_vlan_filter_set,
301 .vlan_offload_set = atl_vlan_offload_set,
302 .vlan_tpid_set = atl_vlan_tpid_set,
303 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
306 .rx_queue_start = atl_rx_queue_start,
307 .rx_queue_stop = atl_rx_queue_stop,
308 .rx_queue_setup = atl_rx_queue_setup,
309 .rx_queue_release = atl_rx_queue_release,
311 .tx_queue_start = atl_tx_queue_start,
312 .tx_queue_stop = atl_tx_queue_stop,
313 .tx_queue_setup = atl_tx_queue_setup,
314 .tx_queue_release = atl_tx_queue_release,
316 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
317 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
319 .rx_queue_count = atl_rx_queue_count,
320 .rx_descriptor_status = atl_dev_rx_descriptor_status,
321 .tx_descriptor_status = atl_dev_tx_descriptor_status,
324 .get_eeprom_length = atl_dev_get_eeprom_length,
325 .get_eeprom = atl_dev_get_eeprom,
326 .set_eeprom = atl_dev_set_eeprom,
329 .flow_ctrl_get = atl_flow_ctrl_get,
330 .flow_ctrl_set = atl_flow_ctrl_set,
333 .mac_addr_add = atl_add_mac_addr,
334 .mac_addr_remove = atl_remove_mac_addr,
335 .mac_addr_set = atl_set_default_mac_addr,
336 .set_mc_addr_list = atl_dev_set_mc_addr_list,
337 .rxq_info_get = atl_rxq_info_get,
338 .txq_info_get = atl_txq_info_get,
340 .reta_update = atl_reta_update,
341 .reta_query = atl_reta_query,
342 .rss_hash_update = atl_rss_hash_update,
343 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Perform a full hardware reset through the B0 HW layer.
 * Returns the hw_atl_b0_hw_reset() status code.
 */
346 static inline int32_t
347 atl_reset_hw(struct aq_hw_s *hw)
349 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt sources by setting the low-word ITR mask. */
353 atl_enable_intr(struct rte_eth_dev *dev)
355 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
357 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt sources by clearing the low-word ITR mask. */
361 atl_disable_intr(struct aq_hw_s *hw)
363 PMD_INIT_FUNC_TRACE();
364 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init callback invoked from the PCI probe path.
 * Wires up ops/burst functions, programs a hard-coded default HW
 * config, loads FW ops, reads the permanent MAC address, and
 * registers + enables the device interrupt handler.
 * NOTE(review): this excerpt is missing interleaved lines (braces,
 * early returns, error paths); comments describe visible code only.
 */
368 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
370 struct atl_adapter *adapter = eth_dev->data->dev_private;
371 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
372 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
373 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
376 PMD_INIT_FUNC_TRACE();
378 eth_dev->dev_ops = &atl_eth_dev_ops;
379 eth_dev->rx_pkt_burst = &atl_recv_pkts;
380 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
381 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
383 /* For secondary processes, the primary process has done all the work */
384 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
387 /* Vendor and Device ID need to be set before init of shared code */
388 hw->device_id = pci_dev->id.device_id;
389 hw->vendor_id = pci_dev->id.vendor_id;
390 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
392 /* Hardware configuration - hardcode */
393 adapter->hw_cfg.is_lro = false;
394 adapter->hw_cfg.wol = false;
395 adapter->hw_cfg.is_rss = false;
396 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
398 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
404 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
405 adapter->hw_cfg.aq_rss.indirection_table_size =
406 HW_ATL_B0_RSS_REDIRECTION_MAX;
408 hw->aq_nic_cfg = &adapter->hw_cfg;
410 /* disable interrupt */
411 atl_disable_intr(hw);
413 /* Allocate memory for storing MAC addresses */
414 eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
415 RTE_ETHER_ADDR_LEN, 0);
416 if (eth_dev->data->mac_addrs == NULL) {
417 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Detect the firmware flavor and bind the matching FW ops table */
421 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
425 /* Copy the permanent MAC address */
426 if (hw->aq_fw_ops->get_mac_permanent(hw,
427 eth_dev->data->mac_addrs->addr_bytes) != 0)
430 /* Reset the hw statistics */
431 atl_dev_stats_reset(eth_dev);
433 rte_intr_callback_register(intr_handle,
434 atl_dev_interrupt_handler, eth_dev);
436 /* enable uio/vfio intr/eventfd mapping */
437 rte_intr_enable(intr_handle);
439 /* enable support intr */
440 atl_enable_intr(eth_dev);
/* Per-port teardown: closes the device if still running, clears the
 * ops/burst pointers, unregisters the interrupt handler and frees
 * the MAC address array. Primary process only.
 */
446 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
448 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
449 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
452 PMD_INIT_FUNC_TRACE();
454 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
457 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
459 if (hw->adapter_stopped == 0)
460 atl_dev_close(eth_dev);
462 eth_dev->dev_ops = NULL;
463 eth_dev->rx_pkt_burst = NULL;
464 eth_dev->tx_pkt_burst = NULL;
466 /* disable uio intr before callback unregister */
467 rte_intr_disable(intr_handle);
468 rte_intr_callback_unregister(intr_handle,
469 atl_dev_interrupt_handler, eth_dev);
471 rte_free(eth_dev->data->mac_addrs);
472 eth_dev->data->mac_addrs = NULL;
/* PCI probe entry point: allocate an ethdev with atl_adapter-sized
 * private data and run eth_atl_dev_init() on it.
 */
478 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
479 struct rte_pci_device *pci_dev)
481 return rte_eth_dev_pci_generic_probe(pci_dev,
482 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove entry point: tear the port down via eth_atl_dev_uninit(). */
486 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
488 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure callback: only records that a link-status refresh is
 * needed after start; actual HW programming happens in atl_dev_start().
 */
492 atl_dev_configure(struct rte_eth_dev *dev)
494 struct atl_interrupt *intr =
495 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
497 PMD_INIT_FUNC_TRACE();
499 /* set flag to update link status after init */
500 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
506 * Configure device link speed and setup link.
507 * It returns 0 on success.
/* dev_start callback: resets and re-inits the HW, allocates the RX
 * interrupt vector mapping when rxq interrupts are requested, starts
 * the queues, brings the link up and re-enables interrupts.
 * Fixed (non-autoneg) link speeds are rejected.
 * NOTE(review): error-path lines are missing from this excerpt;
 * comments describe visible code only.
 */
510 atl_dev_start(struct rte_eth_dev *dev)
512 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
513 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
514 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
515 uint32_t intr_vector = 0;
519 PMD_INIT_FUNC_TRACE();
521 /* set adapter started */
522 hw->adapter_stopped = 0;
524 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
526 "Invalid link_speeds for port %u, fix speed not supported",
531 /* disable uio/vfio intr/eventfd mapping */
532 rte_intr_disable(intr_handle);
534 /* reinitialize adapter
535 * this calls reset and start
537 status = atl_reset_hw(hw);
541 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
543 hw_atl_b0_hw_start(hw);
544 /* check and configure queue intr-vector mapping */
545 if ((rte_intr_cap_multiple(intr_handle) ||
546 !RTE_ETH_DEV_SRIOV(dev).active) &&
547 dev->data->dev_conf.intr_conf.rxq != 0) {
548 intr_vector = dev->data->nb_rx_queues;
549 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
550 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
551 ATL_MAX_INTR_QUEUE_NUM);
554 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
555 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* One vector slot per RX queue; freed again in atl_dev_stop() */
560 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
561 intr_handle->intr_vec = rte_zmalloc("intr_vec",
562 dev->data->nb_rx_queues * sizeof(int), 0);
563 if (intr_handle->intr_vec == NULL) {
564 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
565 " intr_vec", dev->data->nb_rx_queues);
570 /* initialize transmission unit */
573 /* This can fail when allocating mbufs for descriptor rings */
574 err = atl_rx_init(dev);
576 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
580 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
581 hw->fw_ver_actual >> 24,
582 (hw->fw_ver_actual >> 16) & 0xFF,
583 hw->fw_ver_actual & 0xFFFF);
584 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
586 err = atl_start_queues(dev);
588 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
592 err = atl_dev_set_link_up(dev);
594 err = hw->aq_fw_ops->update_link_status(hw);
599 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
601 if (rte_intr_allow_others(intr_handle)) {
602 /* check if lsc interrupt is enabled */
603 if (dev->data->dev_conf.intr_conf.lsc != 0)
604 atl_dev_lsc_interrupt_setup(dev, true);
606 atl_dev_lsc_interrupt_setup(dev, false);
608 rte_intr_callback_unregister(intr_handle,
609 atl_dev_interrupt_handler, dev);
610 if (dev->data->dev_conf.intr_conf.lsc != 0)
611 PMD_INIT_LOG(INFO, "lsc won't enable because of"
612 " no intr multiplex");
615 /* check if rxq interrupt is enabled */
616 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
617 rte_intr_dp_is_en(intr_handle))
618 atl_dev_rxq_interrupt_setup(dev);
620 /* enable uio/vfio intr/eventfd mapping */
621 rte_intr_enable(intr_handle);
623 /* resume enabled intr since hw reset */
624 atl_enable_intr(dev);
/* Error path: stop any queues started above before bailing out */
629 atl_stop_queues(dev);
634 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* dev_stop callback: masks interrupts, stops queues, clears the
 * recorded link status and releases the rxq interrupt vector mapping
 * allocated in atl_dev_start().
 */
637 atl_dev_stop(struct rte_eth_dev *dev)
639 struct rte_eth_link link;
641 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
643 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
645 PMD_INIT_FUNC_TRACE();
647 /* disable interrupts */
648 atl_disable_intr(hw);
652 hw->adapter_stopped = 1;
654 atl_stop_queues(dev);
656 /* Clear stored conf */
657 dev->data->scattered_rx = 0;
660 /* Clear recorded link status */
661 memset(&link, 0, sizeof(link));
662 rte_eth_linkstatus_set(dev, &link);
664 if (!rte_intr_allow_others(intr_handle))
665 /* resume to the default handler */
666 rte_intr_callback_register(intr_handle,
667 atl_dev_interrupt_handler,
670 /* Clean datapath event and queue/vec mapping */
671 rte_intr_efd_disable(intr_handle);
672 if (intr_handle->intr_vec != NULL) {
673 rte_free(intr_handle->intr_vec);
674 intr_handle->intr_vec = NULL;
679 * Set device link up: enable tx.
/* Translate the configured DPDK link_speeds bitmap into the AQ_NIC
 * rate mask and hand it to firmware. AUTONEG means "advertise every
 * rate the adapter config allows".
 */
682 atl_dev_set_link_up(struct rte_eth_dev *dev)
684 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
686 uint32_t speed_mask = 0;
688 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
689 speed_mask = hw->aq_nic_cfg->link_speed_msk;
691 if (link_speeds & ETH_LINK_SPEED_10G)
692 speed_mask |= AQ_NIC_RATE_10G;
693 if (link_speeds & ETH_LINK_SPEED_5G)
694 speed_mask |= AQ_NIC_RATE_5G;
695 if (link_speeds & ETH_LINK_SPEED_1G)
696 speed_mask |= AQ_NIC_RATE_1G;
697 if (link_speeds & ETH_LINK_SPEED_2_5G)
698 speed_mask |= AQ_NIC_RATE_2G5;
699 if (link_speeds & ETH_LINK_SPEED_100M)
700 speed_mask |= AQ_NIC_RATE_100M;
703 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
707 * Set device link down: disable tx.
/* A zero speed mask tells firmware to drop the link. */
710 atl_dev_set_link_down(struct rte_eth_dev *dev)
712 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
714 return hw->aq_fw_ops->set_link_speed(hw, 0);
718 * Reset and stop device.
/* dev_close callback: frees all RX/TX queue resources.
 * NOTE(review): intermediate lines (e.g. the stop call) are missing
 * from this excerpt.
 */
721 atl_dev_close(struct rte_eth_dev *dev)
723 PMD_INIT_FUNC_TRACE();
727 atl_free_queues(dev);
/* dev_reset callback: full software reset implemented as
 * uninit followed by re-init of the port.
 */
731 atl_dev_reset(struct rte_eth_dev *dev)
735 ret = eth_atl_dev_uninit(dev);
739 ret = eth_atl_dev_init(dev);
/* Push the MACsec configuration accumulated in aq_macsec_config down
 * to firmware as a sequence of mailbox requests: global enable,
 * TX SC, RX SC, TX SA, RX SA. No-op when MACsec is disabled or the
 * firmware lacks the send_macsec_req op.
 * NOTE(review): per-request error-handling lines are missing from
 * this excerpt; comments describe visible code only.
 */
745 atl_dev_configure_macsec(struct rte_eth_dev *dev)
747 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
748 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
749 struct aq_macsec_config *aqcfg = &cf->aq_macsec;
750 struct macsec_msg_fw_request msg_macsec;
751 struct macsec_msg_fw_response response;
753 if (!aqcfg->common.macsec_enabled ||
754 hw->aq_fw_ops->send_macsec_req == NULL)
757 memset(&msg_macsec, 0, sizeof(msg_macsec));
759 /* Creating set of sc/sa structures from parameters provided by DPDK */
761 /* Configure macsec */
762 msg_macsec.msg_type = macsec_cfg_msg;
763 msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
764 msg_macsec.cfg.interrupts_enabled = 1;
766 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
771 memset(&msg_macsec, 0, sizeof(msg_macsec));
773 /* Configure TX SC */
775 msg_macsec.msg_type = macsec_add_tx_sc_msg;
776 msg_macsec.txsc.index = 0; /* TXSC always one (??) */
777 msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
779 /* MAC addr for TX */
/* Byte-swap the two 32-bit halves of the configured TX MAC address */
780 msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
781 msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
782 msg_macsec.txsc.sa_mask = 0x3f;
784 msg_macsec.txsc.da_mask = 0;
785 msg_macsec.txsc.tci = 0x0B;
786 msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
789 * Creating SCI (Secure Channel Identifier).
790 * SCI constructed from Source MAC and Port identifier
792 uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
793 (msg_macsec.txsc.mac_sa[0] >> 16);
794 uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
796 uint32_t port_identifier = 1;
798 msg_macsec.txsc.sci[1] = sci_hi_part;
799 msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
801 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
806 memset(&msg_macsec, 0, sizeof(msg_macsec));
808 /* Configure RX SC */
810 msg_macsec.msg_type = macsec_add_rx_sc_msg;
811 msg_macsec.rxsc.index = aqcfg->rxsc.pi;
812 msg_macsec.rxsc.replay_protect =
813 aqcfg->common.replay_protection_enabled;
814 msg_macsec.rxsc.anti_replay_window = 0;
816 /* MAC addr for RX */
817 msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
818 msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
819 msg_macsec.rxsc.da_mask = 0;//0x3f;
821 msg_macsec.rxsc.sa_mask = 0;
823 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
828 memset(&msg_macsec, 0, sizeof(msg_macsec));
830 /* Configure TX SA (note: original comment said "RX SC") */
832 msg_macsec.msg_type = macsec_add_tx_sa_msg;
833 msg_macsec.txsa.index = aqcfg->txsa.idx;
834 msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
/* Key words are stored reversed and byte-swapped for firmware */
836 msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
837 msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
838 msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
839 msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
841 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
846 memset(&msg_macsec, 0, sizeof(msg_macsec));
848 /* Configure RX SA */
850 msg_macsec.msg_type = macsec_add_rx_sa_msg;
851 msg_macsec.rxsa.index = aqcfg->rxsa.idx;
852 msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
854 msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
855 msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
856 msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
857 msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
859 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
/* Record MACsec enable plus encryption/replay-protection flags in the
 * adapter config; applied to firmware later by
 * atl_dev_configure_macsec().
 */
867 int atl_macsec_enable(struct rte_eth_dev *dev,
868 uint8_t encr, uint8_t repl_prot)
870 struct aq_hw_cfg_s *cfg =
871 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
873 cfg->aq_macsec.common.macsec_enabled = 1;
874 cfg->aq_macsec.common.encryption_enabled = encr;
875 cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
/* Clear the MACsec enable flag in the adapter config. */
880 int atl_macsec_disable(struct rte_eth_dev *dev)
882 struct aq_hw_cfg_s *cfg =
883 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
885 cfg->aq_macsec.common.macsec_enabled = 0;
/* Store the TX secure-channel MAC address in the adapter config.
 * The 6-byte MAC is copied at offset 2 so it right-aligns inside the
 * two 32-bit words read by atl_dev_configure_macsec().
 */
890 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
892 struct aq_hw_cfg_s *cfg =
893 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
895 memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
896 memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
/* Store the RX secure-channel MAC address and port identifier (pi)
 * in the adapter config; same offset-2 layout as the TX SC MAC.
 */
902 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
903 uint8_t *mac, uint16_t pi)
905 struct aq_hw_cfg_s *cfg =
906 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
908 memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
909 memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
911 cfg->aq_macsec.rxsc.pi = pi;
/* Record the TX secure-association parameters (index, association
 * number, next packet number, 16-byte key) in the adapter config.
 */
916 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
917 uint8_t idx, uint8_t an,
918 uint32_t pn, uint8_t *key)
920 struct aq_hw_cfg_s *cfg =
921 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
923 cfg->aq_macsec.txsa.idx = idx;
924 cfg->aq_macsec.txsa.pn = pn;
925 cfg->aq_macsec.txsa.an = an;
927 memcpy(&cfg->aq_macsec.txsa.key, key, 16);
/* Record the RX secure-association parameters (index, association
 * number, next packet number, 16-byte key) in the adapter config.
 */
931 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
932 uint8_t idx, uint8_t an,
933 uint32_t pn, uint8_t *key)
935 struct aq_hw_cfg_s *cfg =
936 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
938 cfg->aq_macsec.rxsa.idx = idx;
939 cfg->aq_macsec.rxsa.pn = pn;
940 cfg->aq_macsec.rxsa.an = an;
942 memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
/* stats_get callback: refresh HW counters via firmware, then fill
 * rte_eth_stats from the HW DMA counters plus the software per-queue
 * counters maintained by the RX/TX paths.
 */
947 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
949 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
950 struct aq_hw_s *hw = &adapter->hw;
951 struct atl_sw_stats *swstats = &adapter->sw_stats;
954 hw->aq_fw_ops->update_stats(hw);
956 /* Fill out the rte_eth_stats statistics structure */
957 stats->ipackets = hw->curr_stats.dma_pkt_rc;
958 stats->ibytes = hw->curr_stats.dma_oct_rc;
959 stats->imissed = hw->curr_stats.dpc;
960 stats->ierrors = hw->curr_stats.erpt;
962 stats->opackets = hw->curr_stats.dma_pkt_tc;
963 stats->obytes = hw->curr_stats.dma_oct_tc;
966 stats->rx_nombuf = swstats->rx_nombuf;
968 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
969 stats->q_ipackets[i] = swstats->q_ipackets[i];
970 stats->q_opackets[i] = swstats->q_opackets[i];
971 stats->q_ibytes[i] = swstats->q_ibytes[i];
972 stats->q_obytes[i] = swstats->q_obytes[i];
973 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset / xstats_reset callback: re-read HW counters (so the
 * FW baseline advances) then zero both the cached HW totals and the
 * software per-queue counters.
 */
979 atl_dev_stats_reset(struct rte_eth_dev *dev)
981 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
982 struct aq_hw_s *hw = &adapter->hw;
984 hw->aq_fw_ops->update_stats(hw);
986 /* Reset software totals */
987 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
989 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* Count the xstats available on this port: MACsec entries are
 * excluded when the firmware capability bit CAPS_LO_MACSEC is clear.
 */
995 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
997 struct atl_adapter *adapter =
998 (struct atl_adapter *)dev->data->dev_private;
1000 struct aq_hw_s *hw = &adapter->hw;
1001 unsigned int i, count = 0;
1003 for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
1004 if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
1005 ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
/* xstats_get_names callback: copy table names into the caller's
 * array, bounded by both the caller's size and the available count.
 */
1015 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1016 struct rte_eth_xstat_name *xstats_names,
1020 unsigned int count = atl_dev_xstats_get_count(dev);
1023 for (i = 0; i < size && i < count; i++) {
1024 snprintf(xstats_names[i].name,
1025 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1026 atl_xstats_tbl[i].name);
/* xstats_get callback: fetch MACsec counters from firmware when
 * supported (0xff index = "all"), then populate values by reading
 * each table entry's offset out of the matching stats structure.
 */
1034 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1037 struct atl_adapter *adapter = dev->data->dev_private;
1038 struct aq_hw_s *hw = &adapter->hw;
1039 struct get_stats req = { 0 };
1040 struct macsec_msg_fw_request msg = { 0 };
1041 struct macsec_msg_fw_response resp = { 0 };
1044 unsigned int count = atl_dev_xstats_get_count(dev);
1049 if (hw->aq_fw_ops->send_macsec_req != NULL) {
1050 req.ingress_sa_index = 0xff;
1051 req.egress_sc_index = 0xff;
1052 req.egress_sa_index = 0xff;
1054 msg.msg_type = macsec_get_stats_msg;
1057 err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1060 for (i = 0; i < n && i < count; i++) {
1063 switch (atl_xstats_tbl[i].type) {
1064 case XSTATS_TYPE_MSM:
1065 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1066 atl_xstats_tbl[i].offset);
1068 case XSTATS_TYPE_MACSEC:
1071 *(u64 *)((uint8_t *)&resp.stats +
1072 atl_xstats_tbl[i].offset);
/* fw_version_get callback: format the firmware version as
 * "major.minor.patch"; the returned length includes the terminating
 * NUL, per the ethdev fw_version_get contract.
 */
1082 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1084 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1085 uint32_t fw_ver = 0;
1086 unsigned int ret = 0;
1088 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1092 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1093 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1095 ret += 1; /* add string null-terminator */
/* dev_infos_get callback: report queue counts, MTU/MAC limits,
 * offload capabilities, default ring thresholds, descriptor limits,
 * RSS parameters and supported link speeds.
 */
1104 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1106 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1108 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1109 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1111 dev_info->min_rx_bufsize = 1024;
1112 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1113 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1114 dev_info->max_vfs = pci_dev->max_vfs;
1116 dev_info->max_hash_mac_addrs = 0;
1117 dev_info->max_vmdq_pools = 0;
1118 dev_info->vmdq_queue_num = 0;
1120 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1122 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1125 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1126 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1129 dev_info->default_txconf = (struct rte_eth_txconf) {
1130 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1133 dev_info->rx_desc_lim = rx_desc_lim;
1134 dev_info->tx_desc_lim = tx_desc_lim;
1136 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1137 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1138 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1140 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1141 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1142 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1143 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* dev_supported_ptypes_get callback: return the static ptype list
 * only when the default burst function is in use (ptypes are set by
 * atl_recv_pkts); otherwise fall through (tail not visible here).
 */
1148 static const uint32_t *
1149 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1151 static const uint32_t ptypes[] = {
1153 RTE_PTYPE_L2_ETHER_ARP,
1154 RTE_PTYPE_L2_ETHER_VLAN,
1164 if (dev->rx_pkt_burst == atl_recv_pkts)
/* Deferred-work callback scheduled from atl_dev_link_update() via
 * rte_eal_alarm_set(): re-applies the MACsec configuration after a
 * link event (FW MACsec state does not survive link transitions).
 */
1171 atl_dev_delayed_handler(void *param)
1173 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1175 atl_dev_configure_macsec(dev);
1179 /* return 0 means link status changed, -1 means not changed */
/* link_update callback: query firmware for the current link, publish
 * it via rte_eth_linkstatus_set(), re-sync RX flow control with what
 * FW negotiated, and schedule the delayed MACsec re-config handler.
 */
1181 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1183 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1184 struct rte_eth_link link, old;
1185 u32 fc = AQ_NIC_FC_OFF;
1188 link.link_status = ETH_LINK_DOWN;
1189 link.link_speed = 0;
1190 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1191 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1192 memset(&old, 0, sizeof(old));
1194 /* load old link status */
1195 rte_eth_linkstatus_get(dev, &old);
1197 /* read current link status */
1198 err = hw->aq_fw_ops->update_link_status(hw);
1203 if (hw->aq_link_status.mbps == 0) {
1204 /* write default (down) link status */
1205 rte_eth_linkstatus_set(dev, &link);
1206 if (link.link_status == old.link_status)
1211 link.link_status = ETH_LINK_UP;
1212 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1213 link.link_speed = hw->aq_link_status.mbps;
1215 rte_eth_linkstatus_set(dev, &link);
1217 if (link.link_status == old.link_status)
1220 /* Driver has to update flow control settings on RX block
1221 * on any link event.
1222 * We should query FW whether it negotiated FC.
1224 if (hw->aq_fw_ops->get_flow_control) {
1225 hw->aq_fw_ops->get_flow_control(hw, &fc);
1226 hw_atl_b0_set_fc(hw, fc, 0U);
/* Re-apply MACsec config ~1 s after the link event, off this path */
1229 if (rte_eal_alarm_set(1000 * 1000,
1230 atl_dev_delayed_handler, (void *)dev) < 0)
1231 PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1237 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1239 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1241 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1247 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1249 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1251 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1257 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1259 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1261 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1267 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1269 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1271 if (dev->data->promiscuous == 1)
1272 return 0; /* must remain in all_multicast mode */
1274 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once when the NIC is initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	/* 'on' is not consulted; only the current link state is logged. */
	atl_dev_link_status_print(dev);
/* RX-queue interrupt setup hook; the device argument is unused here —
 * presumably per-queue IRQ routing is configured elsewhere (TODO confirm).
 */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read the device IRQ cause register, mask further interrupts, and
 * latch the events that need service into intr->flags for
 * atl_dev_interrupt_action() to consume.
 */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Fetch the pending interrupt causes from HW. */
	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask interrupts while the event is being handled. */
	atl_disable_intr(hw);

	/* Remember that a link-state update is required. */
	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
		/* Link-down branch. */
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));

	/* Additional debug output: PCI location and raw speed value. */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		pci_dev->addr.domain,
		pci_dev->addr.devid,
		pci_dev->addr.function);
	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param intr_handle
 *  Pointer to the EAL interrupt handle to acknowledge.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	/* Only act on link events flagged by atl_dev_interrupt_get_status(). */
	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);

	/* MACsec expiry polling needs FW support for macsec requests. */
	if (hw->aq_fw_ops->send_macsec_req == NULL)

	/* Check macsec Keys expired */
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };

	/* Query stats for SA/SC index 0. */
	req.ingress_sa_index = 0x0;
	req.egress_sc_index = 0x0;
	req.egress_sa_index = 0x0;
	msg.msg_type = macsec_get_stats_msg;

	int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	PMD_DRV_LOG(ERR, "send_macsec_req fail");

	/* Raise the MACsec event when any key or threshold expired. */
	if (resp.stats.egress_threshold_expired ||
	    resp.stats.ingress_threshold_expired ||
	    resp.stats.egress_expired ||
	    resp.stats.ingress_expired) {
		PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_MACSEC, NULL);

	/* Re-enable device interrupts and ack the host interrupt line. */
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	/* Latch causes/mask IRQs first, then service the latched events. */
	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
/* The module EEPROM is exposed as a fixed-size SFP EEPROM image. */
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
	return SFP_EEPROM_SIZE;
1462 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1463 struct rte_dev_eeprom_info *eeprom)
1465 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1466 uint32_t dev_addr = SMBUS_DEVICE_ID;
1468 if (hw->aq_fw_ops->get_eeprom == NULL)
1471 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1472 eeprom->data == NULL)
1475 if (eeprom->magic > 0x7F)
1479 dev_addr = eeprom->magic;
1481 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1482 eeprom->length, eeprom->offset);
1485 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1486 struct rte_dev_eeprom_info *eeprom)
1488 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1489 uint32_t dev_addr = SMBUS_DEVICE_ID;
1491 if (hw->aq_fw_ops->set_eeprom == NULL)
1494 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1495 eeprom->data == NULL)
1498 if (eeprom->magic > 0x7F)
1502 dev_addr = eeprom->magic;
1504 return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1505 eeprom->length, eeprom->offset);
/* ethdev get_regs hook: when regs->data is NULL, report the dump
 * size/width so the caller can allocate; otherwise perform a full
 * register dump and report the device version.
 */
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Query mode: fill in geometry only. */
	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;
/* Translate the FW-reported flow-control state (AQ_NIC_FC_* bitmask)
 * into the RTE_FC_* mode expected by rte_eth_fc_conf.
 */
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	/* FW op is optional; bail out when not provided. */
	if (hw->aq_fw_ops->get_flow_control == NULL)

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	/* Map the RX/TX pause bits onto the four RTE_FC_* modes. */
	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;
/* Translate the requested RTE_FC_* mode into the AQ_NIC_FC_* bitmask
 * and push it to FW — but only when the setting actually changed.
 */
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Remember the old value to detect a real change below. */
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	/* FW op is optional; bail out when not provided. */
	if (hw->aq_fw_ops->set_flow_control == NULL)

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	/* Only bother FW when the effective setting changed. */
	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);
/* Program (enable) or clear (disable) the unicast MAC filter at
 * 'index'.  Returns the error code accumulated in the HW flags.
 *
 * NOTE(review): atl_remove_mac_addr() calls this with mac_addr == NULL;
 * the dereference below must be guarded on mac_addr being non-NULL
 * (h/l then stay zero) — confirm against the full function body.
 */
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;

	/* Split the 6-byte MAC into the 16-bit MSW and 32-bit LSW
	 * layout used by the HW filter registers.
	 */
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	/* Disable the filter while rewriting its address. */
	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	/* Re-enable only when the entry is meant to stay active. */
	hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	/* Collapse accumulated HW error flags into a return code. */
	err = aq_hw_err_from_flags(hw);
/* ethdev hook: install a unicast MAC filter at 'index'.
 * Rejects the all-zero address; 'pool' is unused by this HW.
 */
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");

	/* Delegate the actual register programming. */
	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* ethdev hook: disable the unicast MAC filter at 'index'
 * (NULL address, enable = false).
 */
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	atl_update_mac_addr(dev, index, NULL, false);
/*
 * Replace the primary (index 0) unicast MAC filter with 'addr'.
 *
 * Propagates the result of reprogramming the filter instead of
 * unconditionally reporting success, so callers learn about an
 * invalid (all-zero) address or a HW write failure.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
/* ethdev MTU hook: validate the resulting frame size against the
 * device's max RX packet length and record it in the RX config.
 */
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct rte_eth_dev_info dev_info;
	/* MTU excludes the L2 header and CRC; the HW limit is on the
	 * full on-wire frame.
	 */
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);

	/* Reject out-of-range MTUs. */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* ethdev VLAN filter hook: add (on != 0) or remove (on == 0) a HW
 * VLAN filter entry for vlan_id, then refresh VLAN promiscuous mode
 * (enabled only while no filter entries are in use).
 */
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing entry for this VLAN_ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			/* Disable VLAN filter. */
			hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

			/* Clear VLAN filter entry */
			cfg->vlan_filter[i] = 0;

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)

	/* VLAN_ID already exists, or was already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)

	/* Try to find a free VLAN filter slot for the new VLAN_ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter slot to add the new VLAN_ID. */

	/* Program and enable the new filter entry. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

	/* Enable VLAN promisc mode if vlan_filter table is empty. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
/* Enable (en != 0) or disable (en == 0) every HW VLAN filter entry
 * that currently holds a non-zero VLAN id in the SW shadow table.
 */
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Only touch slots that actually carry a configured VLAN id. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
/* ethdev vlan_offload_set hook: apply VLAN filtering and per-queue
 * VLAN stripping according to 'mask'; QinQ (extend) is not supported.
 */
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	/* Record the strip setting and apply it to every RX queue. */
	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	/* QinQ / extended VLAN is not supported by this HW. */
	if (mask & ETH_VLAN_EXTEND_MASK)
/* ethdev vlan_tpid_set hook: program the inner or outer VLAN
 * Ethertype the HW matches on; other VLAN types are rejected.
 */
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		/* default: unsupported VLAN type. */
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1782 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1784 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1786 PMD_INIT_FUNC_TRACE();
1788 if (queue_id > dev->data->nb_rx_queues) {
1789 PMD_DRV_LOG(ERR, "Invalid queue id");
1793 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
/* ethdev set_mc_addr_list hook: rewrite the multicast section of the
 * unicast filter table (slots HW_ATL_B0_MAC_MIN and up).  Slots past
 * nb_mc_addr are programmed with zeros and left disabled.
 */
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reject lists larger than the available filter slots. */
	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			/* Split MAC into the HW register layout:
			 * 32-bit LSW and 16-bit MSW.
			 */
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];

		/* Disable the slot while rewriting it. */
		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		/* Re-enable only slots that hold a real address. */
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
/* ethdev reta_update hook: copy the caller's redirection entries into
 * the SW shadow table (clamped to valid queue ids) and push to HW.
 *
 * NOTE(review): reta_conf->mask is not consulted and only
 * reta_conf[0].reta[] is read; this is only safe while the HW
 * indirection table size is <= RTE_RETA_GROUP_SIZE — confirm.
 */
atl_reta_update(struct rte_eth_dev *dev,
		  struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	/* Clamp each entry to the last valid RX queue index. */
	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
/* ethdev reta_query hook: return the SW shadow of the RSS redirection
 * table; all entries are reported as valid via the mask.
 */
atl_reta_query(struct rte_eth_dev *dev,
		  struct rte_eth_rss_reta_entry64 *reta_conf,
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	/* Mark every returned entry as valid. */
	reta_conf->mask = ~0U;
1863 atl_rss_hash_update(struct rte_eth_dev *dev,
1864 struct rte_eth_rss_conf *rss_conf)
1866 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1867 struct aq_hw_cfg_s *cfg =
1868 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1869 static u8 def_rss_key[40] = {
1870 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1871 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1872 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1873 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1874 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1877 cfg->is_rss = !!rss_conf->rss_hf;
1878 if (rss_conf->rss_key) {
1879 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1880 rss_conf->rss_key_len);
1881 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1883 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1884 sizeof(def_rss_key));
1885 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1888 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1889 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/* ethdev rss_hash_conf_get hook: report the currently configured RSS
 * hash types and, when the caller provides a key buffer, copy the
 * active hash key into it.
 *
 * NOTE(review): the copy trusts that the caller's rss_key buffer is at
 * least hash_secret_key_size bytes, per the ethdev contract — confirm.
 */
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	/* Report all supported hash types while RSS is enabled. */
	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
1911 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1913 if (strcmp(dev->device->driver->name, drv->driver.name))
/* Check that 'dev' is driven by this (atlantic) PMD. */
is_atlantic_supported(struct rte_eth_dev *dev)
	return is_device_supported(dev, &rte_atl_pmd);
/* Register the PMD with the EAL: driver object, supported PCI id
 * table, and the kernel modules it depends on.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register this PMD's init and driver log types with the
 * EAL logging subsystem; default verbosity is NOTICE.
 */
RTE_INIT(atl_init_log)
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	/* rte_log_register() returns a negative value on failure. */
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);