1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
8 #include "atl_ethdev.h"
9 #include "atl_common.h"
10 #include "atl_hw_regs.h"
12 #include "hw_atl/hw_atl_llh.h"
13 #include "hw_atl/hw_atl_b0.h"
14 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations of the ethdev ops implemented below. */
/* Device lifecycle. */
16 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
17 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 static int atl_dev_configure(struct rte_eth_dev *dev);
20 static int atl_dev_start(struct rte_eth_dev *dev);
21 static void atl_dev_stop(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
23 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
24 static void atl_dev_close(struct rte_eth_dev *dev);
25 static int atl_dev_reset(struct rte_eth_dev *dev);
/* RX-mode toggles and link handling. */
26 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
30 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
/* Statistics. */
32 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
33 struct rte_eth_xstat_name *xstats_names,
36 static int atl_dev_stats_get(struct rte_eth_dev *dev,
37 struct rte_eth_stats *stats);
39 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
40 struct rte_eth_xstat *stats, unsigned int n);
42 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
47 static void atl_dev_info_get(struct rte_eth_dev *dev,
48 struct rte_eth_dev_info *dev_info);
50 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/* VLAN offloads. */
55 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
56 uint16_t vlan_id, int on);
58 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
61 uint16_t queue_id, int on);
63 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
64 enum rte_vlan_type vlan_type, uint16_t tpid);
/* EEPROM and register dump. */
67 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
68 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
69 struct rte_dev_eeprom_info *eeprom);
70 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
71 struct rte_dev_eeprom_info *eeprom);
74 static int atl_dev_get_regs(struct rte_eth_dev *dev,
75 struct rte_dev_reg_info *regs);
/* Flow control. */
78 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
79 struct rte_eth_fc_conf *fc_conf);
80 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
81 struct rte_eth_fc_conf *fc_conf);
83 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
/* Interrupt plumbing. */
86 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
87 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
89 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
90 struct rte_intr_handle *handle);
91 static void atl_dev_interrupt_handler(void *param);
/* MAC address management. */
94 static int atl_add_mac_addr(struct rte_eth_dev *dev,
95 struct ether_addr *mac_addr,
96 uint32_t index, uint32_t pool);
97 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
98 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
99 struct ether_addr *mac_addr);
101 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
102 struct ether_addr *mc_addr_set,
103 uint32_t nb_mc_addr);
/* RSS. */
106 static int atl_reta_update(struct rte_eth_dev *dev,
107 struct rte_eth_rss_reta_entry64 *reta_conf,
109 static int atl_reta_query(struct rte_eth_dev *dev,
110 struct rte_eth_rss_reta_entry64 *reta_conf,
112 static int atl_rss_hash_update(struct rte_eth_dev *dev,
113 struct rte_eth_rss_conf *rss_conf);
114 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
115 struct rte_eth_rss_conf *rss_conf);
/* PCI bus glue. */
118 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
119 struct rte_pci_device *pci_dev);
120 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
/* NOTE(review): duplicate prototype — atl_dev_info_get is already declared
 * above (orig. lines 47-48); this re-declaration is redundant and can go. */
122 static void atl_dev_info_get(struct rte_eth_dev *dev,
123 struct rte_eth_dev_info *dev_info);
/* Driver-wide log type IDs (registered elsewhere in this file). */
125 int atl_logtype_init;
126 int atl_logtype_driver;
129 * The set of PCI devices this driver supports
/* Device-ID table: legacy D-series IDs, AQC1xx, -S and -E variants;
 * terminated by a zeroed sentinel entry as required by the PCI bus. */
131 static const struct rte_pci_id pci_id_atl_map[] = {
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
150 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
152 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
153 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
154 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: needs BAR mapping, supports link-state-change
 * interrupts, and can run with IOVA-as-VA. */
157 static struct rte_pci_driver rte_atl_pmd = {
158 .id_table = pci_id_atl_map,
159 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
160 RTE_PCI_DRV_IOVA_AS_VA,
161 .probe = eth_atl_pci_probe,
162 .remove = eth_atl_pci_remove,
/* RX offload capabilities advertised in atl_dev_info_get(). */
165 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
166 | DEV_RX_OFFLOAD_IPV4_CKSUM \
167 | DEV_RX_OFFLOAD_UDP_CKSUM \
168 | DEV_RX_OFFLOAD_TCP_CKSUM \
169 | DEV_RX_OFFLOAD_JUMBO_FRAME \
170 | DEV_RX_OFFLOAD_MACSEC_STRIP \
171 | DEV_RX_OFFLOAD_VLAN_FILTER)
/* TX offload capabilities advertised in atl_dev_info_get(). */
173 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
174 | DEV_TX_OFFLOAD_IPV4_CKSUM \
175 | DEV_TX_OFFLOAD_UDP_CKSUM \
176 | DEV_TX_OFFLOAD_TCP_CKSUM \
177 | DEV_TX_OFFLOAD_TCP_TSO \
178 | DEV_TX_OFFLOAD_MACSEC_INSERT \
179 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Descriptor-ring limits reported to applications. */
181 static const struct rte_eth_desc_lim rx_desc_lim = {
182 .nb_max = ATL_MAX_RING_DESC,
183 .nb_min = ATL_MIN_RING_DESC,
184 .nb_align = ATL_RXD_ALIGN,
187 static const struct rte_eth_desc_lim tx_desc_lim = {
188 .nb_max = ATL_MAX_RING_DESC,
189 .nb_min = ATL_MIN_RING_DESC,
190 .nb_align = ATL_TXD_ALIGN,
191 .nb_seg_max = ATL_TX_MAX_SEG,
192 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Builds one xstats table entry: the stat's name plus its byte offset
 * inside struct aq_stats_s, so values can be read generically. */
195 #define ATL_XSTATS_FIELD(name) { \
197 offsetof(struct aq_stats_s, name) \
200 struct atl_xstats_tbl_s {
/* Extended-statistics table: per-type RX/TX packet and byte counters
 * (u=unicast, m=multicast, b=broadcast; e.g. uprc = unicast pkts rx). */
205 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
206 ATL_XSTATS_FIELD(uprc),
207 ATL_XSTATS_FIELD(mprc),
208 ATL_XSTATS_FIELD(bprc),
209 ATL_XSTATS_FIELD(erpt),
210 ATL_XSTATS_FIELD(uptc),
211 ATL_XSTATS_FIELD(mptc),
212 ATL_XSTATS_FIELD(bptc),
213 ATL_XSTATS_FIELD(erpr),
214 ATL_XSTATS_FIELD(ubrc),
215 ATL_XSTATS_FIELD(ubtc),
216 ATL_XSTATS_FIELD(mbrc),
217 ATL_XSTATS_FIELD(mbtc),
218 ATL_XSTATS_FIELD(bbrc),
219 ATL_XSTATS_FIELD(bbtc),
/* ethdev ops table: maps the generic rte_ethdev API onto the atl_*
 * implementations in this file (queue ops live in atl_rxtx.c). */
222 static const struct eth_dev_ops atl_eth_dev_ops = {
223 .dev_configure = atl_dev_configure,
224 .dev_start = atl_dev_start,
225 .dev_stop = atl_dev_stop,
226 .dev_set_link_up = atl_dev_set_link_up,
227 .dev_set_link_down = atl_dev_set_link_down,
228 .dev_close = atl_dev_close,
229 .dev_reset = atl_dev_reset,
232 .promiscuous_enable = atl_dev_promiscuous_enable,
233 .promiscuous_disable = atl_dev_promiscuous_disable,
234 .allmulticast_enable = atl_dev_allmulticast_enable,
235 .allmulticast_disable = atl_dev_allmulticast_disable,
238 .link_update = atl_dev_link_update,
240 .get_reg = atl_dev_get_regs,
/* Note: xstats_reset intentionally reuses the basic stats_reset. */
243 .stats_get = atl_dev_stats_get,
244 .xstats_get = atl_dev_xstats_get,
245 .xstats_get_names = atl_dev_xstats_get_names,
246 .stats_reset = atl_dev_stats_reset,
247 .xstats_reset = atl_dev_stats_reset,
249 .fw_version_get = atl_fw_version_get,
250 .dev_infos_get = atl_dev_info_get,
251 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
253 .mtu_set = atl_dev_mtu_set,
256 .vlan_filter_set = atl_vlan_filter_set,
257 .vlan_offload_set = atl_vlan_offload_set,
258 .vlan_tpid_set = atl_vlan_tpid_set,
259 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
262 .rx_queue_start = atl_rx_queue_start,
263 .rx_queue_stop = atl_rx_queue_stop,
264 .rx_queue_setup = atl_rx_queue_setup,
265 .rx_queue_release = atl_rx_queue_release,
267 .tx_queue_start = atl_tx_queue_start,
268 .tx_queue_stop = atl_tx_queue_stop,
269 .tx_queue_setup = atl_tx_queue_setup,
270 .tx_queue_release = atl_tx_queue_release,
272 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
273 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
275 .rx_queue_count = atl_rx_queue_count,
276 .rx_descriptor_status = atl_dev_rx_descriptor_status,
277 .tx_descriptor_status = atl_dev_tx_descriptor_status,
280 .get_eeprom_length = atl_dev_get_eeprom_length,
281 .get_eeprom = atl_dev_get_eeprom,
282 .set_eeprom = atl_dev_set_eeprom,
285 .flow_ctrl_get = atl_flow_ctrl_get,
286 .flow_ctrl_set = atl_flow_ctrl_set,
289 .mac_addr_add = atl_add_mac_addr,
290 .mac_addr_remove = atl_remove_mac_addr,
291 .mac_addr_set = atl_set_default_mac_addr,
292 .set_mc_addr_list = atl_dev_set_mc_addr_list,
293 .rxq_info_get = atl_rxq_info_get,
294 .txq_info_get = atl_txq_info_get,
296 .reta_update = atl_reta_update,
297 .reta_query = atl_reta_query,
298 .rss_hash_update = atl_rss_hash_update,
299 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: full HW reset via the B0-specific HAL entry point. */
302 static inline int32_t
303 atl_reset_hw(struct aq_hw_s *hw)
305 return hw_atl_b0_hw_reset(hw);
/* Unmask (enable) the low 32 ITR interrupt bits for this port. */
309 atl_enable_intr(struct rte_eth_dev *dev)
311 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
313 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) the low 32 ITR interrupt bits. */
317 atl_disable_intr(struct aq_hw_s *hw)
319 PMD_INIT_FUNC_TRACE();
320 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/*
 * Per-port init, called from the generic PCI probe path.
 * Wires up ops/burst functions, records PCI IDs and BAR0, applies the
 * hardcoded HW defaults, initializes the FW ops table, reads the
 * permanent MAC, then registers and enables the PCI interrupt.
 * NOTE(review): several error-return lines appear elided in this excerpt.
 */
324 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
326 struct atl_adapter *adapter =
327 (struct atl_adapter *)eth_dev->data->dev_private;
328 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
329 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
330 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
333 PMD_INIT_FUNC_TRACE();
335 eth_dev->dev_ops = &atl_eth_dev_ops;
336 eth_dev->rx_pkt_burst = &atl_recv_pkts;
337 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
338 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
340 /* For secondary processes, the primary process has done all the work */
341 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
344 /* Vendor and Device ID need to be set before init of shared code */
345 hw->device_id = pci_dev->id.device_id;
346 hw->vendor_id = pci_dev->id.vendor_id;
347 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
349 /* Hardware configuration - hardcode */
350 adapter->hw_cfg.is_lro = false;
351 adapter->hw_cfg.wol = false;
352 adapter->hw_cfg.is_rss = false;
353 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
355 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
/* Default: flow control on in both directions. */
361 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
362 adapter->hw_cfg.aq_rss.indirection_table_size =
363 HW_ATL_B0_RSS_REDIRECTION_MAX;
365 hw->aq_nic_cfg = &adapter->hw_cfg;
367 /* disable interrupt */
368 atl_disable_intr(hw);
370 /* Allocate memory for storing MAC addresses */
371 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
372 if (eth_dev->data->mac_addrs == NULL) {
373 PMD_INIT_LOG(ERR, "MAC Malloc failed");
377 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
381 /* Copy the permanent MAC address */
382 if (hw->aq_fw_ops->get_mac_permanent(hw,
383 eth_dev->data->mac_addrs->addr_bytes) != 0)
386 /* Reset the hw statistics */
387 atl_dev_stats_reset(eth_dev);
389 rte_intr_callback_register(intr_handle,
390 atl_dev_interrupt_handler, eth_dev);
392 /* enable uio/vfio intr/eventfd mapping */
393 rte_intr_enable(intr_handle);
395 /* enable support intr */
396 atl_enable_intr(eth_dev);
/*
 * Per-port teardown (primary process only): close the device if still
 * running, clear the ops/burst pointers, unhook the interrupt callback
 * and free the MAC address array allocated in eth_atl_dev_init().
 */
402 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
404 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
405 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
408 PMD_INIT_FUNC_TRACE();
410 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
413 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Close only if the adapter has not already been stopped. */
415 if (hw->adapter_stopped == 0)
416 atl_dev_close(eth_dev);
418 eth_dev->dev_ops = NULL;
419 eth_dev->rx_pkt_burst = NULL;
420 eth_dev->tx_pkt_burst = NULL;
422 /* disable uio intr before callback unregister */
423 rte_intr_disable(intr_handle);
424 rte_intr_callback_unregister(intr_handle,
425 atl_dev_interrupt_handler, eth_dev);
427 rte_free(eth_dev->data->mac_addrs);
428 eth_dev->data->mac_addrs = NULL;
/* PCI probe: allocate an ethdev with atl_adapter private data and run
 * eth_atl_dev_init() on it via the generic helper. */
434 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
435 struct rte_pci_device *pci_dev)
437 return rte_eth_dev_pci_generic_probe(pci_dev,
438 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: generic teardown driving eth_atl_dev_uninit(). */
442 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
444 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure: nothing to program yet — just flag that the link
 * status must be refreshed once the device starts. */
448 atl_dev_configure(struct rte_eth_dev *dev)
450 struct atl_interrupt *intr =
451 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
453 PMD_INIT_FUNC_TRACE();
455 /* set flag to update link status after init */
456 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * dev_start: reset + re-init the HW, set up RX-queue interrupt vectors,
 * init the datapath, start queues, bring the link up and re-enable
 * interrupts. Rejects fixed-speed configuration (autoneg only).
 * NOTE(review): error-handling/goto lines appear elided in this excerpt.
 */
462 * Configure device link speed and setup link.
463 * It returns 0 on success.
466 atl_dev_start(struct rte_eth_dev *dev)
468 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
469 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
470 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
471 uint32_t intr_vector = 0;
475 PMD_INIT_FUNC_TRACE();
477 /* set adapter started */
478 hw->adapter_stopped = 0;
/* Fixed link speed is not supported by this PMD. */
480 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
482 "Invalid link_speeds for port %u, fix speed not supported",
487 /* disable uio/vfio intr/eventfd mapping */
488 rte_intr_disable(intr_handle);
490 /* reinitialize adapter
491 * this calls reset and start
493 status = atl_reset_hw(hw);
497 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
499 hw_atl_b0_hw_start(hw);
500 /* check and configure queue intr-vector mapping */
501 if ((rte_intr_cap_multiple(intr_handle) ||
502 !RTE_ETH_DEV_SRIOV(dev).active) &&
503 dev->data->dev_conf.intr_conf.rxq != 0) {
504 intr_vector = dev->data->nb_rx_queues;
505 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
506 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
507 ATL_MAX_INTR_QUEUE_NUM);
510 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
511 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Lazily allocate the per-RX-queue interrupt vector table. */
516 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
517 intr_handle->intr_vec = rte_zmalloc("intr_vec",
518 dev->data->nb_rx_queues * sizeof(int), 0);
519 if (intr_handle->intr_vec == NULL) {
520 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
521 " intr_vec", dev->data->nb_rx_queues);
526 /* initialize transmission unit */
529 /* This can fail when allocating mbufs for descriptor rings */
530 err = atl_rx_init(dev);
532 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
536 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
537 hw->fw_ver_actual >> 24,
538 (hw->fw_ver_actual >> 16) & 0xFF,
539 hw->fw_ver_actual & 0xFFFF);
540 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
542 err = atl_start_queues(dev);
544 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
548 err = atl_dev_set_link_up(dev);
550 err = hw->aq_fw_ops->update_link_status(hw);
/* Link is "up" iff FW reports a non-zero speed. */
555 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
560 if (rte_intr_allow_others(intr_handle)) {
561 /* check if lsc interrupt is enabled */
562 if (dev->data->dev_conf.intr_conf.lsc != 0)
563 atl_dev_lsc_interrupt_setup(dev, true);
565 atl_dev_lsc_interrupt_setup(dev, false);
567 rte_intr_callback_unregister(intr_handle,
568 atl_dev_interrupt_handler, dev);
569 if (dev->data->dev_conf.intr_conf.lsc != 0)
570 PMD_INIT_LOG(INFO, "lsc won't enable because of"
571 " no intr multiplex");
574 /* check if rxq interrupt is enabled */
575 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
576 rte_intr_dp_is_en(intr_handle))
577 atl_dev_rxq_interrupt_setup(dev);
579 /* enable uio/vfio intr/eventfd mapping */
580 rte_intr_enable(intr_handle);
582 /* resume enabled intr since hw reset */
583 atl_enable_intr(dev);
/* Error path: stop queues before propagating the failure. */
588 atl_stop_queues(dev);
/*
 * dev_stop: mask interrupts, stop the queues, clear recorded state and
 * link status, and release the RX-queue interrupt vector table.
 */
593 * Stop device: disable rx and tx functions to allow for reconfiguring.
596 atl_dev_stop(struct rte_eth_dev *dev)
598 struct rte_eth_link link;
600 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
601 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
602 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
604 PMD_INIT_FUNC_TRACE();
606 /* disable interrupts */
607 atl_disable_intr(hw);
611 hw->adapter_stopped = 1;
613 atl_stop_queues(dev);
615 /* Clear stored conf */
616 dev->data->scattered_rx = 0;
619 /* Clear recorded link status */
620 memset(&link, 0, sizeof(link));
621 rte_eth_linkstatus_set(dev, &link);
623 if (!rte_intr_allow_others(intr_handle))
624 /* resume to the default handler */
625 rte_intr_callback_register(intr_handle,
626 atl_dev_interrupt_handler,
629 /* Clean datapath event and queue/vec mapping */
630 rte_intr_efd_disable(intr_handle);
631 if (intr_handle->intr_vec != NULL) {
632 rte_free(intr_handle->intr_vec);
633 intr_handle->intr_vec = NULL;
/*
 * set_link_up: translate the configured ETH_LINK_SPEED_* bits into the
 * firmware's AQ_NIC_RATE_* mask and ask FW to (re)negotiate. With
 * AUTONEG, advertise every rate enabled in the NIC config.
 */
638 * Set device link up: enable tx.
641 atl_dev_set_link_up(struct rte_eth_dev *dev)
643 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
644 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
645 uint32_t speed_mask = 0;
647 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
648 speed_mask = hw->aq_nic_cfg->link_speed_msk;
650 if (link_speeds & ETH_LINK_SPEED_10G)
651 speed_mask |= AQ_NIC_RATE_10G;
652 if (link_speeds & ETH_LINK_SPEED_5G)
653 speed_mask |= AQ_NIC_RATE_5G;
654 if (link_speeds & ETH_LINK_SPEED_1G)
655 speed_mask |= AQ_NIC_RATE_1G;
656 if (link_speeds & ETH_LINK_SPEED_2_5G)
657 speed_mask |= AQ_NIC_RATE_2G5;
658 if (link_speeds & ETH_LINK_SPEED_100M)
659 speed_mask |= AQ_NIC_RATE_100M;
662 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
/* set_link_down: an empty rate mask tells FW to drop the link. */
666 * Set device link down: disable tx.
669 atl_dev_set_link_down(struct rte_eth_dev *dev)
671 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
673 return hw->aq_fw_ops->set_link_speed(hw, 0);
/* dev_close: stop the port and free its queues. */
677 * Reset and stop device.
680 atl_dev_close(struct rte_eth_dev *dev)
682 PMD_INIT_FUNC_TRACE();
686 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of this port. */
690 atl_dev_reset(struct rte_eth_dev *dev)
694 ret = eth_atl_dev_uninit(dev);
698 ret = eth_atl_dev_init(dev);
/*
 * MACsec configuration helpers (called from the rte_pmd_atlantic API).
 * These only record the requested settings in the adapter config;
 * presumably the settings are pushed to HW/FW elsewhere — not visible
 * in this excerpt.
 */
/* Enable MACsec with optional encryption and replay protection. */
703 int atl_macsec_enable(struct rte_eth_dev *dev,
704 uint8_t encr, uint8_t repl_prot)
706 struct aq_hw_cfg_s *cfg =
707 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
709 cfg->aq_macsec.common.macsec_enabled = 1;
710 cfg->aq_macsec.common.encryption_enabled = encr;
711 cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
/* Disable MACsec processing. */
716 int atl_macsec_disable(struct rte_eth_dev *dev)
718 struct aq_hw_cfg_s *cfg =
719 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
721 cfg->aq_macsec.common.macsec_enabled = 0;
/* Record the TX secure-connection MAC; the 6-byte address is stored
 * right-aligned in the (larger) mac field, hence the +2 offset. */
726 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
728 struct aq_hw_cfg_s *cfg =
729 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
731 memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
732 memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
/* Record the RX secure-connection MAC and port identifier. */
737 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
738 uint8_t *mac, uint16_t pi)
740 struct aq_hw_cfg_s *cfg =
741 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
743 memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
744 memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
745 cfg->aq_macsec.rxsc.pi = pi;
/* Select the TX secure association: index, AN, packet number, 128-bit key. */
750 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
751 uint8_t idx, uint8_t an,
752 uint32_t pn, uint8_t *key)
754 struct aq_hw_cfg_s *cfg =
755 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
757 cfg->aq_macsec.txsa.idx = idx;
758 cfg->aq_macsec.txsa.pn = pn;
759 cfg->aq_macsec.txsa.an = an;
761 memcpy(&cfg->aq_macsec.txsa.key, key, 16);
/* Select the RX secure association: index, AN, packet number, 128-bit key. */
765 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
766 uint8_t idx, uint8_t an,
767 uint32_t pn, uint8_t *key)
769 struct aq_hw_cfg_s *cfg =
770 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
772 cfg->aq_macsec.rxsa.idx = idx;
773 cfg->aq_macsec.rxsa.pn = pn;
774 cfg->aq_macsec.rxsa.an = an;
776 memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
/*
 * stats_get: refresh HW counters via FW, then combine the HW DMA
 * counters with the software per-queue counters kept by the datapath.
 */
781 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
783 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
784 struct aq_hw_s *hw = &adapter->hw;
785 struct atl_sw_stats *swstats = &adapter->sw_stats;
788 hw->aq_fw_ops->update_stats(hw);
790 /* Fill out the rte_eth_stats statistics structure */
791 stats->ipackets = hw->curr_stats.dma_pkt_rc;
792 stats->ibytes = hw->curr_stats.dma_oct_rc;
793 stats->imissed = hw->curr_stats.dpc;
794 stats->ierrors = hw->curr_stats.erpt;
796 stats->opackets = hw->curr_stats.dma_pkt_tc;
797 stats->obytes = hw->curr_stats.dma_oct_tc;
800 stats->rx_nombuf = swstats->rx_nombuf;
/* Per-queue counters come from the software stats only. */
802 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
803 stats->q_ipackets[i] = swstats->q_ipackets[i];
804 stats->q_opackets[i] = swstats->q_opackets[i];
805 stats->q_ibytes[i] = swstats->q_ibytes[i];
806 stats->q_obytes[i] = swstats->q_obytes[i];
807 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used for xstats_reset): resync with FW, then zero
 * both the HW snapshot and the software counters. */
813 atl_dev_stats_reset(struct rte_eth_dev *dev)
815 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
816 struct aq_hw_s *hw = &adapter->hw;
818 hw->aq_fw_ops->update_stats(hw);
820 /* Reset software totals */
821 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
823 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/*
 * xstats_get_names: copy up to 'size' names from atl_xstats_tbl.
 * NOTE(review): the bare RETURN of RTE_DIM below presumably sits under
 * an "xstats_names == NULL" check elided from this excerpt — confirm.
 */
827 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
828 struct rte_eth_xstat_name *xstats_names,
834 return RTE_DIM(atl_xstats_tbl);
836 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
837 strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
838 RTE_ETH_XSTATS_NAME_SIZE);
/* xstats_get: read each table entry's value out of curr_stats by its
 * recorded byte offset. */
844 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
847 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
848 struct aq_hw_s *hw = &adapter->hw;
854 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
856 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
857 atl_xstats_tbl[i].offset);
/*
 * fw_version_get: format the FW version as "major.minor.build".
 * Returns the required buffer size (incl. NUL) per the ethdev contract.
 * NOTE(review): 'ret' is unsigned but snprintf can return a negative
 * value; the size check following these lines is elided in this excerpt.
 */
864 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
866 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
868 unsigned int ret = 0;
870 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
874 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
875 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
877 ret += 1; /* add string null-terminator */
/*
 * dev_infos_get: report static device capabilities — queue/MAC limits,
 * offload masks, descriptor limits, RSS parameters and speed capa.
 */
886 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
888 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
890 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
891 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
893 dev_info->min_rx_bufsize = 1024;
894 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
895 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
896 dev_info->max_vfs = pci_dev->max_vfs;
/* No MAC hash filtering or VMDq support. */
898 dev_info->max_hash_mac_addrs = 0;
899 dev_info->max_vmdq_pools = 0;
900 dev_info->vmdq_queue_num = 0;
902 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
904 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
907 dev_info->default_rxconf = (struct rte_eth_rxconf) {
908 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
911 dev_info->default_txconf = (struct rte_eth_txconf) {
912 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
915 dev_info->rx_desc_lim = rx_desc_lim;
916 dev_info->tx_desc_lim = tx_desc_lim;
918 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
919 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
920 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
/* Multi-gig PHY: 100M / 1G / 2.5G / 5G / 10G. */
922 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
923 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
924 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
925 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Advertise the packet types the atl_recv_pkts burst function can
 * classify; only valid when that burst function is in use. */
928 static const uint32_t *
929 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
931 static const uint32_t ptypes[] = {
933 RTE_PTYPE_L2_ETHER_ARP,
934 RTE_PTYPE_L2_ETHER_VLAN,
944 if (dev->rx_pkt_burst == atl_recv_pkts)
/*
 * link_update: poll FW for the current link state and publish it via
 * rte_eth_linkstatus_set(). Speed of 0 from FW means link down.
 */
950 /* return 0 means link status changed, -1 means not changed */
952 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
954 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
955 struct atl_interrupt *intr =
956 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
957 struct rte_eth_link link, old;
/* Start from a "down" template; overwritten below if FW reports a speed. */
960 link.link_status = ETH_LINK_DOWN;
962 link.link_duplex = ETH_LINK_FULL_DUPLEX;
963 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
964 memset(&old, 0, sizeof(old));
966 /* load old link status */
967 rte_eth_linkstatus_get(dev, &old);
969 /* read current link status */
970 err = hw->aq_fw_ops->update_link_status(hw);
975 if (hw->aq_link_status.mbps == 0) {
976 /* write default (down) link status */
977 rte_eth_linkstatus_set(dev, &link);
978 if (link.link_status == old.link_status)
983 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
985 link.link_status = ETH_LINK_UP;
986 link.link_duplex = ETH_LINK_FULL_DUPLEX;
987 link.link_speed = hw->aq_link_status.mbps;
989 rte_eth_linkstatus_set(dev, &link);
991 if (link.link_status == old.link_status)
/* Promiscuous mode: toggled via the L2 packet-filter register. */
998 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1000 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1002 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1006 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1008 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1010 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* All-multicast mode: accept-all-MC bit in the L2 filter. */
1014 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1016 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1018 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1022 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1024 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* While promiscuous, all MC frames must keep flowing regardless. */
1026 if (dev->data->promiscuous == 1)
1027 return; /* must remain in all_multicast mode */
1029 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
/* LSC interrupt setup: currently just logs the present link status;
 * the 'on' flag is unused. */
1033 * It clears the interrupt causes and enables the interrupt.
1034 * It will be called once only during nic initialized.
1037 * Pointer to struct rte_eth_dev.
1039 * Enable or Disable.
1042 * - On success, zero.
1043 * - On failure, a negative value.
1047 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1049 atl_dev_link_status_print(dev);
/* RX-queue interrupt setup: nothing to program on this HW. */
1054 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read + clear the IRQ cause register, mask interrupts, and note
 * whether a link event needs handling in intr->flags. */
1061 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1063 struct atl_interrupt *intr =
1064 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1065 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1068 hw_atl_b0_hw_irq_read(hw, &cause);
1070 atl_disable_intr(hw);
1071 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
1072 ATL_FLAG_NEED_LINK_UPDATE : 0;
/* Log the stored link status (speed/duplex or "Link Down") plus the
 * port's PCI address for correlation. */
1078 * It gets and then prints the link status.
1081 * Pointer to struct rte_eth_dev.
1084 * - On success, zero.
1085 * - On failure, a negative value.
1088 atl_dev_link_status_print(struct rte_eth_dev *dev)
1090 struct rte_eth_link link;
1092 memset(&link, 0, sizeof(link));
1093 rte_eth_linkstatus_get(dev, &link);
1094 if (link.link_status) {
1095 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1096 (int)(dev->data->port_id),
1097 (unsigned int)link.link_speed,
1098 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1099 "full-duplex" : "half-duplex");
1101 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1102 (int)(dev->data->port_id));
1108 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1110 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1111 pci_dev->addr.domain,
1113 pci_dev->addr.devid,
1114 pci_dev->addr.function);
1118 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
/* Handle a pending interrupt: on a link event, refresh and announce the
 * link status via the LSC callback; always re-arm interrupts. */
1122 * It executes link_update after knowing an interrupt occurred.
1125 * Pointer to struct rte_eth_dev.
1128 * - On success, zero.
1129 * - On failure, a negative value.
1132 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1133 struct rte_intr_handle *intr_handle)
1135 struct atl_interrupt *intr =
1136 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1138 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1139 atl_dev_link_update(dev, 0);
1140 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1141 atl_dev_link_status_print(dev);
1142 _rte_eth_dev_callback_process(dev,
1143 RTE_ETH_EVENT_INTR_LSC, NULL);
1146 atl_enable_intr(dev);
1147 rte_intr_enable(intr_handle);
/* Top-level ISR registered with the intr_handle: fetch cause bits,
 * then dispatch to atl_dev_interrupt_action(). */
1153 * Interrupt handler triggered by NIC for handling
1154 * specific interrupt.
1157 * Pointer to interrupt handle.
1159 * The address of parameter (struct rte_eth_dev *) registered before.
1165 atl_dev_interrupt_handler(void *param)
1167 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1169 atl_dev_interrupt_get_status(dev);
1170 atl_dev_interrupt_action(dev, dev->intr_handle);
/* SFP module EEPROM access over the FW SMBus interface. */
1173 #define SFP_EEPROM_SIZE 0xff
1176 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1178 return SFP_EEPROM_SIZE;
/* Read from the module EEPROM; bounds-checks length+offset against the
 * fixed SFP EEPROM size. A non-default device address can presumably be
 * passed via eeprom->magic (the guarding check is elided here). */
1181 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1182 struct rte_dev_eeprom_info *eeprom)
1184 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1185 uint32_t dev_addr = SMBUS_DEVICE_ID;
1187 if (hw->aq_fw_ops->get_eeprom == NULL)
1190 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1191 eeprom->data == NULL)
1195 dev_addr = eeprom->magic;
1197 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1198 eeprom->length, eeprom->offset);
/* Write the module EEPROM; only full-size writes are accepted. */
1201 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1202 struct rte_dev_eeprom_info *eeprom)
1204 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205 uint32_t dev_addr = SMBUS_DEVICE_ID;
1207 if (hw->aq_fw_ops->set_eeprom == NULL)
1210 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1214 dev_addr = eeprom->magic;
1216 return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1217 eeprom->data, eeprom->length);
/*
 * get_reg: with data == NULL, report the dump size/width; otherwise
 * perform a full register dump (partial dumps are rejected) and record
 * the MIF chip revision as the dump version.
 */
1221 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1223 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 if (regs->data == NULL) {
1228 regs->length = hw_atl_utils_hw_get_reg_length();
1229 regs->width = sizeof(u32);
1233 /* Only full register dump is supported */
1234 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1237 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1239 /* Device version */
1240 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1241 regs->version = mif_id & 0xFFU;
1247 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1249 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1251 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1252 fc_conf->mode = RTE_FC_NONE;
1253 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1254 fc_conf->mode = RTE_FC_FULL;
1255 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1256 fc_conf->mode = RTE_FC_RX_PAUSE;
1257 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1258 fc_conf->mode = RTE_FC_TX_PAUSE;
/*
 * flow_ctrl_set: translate the requested RTE_FC_* mode into the
 * AQ_NIC_FC_* mask and push it to firmware only when it changed.
 */
1264 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1266 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1267 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1270 if (hw->aq_fw_ops->set_flow_control == NULL)
1273 if (fc_conf->mode == RTE_FC_NONE)
1274 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1275 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1276 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1277 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1278 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1279 else if (fc_conf->mode == RTE_FC_FULL)
1280 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
/* Only bother FW when the setting actually changed. */
1282 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1283 return hw->aq_fw_ops->set_flow_control(hw);
1289 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1290 u8 *mac_addr, bool enable)
1292 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1293 unsigned int h = 0U;
1294 unsigned int l = 0U;
1298 h = (mac_addr[0] << 8) | (mac_addr[1]);
1299 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1300 (mac_addr[4] << 8) | mac_addr[5];
1303 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1304 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1305 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1308 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1310 err = aq_hw_err_from_flags(hw);
1316 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1317 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1319 if (is_zero_ether_addr(mac_addr)) {
1320 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1324 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1328 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1330 atl_update_mac_addr(dev, index, NULL, false);
/* rte_eth_dev_default_mac_addr_set() callback: replace the address in
 * filter slot 0 with @addr.
 *
 * Fix: the original dropped the return value of atl_add_mac_addr(),
 * so a failed HW programming (or a zero address) was reported as
 * success; propagate the status to the caller instead.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1342 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1344 struct rte_eth_dev_info dev_info;
1345 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1347 atl_dev_info_get(dev, &dev_info);
1349 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1352 /* update max frame size */
1353 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1359 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1361 struct aq_hw_cfg_s *cfg =
1362 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1363 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1367 PMD_INIT_FUNC_TRACE();
1369 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1370 if (cfg->vlan_filter[i] == vlan_id) {
1372 /* Disable VLAN filter. */
1373 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1375 /* Clear VLAN filter entry */
1376 cfg->vlan_filter[i] = 0;
1382 /* VLAN_ID was not found. So, nothing to delete. */
1383 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1386 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1387 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1390 /* Try to found free VLAN filter to add new VLAN_ID */
1391 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1392 if (cfg->vlan_filter[i] == 0)
1396 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1397 /* We have no free VLAN filter to add new VLAN_ID*/
1402 cfg->vlan_filter[i] = vlan_id;
1403 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1404 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1405 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1408 /* Enable VLAN promisc mode if vlan_filter empty */
1409 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1410 if (cfg->vlan_filter[i] != 0)
1414 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1420 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1422 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1423 struct aq_hw_cfg_s *cfg =
1424 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1427 PMD_INIT_FUNC_TRACE();
1429 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1430 if (cfg->vlan_filter[i])
1431 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1437 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1439 struct aq_hw_cfg_s *cfg =
1440 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1441 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1445 PMD_INIT_FUNC_TRACE();
1447 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1449 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1451 for (i = 0; i < dev->data->nb_rx_queues; i++)
1452 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1454 if (mask & ETH_VLAN_EXTEND_MASK)
1461 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1464 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1467 PMD_INIT_FUNC_TRACE();
1469 switch (vlan_type) {
1470 case ETH_VLAN_TYPE_INNER:
1471 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1473 case ETH_VLAN_TYPE_OUTER:
1474 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1477 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1485 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1487 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1489 PMD_INIT_FUNC_TRACE();
1491 if (queue_id > dev->data->nb_rx_queues) {
1492 PMD_DRV_LOG(ERR, "Invalid queue id");
1496 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1500 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1501 struct ether_addr *mc_addr_set,
1502 uint32_t nb_mc_addr)
1504 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1507 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1510 /* Update whole uc filters table */
1511 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1512 u8 *mac_addr = NULL;
1515 if (i < nb_mc_addr) {
1516 mac_addr = mc_addr_set[i].addr_bytes;
1517 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1518 (mac_addr[4] << 8) | mac_addr[5];
1519 h = (mac_addr[0] << 8) | mac_addr[1];
1522 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1523 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1524 HW_ATL_B0_MAC_MIN + i);
1525 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1526 HW_ATL_B0_MAC_MIN + i);
1527 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1528 HW_ATL_B0_MAC_MIN + i);
1535 atl_reta_update(struct rte_eth_dev *dev,
1536 struct rte_eth_rss_reta_entry64 *reta_conf,
1540 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1541 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1543 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1544 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1545 dev->data->nb_rx_queues - 1);
1547 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1552 atl_reta_query(struct rte_eth_dev *dev,
1553 struct rte_eth_rss_reta_entry64 *reta_conf,
1557 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1559 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1560 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1561 reta_conf->mask = ~0U;
1566 atl_rss_hash_update(struct rte_eth_dev *dev,
1567 struct rte_eth_rss_conf *rss_conf)
1569 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1570 struct aq_hw_cfg_s *cfg =
1571 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1572 static u8 def_rss_key[40] = {
1573 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1574 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1575 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1576 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1577 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1580 cfg->is_rss = !!rss_conf->rss_hf;
1581 if (rss_conf->rss_key) {
1582 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1583 rss_conf->rss_key_len);
1584 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1586 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1587 sizeof(def_rss_key));
1588 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1591 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1592 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1597 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1598 struct rte_eth_rss_conf *rss_conf)
1600 struct aq_hw_cfg_s *cfg =
1601 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1603 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1604 if (rss_conf->rss_key) {
1605 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1606 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1607 rss_conf->rss_key_len);
1614 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1616 if (strcmp(dev->device->driver->name, drv->driver.name))
1623 is_atlantic_supported(struct rte_eth_dev *dev)
1625 return is_device_supported(dev, &rte_atl_pmd);
1628 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1629 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1630 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1632 RTE_INIT(atl_init_log)
1634 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1635 if (atl_logtype_init >= 0)
1636 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1637 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1638 if (atl_logtype_driver >= 0)
1639 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);