1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
8 #include "atl_ethdev.h"
9 #include "atl_common.h"
10 #include "atl_hw_regs.h"
12 #include "hw_atl/hw_atl_llh.h"
13 #include "hw_atl/hw_atl_b0.h"
14 #include "hw_atl/hw_atl_b0_internal.h"
16 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
17 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 static int atl_dev_configure(struct rte_eth_dev *dev);
20 static int atl_dev_start(struct rte_eth_dev *dev);
21 static void atl_dev_stop(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
23 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
24 static void atl_dev_close(struct rte_eth_dev *dev);
25 static int atl_dev_reset(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
30 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
33 struct rte_eth_xstat_name *xstats_names,
36 static int atl_dev_stats_get(struct rte_eth_dev *dev,
37 struct rte_eth_stats *stats);
39 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
40 struct rte_eth_xstat *stats, unsigned int n);
42 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
47 static void atl_dev_info_get(struct rte_eth_dev *dev,
48 struct rte_eth_dev_info *dev_info);
50 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
55 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
56 uint16_t vlan_id, int on);
58 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
61 uint16_t queue_id, int on);
63 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
64 enum rte_vlan_type vlan_type, uint16_t tpid);
67 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
68 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
69 struct rte_dev_eeprom_info *eeprom);
70 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
71 struct rte_dev_eeprom_info *eeprom);
74 static int atl_dev_get_regs(struct rte_eth_dev *dev,
75 struct rte_dev_reg_info *regs);
78 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
79 struct rte_eth_fc_conf *fc_conf);
80 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
81 struct rte_eth_fc_conf *fc_conf);
83 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
86 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
87 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
89 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
90 struct rte_intr_handle *handle);
91 static void atl_dev_interrupt_handler(void *param);
94 static int atl_add_mac_addr(struct rte_eth_dev *dev,
95 struct ether_addr *mac_addr,
96 uint32_t index, uint32_t pool);
97 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
98 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
99 struct ether_addr *mac_addr);
101 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
102 struct ether_addr *mc_addr_set,
103 uint32_t nb_mc_addr);
106 static int atl_reta_update(struct rte_eth_dev *dev,
107 struct rte_eth_rss_reta_entry64 *reta_conf,
109 static int atl_reta_query(struct rte_eth_dev *dev,
110 struct rte_eth_rss_reta_entry64 *reta_conf,
112 static int atl_rss_hash_update(struct rte_eth_dev *dev,
113 struct rte_eth_rss_conf *rss_conf);
114 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
115 struct rte_eth_rss_conf *rss_conf);
118 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
119 struct rte_pci_device *pci_dev);
120 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
122 static void atl_dev_info_get(struct rte_eth_dev *dev,
123 struct rte_eth_dev_info *dev_info);
125 int atl_logtype_init;
126 int atl_logtype_driver;
129 * The set of PCI devices this driver supports
131 static const struct rte_pci_id pci_id_atl_map[] = {
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
150 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
152 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
153 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
154 { .vendor_id = 0, /* sentinel */ },
157 static struct rte_pci_driver rte_atl_pmd = {
158 .id_table = pci_id_atl_map,
159 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
160 RTE_PCI_DRV_IOVA_AS_VA,
161 .probe = eth_atl_pci_probe,
162 .remove = eth_atl_pci_remove,
165 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
166 | DEV_RX_OFFLOAD_IPV4_CKSUM \
167 | DEV_RX_OFFLOAD_UDP_CKSUM \
168 | DEV_RX_OFFLOAD_TCP_CKSUM \
169 | DEV_RX_OFFLOAD_JUMBO_FRAME \
170 | DEV_RX_OFFLOAD_VLAN_FILTER)
172 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
173 | DEV_TX_OFFLOAD_IPV4_CKSUM \
174 | DEV_TX_OFFLOAD_UDP_CKSUM \
175 | DEV_TX_OFFLOAD_TCP_CKSUM \
176 | DEV_TX_OFFLOAD_TCP_TSO \
177 | DEV_TX_OFFLOAD_MULTI_SEGS)
179 static const struct rte_eth_desc_lim rx_desc_lim = {
180 .nb_max = ATL_MAX_RING_DESC,
181 .nb_min = ATL_MIN_RING_DESC,
182 .nb_align = ATL_RXD_ALIGN,
185 static const struct rte_eth_desc_lim tx_desc_lim = {
186 .nb_max = ATL_MAX_RING_DESC,
187 .nb_min = ATL_MIN_RING_DESC,
188 .nb_align = ATL_TXD_ALIGN,
189 .nb_seg_max = ATL_TX_MAX_SEG,
190 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
193 #define ATL_XSTATS_FIELD(name) { \
195 offsetof(struct aq_stats_s, name) \
198 struct atl_xstats_tbl_s {
203 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
204 ATL_XSTATS_FIELD(uprc),
205 ATL_XSTATS_FIELD(mprc),
206 ATL_XSTATS_FIELD(bprc),
207 ATL_XSTATS_FIELD(erpt),
208 ATL_XSTATS_FIELD(uptc),
209 ATL_XSTATS_FIELD(mptc),
210 ATL_XSTATS_FIELD(bptc),
211 ATL_XSTATS_FIELD(erpr),
212 ATL_XSTATS_FIELD(ubrc),
213 ATL_XSTATS_FIELD(ubtc),
214 ATL_XSTATS_FIELD(mbrc),
215 ATL_XSTATS_FIELD(mbtc),
216 ATL_XSTATS_FIELD(bbrc),
217 ATL_XSTATS_FIELD(bbtc),
220 static const struct eth_dev_ops atl_eth_dev_ops = {
221 .dev_configure = atl_dev_configure,
222 .dev_start = atl_dev_start,
223 .dev_stop = atl_dev_stop,
224 .dev_set_link_up = atl_dev_set_link_up,
225 .dev_set_link_down = atl_dev_set_link_down,
226 .dev_close = atl_dev_close,
227 .dev_reset = atl_dev_reset,
230 .promiscuous_enable = atl_dev_promiscuous_enable,
231 .promiscuous_disable = atl_dev_promiscuous_disable,
232 .allmulticast_enable = atl_dev_allmulticast_enable,
233 .allmulticast_disable = atl_dev_allmulticast_disable,
236 .link_update = atl_dev_link_update,
238 .get_reg = atl_dev_get_regs,
241 .stats_get = atl_dev_stats_get,
242 .xstats_get = atl_dev_xstats_get,
243 .xstats_get_names = atl_dev_xstats_get_names,
244 .stats_reset = atl_dev_stats_reset,
245 .xstats_reset = atl_dev_stats_reset,
247 .fw_version_get = atl_fw_version_get,
248 .dev_infos_get = atl_dev_info_get,
249 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
251 .mtu_set = atl_dev_mtu_set,
254 .vlan_filter_set = atl_vlan_filter_set,
255 .vlan_offload_set = atl_vlan_offload_set,
256 .vlan_tpid_set = atl_vlan_tpid_set,
257 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
260 .rx_queue_start = atl_rx_queue_start,
261 .rx_queue_stop = atl_rx_queue_stop,
262 .rx_queue_setup = atl_rx_queue_setup,
263 .rx_queue_release = atl_rx_queue_release,
265 .tx_queue_start = atl_tx_queue_start,
266 .tx_queue_stop = atl_tx_queue_stop,
267 .tx_queue_setup = atl_tx_queue_setup,
268 .tx_queue_release = atl_tx_queue_release,
270 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
271 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
273 .rx_queue_count = atl_rx_queue_count,
274 .rx_descriptor_status = atl_dev_rx_descriptor_status,
275 .tx_descriptor_status = atl_dev_tx_descriptor_status,
278 .get_eeprom_length = atl_dev_get_eeprom_length,
279 .get_eeprom = atl_dev_get_eeprom,
280 .set_eeprom = atl_dev_set_eeprom,
283 .flow_ctrl_get = atl_flow_ctrl_get,
284 .flow_ctrl_set = atl_flow_ctrl_set,
287 .mac_addr_add = atl_add_mac_addr,
288 .mac_addr_remove = atl_remove_mac_addr,
289 .mac_addr_set = atl_set_default_mac_addr,
290 .set_mc_addr_list = atl_dev_set_mc_addr_list,
291 .rxq_info_get = atl_rxq_info_get,
292 .txq_info_get = atl_txq_info_get,
294 .reta_update = atl_reta_update,
295 .reta_query = atl_reta_query,
296 .rss_hash_update = atl_rss_hash_update,
297 .rss_hash_conf_get = atl_rss_hash_conf_get,
300 static inline int32_t
301 atl_reset_hw(struct aq_hw_s *hw)
303 return hw_atl_b0_hw_reset(hw);
307 atl_enable_intr(struct rte_eth_dev *dev)
309 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
311 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt sources; used around reset/stop paths. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
322 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
/* Per-port init: install ops/burst handlers, read PCI IDs and BAR0,
 * apply a hardcoded HW config, init FW ops, fetch the permanent MAC and
 * hook up the PCI interrupt.
 * NOTE(review): several lines (braces, error returns, the remainder of
 * the link_speed_msk initializer) are missing from this excerpt —
 * confirm the exact flow against the complete file. */
324 struct atl_adapter *adapter =
325 (struct atl_adapter *)eth_dev->data->dev_private;
326 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
327 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
328 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
331 PMD_INIT_FUNC_TRACE();
/* Datapath entry points are set before the secondary-process early
 * return so secondaries can still TX/RX. */
333 eth_dev->dev_ops = &atl_eth_dev_ops;
334 eth_dev->rx_pkt_burst = &atl_recv_pkts;
335 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
336 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
338 /* For secondary processes, the primary process has done all the work */
339 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
342 /* Vendor and Device ID need to be set before init of shared code */
343 hw->device_id = pci_dev->id.device_id;
344 hw->vendor_id = pci_dev->id.vendor_id;
345 hw->mmio = (void *)pci_dev->mem_resource[0].addr; /* BAR0 register window */
347 /* Hardware configuration - hardcode */
348 adapter->hw_cfg.is_lro = false;
349 adapter->hw_cfg.wol = false;
350 adapter->hw_cfg.is_rss = false;
351 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
/* NOTE(review): initializer continues on lines not visible here. */
353 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
359 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
360 adapter->hw_cfg.aq_rss.indirection_table_size =
361 HW_ATL_B0_RSS_REDIRECTION_MAX;
363 hw->aq_nic_cfg = &adapter->hw_cfg;
365 /* disable interrupt */
366 atl_disable_intr(hw);
368 /* Allocate memory for storing MAC addresses */
369 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
370 if (eth_dev->data->mac_addrs == NULL) {
371 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Firmware ops table must be resolved before any FW-backed call below. */
375 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
379 /* Copy the permanent MAC address */
380 if (hw->aq_fw_ops->get_mac_permanent(hw,
381 eth_dev->data->mac_addrs->addr_bytes) != 0)
384 /* Reset the hw statistics */
385 atl_dev_stats_reset(eth_dev);
387 rte_intr_callback_register(intr_handle,
388 atl_dev_interrupt_handler, eth_dev);
390 /* enable uio/vfio intr/eventfd mapping */
391 rte_intr_enable(intr_handle);
393 /* enable support intr */
394 atl_enable_intr(eth_dev);
400 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
/* Tear down a port: close if still running, clear handlers, unhook the
 * interrupt callback and free the MAC address array. Primary-process
 * only; mirrors eth_atl_dev_init. */
402 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
403 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
406 PMD_INIT_FUNC_TRACE();
407 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
411 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Ensure the device is fully stopped before releasing resources. */
413 if (hw->adapter_stopped == 0)
414 atl_dev_close(eth_dev);
416 eth_dev->dev_ops = NULL;
417 eth_dev->rx_pkt_burst = NULL;
418 eth_dev->tx_pkt_burst = NULL;
420 /* disable uio intr before callback unregister */
421 rte_intr_disable(intr_handle);
422 rte_intr_callback_unregister(intr_handle,
423 atl_dev_interrupt_handler, eth_dev);
425 rte_free(eth_dev->data->mac_addrs);
426 eth_dev->data->mac_addrs = NULL;
432 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
433 struct rte_pci_device *pci_dev)
435 return rte_eth_dev_pci_generic_probe(pci_dev,
436 sizeof(struct atl_adapter), eth_atl_dev_init);
440 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
442 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
446 atl_dev_configure(struct rte_eth_dev *dev)
448 struct atl_interrupt *intr =
449 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
451 PMD_INIT_FUNC_TRACE();
453 /* set flag to update link status after init */
454 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
460 * Configure device link speed and setup link.
461 * It returns 0 on success.
464 atl_dev_start(struct rte_eth_dev *dev)
/* Start the port: reset+init HW, set up queue interrupt vectors, init
 * RX/TX units, start queues, bring the link up and re-enable interrupts.
 * NOTE(review): error-handling lines (gotos/returns) between the visible
 * statements are missing from this excerpt — verify against the full
 * file before changing control flow. */
466 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
467 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
468 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
469 uint32_t intr_vector = 0;
473 PMD_INIT_FUNC_TRACE();
475 /* set adapter started */
476 hw->adapter_stopped = 0;
/* Fixed-speed configuration is not supported by this PMD. */
478 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
480 "Invalid link_speeds for port %u, fix speed not supported",
485 /* disable uio/vfio intr/eventfd mapping */
486 rte_intr_disable(intr_handle);
488 /* reinitialize adapter
489 * this calls reset and start
491 status = atl_reset_hw(hw);
495 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
497 hw_atl_b0_hw_start(hw);
498 /* check and configure queue intr-vector mapping */
499 if ((rte_intr_cap_multiple(intr_handle) ||
500 !RTE_ETH_DEV_SRIOV(dev).active) &&
501 dev->data->dev_conf.intr_conf.rxq != 0) {
502 intr_vector = dev->data->nb_rx_queues;
503 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
504 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
505 ATL_MAX_INTR_QUEUE_NUM);
508 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
509 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* One interrupt vector per RX queue when datapath interrupts are on. */
514 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
515 intr_handle->intr_vec = rte_zmalloc("intr_vec",
516 dev->data->nb_rx_queues * sizeof(int), 0);
517 if (intr_handle->intr_vec == NULL) {
518 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
519 " intr_vec", dev->data->nb_rx_queues);
524 /* initialize transmission unit */
527 /* This can fail when allocating mbufs for descriptor rings */
528 err = atl_rx_init(dev);
530 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
534 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
535 hw->fw_ver_actual >> 24,
536 (hw->fw_ver_actual >> 16) & 0xFF,
537 hw->fw_ver_actual & 0xFFFF);
538 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
540 err = atl_start_queues(dev);
542 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
546 err = atl_dev_set_link_up(dev);
548 err = hw->aq_fw_ops->update_link_status(hw);
/* Seed the cached link state from the freshly read FW status. */
553 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
558 if (rte_intr_allow_others(intr_handle)) {
559 /* check if lsc interrupt is enabled */
560 if (dev->data->dev_conf.intr_conf.lsc != 0)
561 atl_dev_lsc_interrupt_setup(dev, true);
563 atl_dev_lsc_interrupt_setup(dev, false);
565 rte_intr_callback_unregister(intr_handle,
566 atl_dev_interrupt_handler, dev);
567 if (dev->data->dev_conf.intr_conf.lsc != 0)
568 PMD_INIT_LOG(INFO, "lsc won't enable because of"
569 " no intr multiplex");
572 /* check if rxq interrupt is enabled */
573 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
574 rte_intr_dp_is_en(intr_handle))
575 atl_dev_rxq_interrupt_setup(dev);
577 /* enable uio/vfio intr/eventfd mapping */
578 rte_intr_enable(intr_handle);
580 /* resume enabled intr since hw reset */
581 atl_enable_intr(dev);
/* NOTE(review): presumably an error label — stops queues on failure. */
586 atl_stop_queues(dev);
591 * Stop device: disable rx and tx functions to allow for reconfiguring.
594 atl_dev_stop(struct rte_eth_dev *dev)
/* Stop the port: mask interrupts, stop queues, clear cached link state
 * and release the per-queue interrupt vector mapping. */
596 struct rte_eth_link link;
/* NOTE(review): the declarator ("struct aq_hw_s *hw =") appears to be
 * truncated from this line in the excerpt; `hw` is used below. */
598 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
599 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
600 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
602 PMD_INIT_FUNC_TRACE();
604 /* disable interrupts */
605 atl_disable_intr(hw);
609 hw->adapter_stopped = 1;
611 atl_stop_queues(dev);
613 /* Clear stored conf */
614 dev->data->scattered_rx = 0;
617 /* Clear recorded link status */
618 memset(&link, 0, sizeof(link));
619 rte_eth_linkstatus_set(dev, &link);
621 if (!rte_intr_allow_others(intr_handle))
622 /* resume to the default handler */
623 rte_intr_callback_register(intr_handle,
624 atl_dev_interrupt_handler,
627 /* Clean datapath event and queue/vec mapping */
628 rte_intr_efd_disable(intr_handle);
629 if (intr_handle->intr_vec != NULL) {
630 rte_free(intr_handle->intr_vec);
631 intr_handle->intr_vec = NULL;
636 * Set device link up: enable tx.
639 atl_dev_set_link_up(struct rte_eth_dev *dev)
641 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
643 uint32_t speed_mask = 0;
645 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
646 speed_mask = hw->aq_nic_cfg->link_speed_msk;
648 if (link_speeds & ETH_LINK_SPEED_10G)
649 speed_mask |= AQ_NIC_RATE_10G;
650 if (link_speeds & ETH_LINK_SPEED_5G)
651 speed_mask |= AQ_NIC_RATE_5G;
652 if (link_speeds & ETH_LINK_SPEED_1G)
653 speed_mask |= AQ_NIC_RATE_1G;
654 if (link_speeds & ETH_LINK_SPEED_2_5G)
655 speed_mask |= AQ_NIC_RATE_2G5;
656 if (link_speeds & ETH_LINK_SPEED_100M)
657 speed_mask |= AQ_NIC_RATE_100M;
660 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
664 * Set device link down: disable tx.
667 atl_dev_set_link_down(struct rte_eth_dev *dev)
669 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
671 return hw->aq_fw_ops->set_link_speed(hw, 0);
675 * Reset and stop device.
678 atl_dev_close(struct rte_eth_dev *dev)
/* Close the port and free its queues.
 * NOTE(review): a line between the trace and atl_free_queues is missing
 * from this excerpt — presumably atl_dev_stop(dev); confirm. */
680 PMD_INIT_FUNC_TRACE();
684 atl_free_queues(dev);
/* Reset = full uninit followed by init; `ret` carries the first error.
 * NOTE(review): the intermediate error check is not visible here. */
688 atl_dev_reset(struct rte_eth_dev *dev)
692 ret = eth_atl_dev_uninit(dev);
696 ret = eth_atl_dev_init(dev);
703 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Basic stats: refresh HW counters via FW, then copy DMA packet/byte
 * counters plus software per-queue totals into rte_eth_stats. */
705 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
706 struct aq_hw_s *hw = &adapter->hw;
707 struct atl_sw_stats *swstats = &adapter->sw_stats;
/* Pull current counters from firmware into hw->curr_stats. */
710 hw->aq_fw_ops->update_stats(hw);
712 /* Fill out the rte_eth_stats statistics structure */
713 stats->ipackets = hw->curr_stats.dma_pkt_rc;
714 stats->ibytes = hw->curr_stats.dma_oct_rc;
715 stats->imissed = hw->curr_stats.dpc;
716 stats->ierrors = hw->curr_stats.erpt;
718 stats->opackets = hw->curr_stats.dma_pkt_tc;
719 stats->obytes = hw->curr_stats.dma_oct_tc;
/* rx_nombuf is tracked in software (mbuf allocation failures). */
722 stats->rx_nombuf = swstats->rx_nombuf;
724 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
725 stats->q_ipackets[i] = swstats->q_ipackets[i];
726 stats->q_opackets[i] = swstats->q_opackets[i];
727 stats->q_ibytes[i] = swstats->q_ibytes[i];
728 stats->q_obytes[i] = swstats->q_obytes[i];
729 stats->q_errors[i] = swstats->q_errors[i];
735 atl_dev_stats_reset(struct rte_eth_dev *dev)
737 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
738 struct aq_hw_s *hw = &adapter->hw;
740 hw->aq_fw_ops->update_stats(hw);
742 /* Reset software totals */
743 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
745 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
749 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
750 struct rte_eth_xstat_name *xstats_names,
/* Report xstats names from the static atl_xstats_tbl.
 * NOTE(review): the NULL-pointer guard before the early return is not
 * visible in this excerpt. */
756 return RTE_DIM(atl_xstats_tbl);
758 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
759 strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
760 RTE_ETH_XSTATS_NAME_SIZE);
/* Copy each counter out of hw->curr_stats at its table offset. */
766 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
769 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
770 struct aq_hw_s *hw = &adapter->hw;
776 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
778 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
779 atl_xstats_tbl[i].offset);
/* Format "major.minor.build" from the packed FW version word; the +1
 * accounts for the NUL terminator in the reported length. */
786 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
788 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
790 unsigned int ret = 0;
792 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
796 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
797 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
799 ret += 1; /* add string null-terminator */
808 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/* Fill rte_eth_dev_info with this NIC's fixed capabilities: queue and
 * MAC limits, offload masks, descriptor limits, RSS geometry and the
 * supported speed set. No VMDq support is advertised. */
810 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
812 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
813 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
815 dev_info->min_rx_bufsize = 1024;
816 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
817 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
818 dev_info->max_vfs = pci_dev->max_vfs;
820 dev_info->max_hash_mac_addrs = 0;
821 dev_info->max_vmdq_pools = 0;
822 dev_info->vmdq_queue_num = 0;
824 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
826 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
829 dev_info->default_rxconf = (struct rte_eth_rxconf) {
830 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
833 dev_info->default_txconf = (struct rte_eth_txconf) {
834 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
837 dev_info->rx_desc_lim = rx_desc_lim;
838 dev_info->tx_desc_lim = tx_desc_lim;
840 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
841 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
842 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
844 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
845 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
846 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
847 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Advertise the ptype list only when the atl RX burst handler is in
 * use. NOTE(review): most of the ptypes array and the return paths are
 * not visible in this excerpt. */
850 static const uint32_t *
851 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
853 static const uint32_t ptypes[] = {
855 RTE_PTYPE_L2_ETHER_ARP,
856 RTE_PTYPE_L2_ETHER_VLAN,
866 if (dev->rx_pkt_burst == atl_recv_pkts)
872 /* return 0 means link status changed, -1 means not changed */
874 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
/* Refresh the cached link state from FW and report whether it changed.
 * Link-down is published immediately; link-up fills speed/duplex from
 * the FW-reported mbps. */
876 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
877 struct atl_interrupt *intr =
878 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
879 struct rte_eth_link link, old;
882 link.link_status = ETH_LINK_DOWN;
884 link.link_duplex = ETH_LINK_FULL_DUPLEX;
885 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
886 memset(&old, 0, sizeof(old));
888 /* load old link status */
889 rte_eth_linkstatus_get(dev, &old);
891 /* read current link status */
892 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 is the FW's "no link" indication. */
897 if (hw->aq_link_status.mbps == 0) {
898 /* write default (down) link status */
899 rte_eth_linkstatus_set(dev, &link);
900 if (link.link_status == old.link_status)
/* Link is up: no further (re)configuration needed. */
905 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
907 link.link_status = ETH_LINK_UP;
908 link.link_duplex = ETH_LINK_FULL_DUPLEX;
909 link.link_speed = hw->aq_link_status.mbps;
911 rte_eth_linkstatus_set(dev, &link);
913 if (link.link_status == old.link_status)
920 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
922 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
924 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
928 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
930 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
932 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
936 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
938 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
940 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
944 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
946 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
948 if (dev->data->promiscuous == 1)
949 return; /* must remain in all_multicast mode */
951 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
955 * It clears the interrupt causes and enables the interrupt.
956 * It will be called once only during nic initialized.
959 * Pointer to struct rte_eth_dev.
964 * - On success, zero.
965 * - On failure, a negative value.
/* LSC setup: currently just logs the present link state.
 * NOTE(review): the return statement is not visible in this excerpt. */
969 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
971 atl_dev_link_status_print(dev);
/* RX-queue interrupt setup hook; body not visible in this excerpt. */
976 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and latch the IRQ cause, mask interrupts, and translate a link
 * cause bit into ATL_FLAG_NEED_LINK_UPDATE for the deferred handler. */
983 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
985 struct atl_interrupt *intr =
986 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
987 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
990 hw_atl_b0_hw_irq_read(hw, &cause);
/* Mask further interrupts until the action routine re-enables them. */
992 atl_disable_intr(hw);
993 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
994 ATL_FLAG_NEED_LINK_UPDATE : 0;
1000 * It gets and then prints the link status.
1003 * Pointer to struct rte_eth_dev.
1006 * - On success, zero.
1007 * - On failure, a negative value.
1010 atl_dev_link_status_print(struct rte_eth_dev *dev)
/* Log the cached (not freshly polled) link status for this port. */
1012 struct rte_eth_link link;
1014 memset(&link, 0, sizeof(link));
1015 rte_eth_linkstatus_get(dev, &link);
1016 if (link.link_status) {
1017 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1018 (int)(dev->data->port_id),
1019 (unsigned int)link.link_speed,
1020 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1021 "full-duplex" : "half-duplex");
1023 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1024 (int)(dev->data->port_id));
/* Debug trail: PCI address of the port, then the raw speed value. */
1030 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1032 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1033 pci_dev->addr.domain,
1035 pci_dev->addr.devid,
1036 pci_dev->addr.function);
1040 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1044 * It executes link_update after knowing an interrupt occurred.
1047 * Pointer to struct rte_eth_dev.
1050 * - On success, zero.
1051 * - On failure, a negative value.
1054 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1055 struct rte_intr_handle *intr_handle)
/* Deferred half of the ISR: on a latched link event, refresh and log
 * the link, notify registered LSC callbacks, then re-arm interrupts. */
1057 struct atl_interrupt *intr =
1058 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1060 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1061 atl_dev_link_update(dev, 0);
1062 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1063 atl_dev_link_status_print(dev);
1064 _rte_eth_dev_callback_process(dev,
1065 RTE_ETH_EVENT_INTR_LSC, NULL);
/* Re-enable HW interrupt sources and the EAL-level interrupt. */
1068 atl_enable_intr(dev);
1069 rte_intr_enable(intr_handle);
1075 * Interrupt handler triggered by NIC for handling
1076 * specific interrupt.
1079 * Pointer to interrupt handle.
1081 * The address of parameter (struct rte_eth_dev *) regsitered before.
1087 atl_dev_interrupt_handler(void *param)
1089 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1091 atl_dev_interrupt_get_status(dev);
1092 atl_dev_interrupt_action(dev, dev->intr_handle);
1095 #define SFP_EEPROM_SIZE 0xff
1098 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1100 return SFP_EEPROM_SIZE;
1103 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1104 struct rte_dev_eeprom_info *eeprom)
/* Read the module EEPROM via FW over SMBus. eeprom->magic, when set,
 * overrides the default SMBus device address.
 * NOTE(review): the error returns after the guards are not visible in
 * this excerpt. */
1106 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1107 uint32_t dev_addr = SMBUS_DEVICE_ID;
1109 if (hw->aq_fw_ops->get_eeprom == NULL)
/* Reject reads past the EEPROM window or with no output buffer. */
1112 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1113 eeprom->data == NULL)
1117 dev_addr = eeprom->magic;
1119 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1120 eeprom->length, eeprom->offset);
1123 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1124 struct rte_dev_eeprom_info *eeprom)
/* Write the module EEPROM via FW; only full-window writes accepted. */
1126 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1127 uint32_t dev_addr = SMBUS_DEVICE_ID;
1129 if (hw->aq_fw_ops->set_eeprom == NULL)
1132 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1136 dev_addr = eeprom->magic;
1138 return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1139 eeprom->data, eeprom->length);
1143 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
/* Register dump: with data == NULL only report length/width; otherwise
 * a full dump is performed and regs->version is filled from the MIF id. */
1145 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1149 if (regs->data == NULL) {
1150 regs->length = hw_atl_utils_hw_get_reg_length();
1151 regs->width = sizeof(u32);
1155 /* Only full register dump is supported */
1156 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1159 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1161 /* Device version */
1162 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1163 regs->version = mif_id & 0xFFU;
1169 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1171 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1173 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1174 fc_conf->mode = RTE_FC_NONE;
1175 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1176 fc_conf->mode = RTE_FC_FULL;
1177 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1178 fc_conf->mode = RTE_FC_RX_PAUSE;
1179 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1180 fc_conf->mode = RTE_FC_TX_PAUSE;
1186 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
/* Program the requested flow-control mode into the NIC config and push
 * it to firmware only when the value actually changed. */
1188 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1189 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
/* Firmware that lacks the setter cannot apply flow control at all. */
1192 if (hw->aq_fw_ops->set_flow_control == NULL)
1195 if (fc_conf->mode == RTE_FC_NONE)
1196 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1197 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1198 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1199 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1200 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1201 else if (fc_conf->mode == RTE_FC_FULL)
1202 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1204 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1205 return hw->aq_fw_ops->set_flow_control(hw);
1211 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1212 u8 *mac_addr, bool enable)
/* Program (or clear) unicast filter slot `index`. The 6-byte MAC is
 * split into a 16-bit high half and 32-bit low half for the two filter
 * registers; the filter is disabled before rewriting and re-enabled
 * when `enable` is set.
 * NOTE(review): the branch structure around `enable` is partly missing
 * from this excerpt — confirm against the full file. */
1214 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 unsigned int h = 0U;
1216 unsigned int l = 0U;
1220 h = (mac_addr[0] << 8) | (mac_addr[1]);
1221 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1222 (mac_addr[4] << 8) | mac_addr[5];
/* Disable the slot before rewriting its address registers. */
1225 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1226 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1227 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1230 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1232 err = aq_hw_err_from_flags(hw);
/* Reject the all-zero MAC, then install into the requested slot.
 * NOTE(review): the error return after the log is not visible here. */
1238 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1239 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1241 if (is_zero_ether_addr(mac_addr)) {
1242 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1246 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1250 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1252 atl_update_mac_addr(dev, index, NULL, false);
1256 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
/* Replace the default MAC: clear slot 0, then install `addr` there. */
1258 atl_remove_mac_addr(dev, 0);
1259 atl_add_mac_addr(dev, addr, 0, 0);
/* MTU update: validate against the device's max frame length and store
 * the new max RX packet length in the runtime config.
 * NOTE(review): the error return and jumbo-frame offload toggling lines
 * are not visible in this excerpt. */
1264 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1266 struct rte_eth_dev_info dev_info;
1267 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1269 atl_dev_info_get(dev, &dev_info);
1271 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1274 /* update max frame size */
1275 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1281 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
/* Add or remove `vlan_id` from the HW VLAN filter table. The software
 * shadow (cfg->vlan_filter[]) mirrors HW slots; VLAN promiscuous mode
 * is enabled whenever the table ends up empty. */
1283 struct aq_hw_cfg_s *cfg =
1284 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1285 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1289 PMD_INIT_FUNC_TRACE();
/* First pass: find an existing entry for this VLAN. */
1291 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1292 if (cfg->vlan_filter[i] == vlan_id) {
1294 /* Disable VLAN filter. */
1295 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1297 /* Clear VLAN filter entry */
1298 cfg->vlan_filter[i] = 0;
1304 /* VLAN_ID was not found. So, nothing to delete. */
1305 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1308 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1309 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1312 /* Try to found free VLAN filter to add new VLAN_ID */
1313 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1314 if (cfg->vlan_filter[i] == 0)
1318 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1319 /* We have no free VLAN filter to add new VLAN_ID*/
/* Program the free slot: record, set action, id, then enable. */
1324 cfg->vlan_filter[i] = vlan_id;
1325 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1326 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1327 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1330 /* Enable VLAN promisc mode if vlan_filter empty */
1331 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1332 if (cfg->vlan_filter[i] != 0)
1336 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
/* Toggle the HW enable bit for every populated VLAN filter slot. */
1342 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1344 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1345 struct aq_hw_cfg_s *cfg =
1346 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1349 PMD_INIT_FUNC_TRACE();
1351 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1352 if (cfg->vlan_filter[i])
1353 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1359 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1361 struct aq_hw_cfg_s *cfg =
1362 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1363 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1367 PMD_INIT_FUNC_TRACE();
1369 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1371 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1373 for (i = 0; i < dev->data->nb_rx_queues; i++)
1374 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1376 if (mask & ETH_VLAN_EXTEND_MASK)
1383 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1386 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1389 PMD_INIT_FUNC_TRACE();
1391 switch (vlan_type) {
1392 case ETH_VLAN_TYPE_INNER:
1393 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1395 case ETH_VLAN_TYPE_OUTER:
1396 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1399 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1407 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1409 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1411 PMD_INIT_FUNC_TRACE();
1413 if (queue_id > dev->data->nb_rx_queues) {
1414 PMD_DRV_LOG(ERR, "Invalid queue id");
1418 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1422 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1423 struct ether_addr *mc_addr_set,
1424 uint32_t nb_mc_addr)
1426 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1429 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1432 /* Update whole uc filters table */
1433 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1434 u8 *mac_addr = NULL;
1437 if (i < nb_mc_addr) {
1438 mac_addr = mc_addr_set[i].addr_bytes;
1439 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1440 (mac_addr[4] << 8) | mac_addr[5];
1441 h = (mac_addr[0] << 8) | mac_addr[1];
1444 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1445 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1446 HW_ATL_B0_MAC_MIN + i);
1447 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1448 HW_ATL_B0_MAC_MIN + i);
1449 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1450 HW_ATL_B0_MAC_MIN + i);
1457 atl_reta_update(struct rte_eth_dev *dev,
1458 struct rte_eth_rss_reta_entry64 *reta_conf,
1462 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1463 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1465 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1466 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1467 dev->data->nb_rx_queues - 1);
1469 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1474 atl_reta_query(struct rte_eth_dev *dev,
1475 struct rte_eth_rss_reta_entry64 *reta_conf,
1479 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1481 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1482 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1483 reta_conf->mask = ~0U;
1488 atl_rss_hash_update(struct rte_eth_dev *dev,
1489 struct rte_eth_rss_conf *rss_conf)
1491 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1492 struct aq_hw_cfg_s *cfg =
1493 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1494 static u8 def_rss_key[40] = {
1495 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1496 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1497 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1498 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1499 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1502 cfg->is_rss = !!rss_conf->rss_hf;
1503 if (rss_conf->rss_key) {
1504 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1505 rss_conf->rss_key_len);
1506 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1508 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1509 sizeof(def_rss_key));
1510 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1513 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1514 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1519 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1520 struct rte_eth_rss_conf *rss_conf)
1522 struct aq_hw_cfg_s *cfg =
1523 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1525 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1526 if (rss_conf->rss_key) {
1527 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1528 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1529 rss_conf->rss_key_len);
/* Register the Atlantic PMD with the EAL: driver entry, the PCI id
 * table it binds to, and the kernel modules it depends on.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1539 RTE_INIT(atl_init_log)
1541 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1542 if (atl_logtype_init >= 0)
1543 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1544 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1545 if (atl_logtype_driver >= 0)
1546 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);