1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 uint16_t vlan_id, int on);
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 uint16_t queue_id, int on);
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 enum rte_vlan_type vlan_type, uint16_t tpid);
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 struct rte_dev_eeprom_info *eeprom);
73 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
74 struct rte_eth_fc_conf *fc_conf);
75 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
76 struct rte_eth_fc_conf *fc_conf);
78 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
81 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
82 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
83 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
84 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
85 struct rte_intr_handle *handle);
86 static void atl_dev_interrupt_handler(void *param);
89 static int atl_add_mac_addr(struct rte_eth_dev *dev,
90 struct ether_addr *mac_addr,
91 uint32_t index, uint32_t pool);
92 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
93 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
94 struct ether_addr *mac_addr);
96 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
97 struct ether_addr *mc_addr_set,
101 static int atl_reta_update(struct rte_eth_dev *dev,
102 struct rte_eth_rss_reta_entry64 *reta_conf,
104 static int atl_reta_query(struct rte_eth_dev *dev,
105 struct rte_eth_rss_reta_entry64 *reta_conf,
107 static int atl_rss_hash_update(struct rte_eth_dev *dev,
108 struct rte_eth_rss_conf *rss_conf);
109 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
110 struct rte_eth_rss_conf *rss_conf);
113 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
114 struct rte_pci_device *pci_dev);
115 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
117 static void atl_dev_info_get(struct rte_eth_dev *dev,
118 struct rte_eth_dev_info *dev_info);
120 int atl_logtype_init;
121 int atl_logtype_driver;
/* PCI ID match table used by the PMD probe path. Entries cover the
 * pre-production D-series IDs, the AQC1xx family, the -S variants and
 * the -E variants; the zero vendor_id entry terminates the table. */
124 * The set of PCI devices this driver supports
126 static const struct rte_pci_id pci_id_atl_map[] = {
127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
149 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping, supports link-status-change
 * interrupts and IOVA-as-VA mode. Probe/remove hook into the generic
 * rte_eth_dev_pci_generic_* helpers below. */
152 static struct rte_pci_driver rte_atl_pmd = {
153 .id_table = pci_id_atl_map,
154 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
155 RTE_PCI_DRV_IOVA_AS_VA,
156 .probe = eth_atl_pci_probe,
157 .remove = eth_atl_pci_remove,
/* Per-port RX/TX offload capability masks advertised in dev_infos_get. */
160 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
161 | DEV_RX_OFFLOAD_IPV4_CKSUM \
162 | DEV_RX_OFFLOAD_UDP_CKSUM \
163 | DEV_RX_OFFLOAD_TCP_CKSUM \
164 | DEV_RX_OFFLOAD_JUMBO_FRAME)
166 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
167 | DEV_TX_OFFLOAD_IPV4_CKSUM \
168 | DEV_TX_OFFLOAD_UDP_CKSUM \
169 | DEV_TX_OFFLOAD_TCP_CKSUM \
170 | DEV_TX_OFFLOAD_TCP_TSO \
171 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Descriptor ring limits reported to applications; TX additionally bounds
 * the number of segments per packet (ATL_TX_MAX_SEG). */
173 static const struct rte_eth_desc_lim rx_desc_lim = {
174 .nb_max = ATL_MAX_RING_DESC,
175 .nb_min = ATL_MIN_RING_DESC,
176 .nb_align = ATL_RXD_ALIGN,
179 static const struct rte_eth_desc_lim tx_desc_lim = {
180 .nb_max = ATL_MAX_RING_DESC,
181 .nb_min = ATL_MIN_RING_DESC,
182 .nb_align = ATL_TXD_ALIGN,
183 .nb_seg_max = ATL_TX_MAX_SEG,
184 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* xstats name -> offset mapping into struct aq_stats_s; iterated by
 * atl_dev_xstats_get()/atl_dev_xstats_get_names(). The field names are the
 * firmware counter mnemonics (u=unicast, m=multicast, b=broadcast/bytes,
 * e=error; rc/tc = receive/transmit count -- presumably, confirm against
 * aq_stats_s definition). */
187 #define ATL_XSTATS_FIELD(name) { \
189 offsetof(struct aq_stats_s, name) \
192 struct atl_xstats_tbl_s {
197 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
198 ATL_XSTATS_FIELD(uprc),
199 ATL_XSTATS_FIELD(mprc),
200 ATL_XSTATS_FIELD(bprc),
201 ATL_XSTATS_FIELD(erpt),
202 ATL_XSTATS_FIELD(uptc),
203 ATL_XSTATS_FIELD(mptc),
204 ATL_XSTATS_FIELD(bptc),
205 ATL_XSTATS_FIELD(erpr),
206 ATL_XSTATS_FIELD(ubrc),
207 ATL_XSTATS_FIELD(ubtc),
208 ATL_XSTATS_FIELD(mbrc),
209 ATL_XSTATS_FIELD(mbtc),
210 ATL_XSTATS_FIELD(bbrc),
211 ATL_XSTATS_FIELD(bbtc),
/* ethdev ops vtable wiring the rte_ethdev API to this PMD's handlers.
 * Note xstats_reset intentionally reuses atl_dev_stats_reset. */
214 static const struct eth_dev_ops atl_eth_dev_ops = {
215 .dev_configure = atl_dev_configure,
216 .dev_start = atl_dev_start,
217 .dev_stop = atl_dev_stop,
218 .dev_set_link_up = atl_dev_set_link_up,
219 .dev_set_link_down = atl_dev_set_link_down,
220 .dev_close = atl_dev_close,
221 .dev_reset = atl_dev_reset,
224 .promiscuous_enable = atl_dev_promiscuous_enable,
225 .promiscuous_disable = atl_dev_promiscuous_disable,
226 .allmulticast_enable = atl_dev_allmulticast_enable,
227 .allmulticast_disable = atl_dev_allmulticast_disable,
230 .link_update = atl_dev_link_update,
233 .stats_get = atl_dev_stats_get,
234 .xstats_get = atl_dev_xstats_get,
235 .xstats_get_names = atl_dev_xstats_get_names,
236 .stats_reset = atl_dev_stats_reset,
237 .xstats_reset = atl_dev_stats_reset,
239 .fw_version_get = atl_fw_version_get,
240 .dev_infos_get = atl_dev_info_get,
241 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
243 .mtu_set = atl_dev_mtu_set,
246 .vlan_filter_set = atl_vlan_filter_set,
247 .vlan_offload_set = atl_vlan_offload_set,
248 .vlan_tpid_set = atl_vlan_tpid_set,
249 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
252 .rx_queue_start = atl_rx_queue_start,
253 .rx_queue_stop = atl_rx_queue_stop,
254 .rx_queue_setup = atl_rx_queue_setup,
255 .rx_queue_release = atl_rx_queue_release,
257 .tx_queue_start = atl_tx_queue_start,
258 .tx_queue_stop = atl_tx_queue_stop,
259 .tx_queue_setup = atl_tx_queue_setup,
260 .tx_queue_release = atl_tx_queue_release,
262 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
263 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
265 .rx_queue_count = atl_rx_queue_count,
266 .rx_descriptor_status = atl_dev_rx_descriptor_status,
267 .tx_descriptor_status = atl_dev_tx_descriptor_status,
270 .get_eeprom_length = atl_dev_get_eeprom_length,
271 .get_eeprom = atl_dev_get_eeprom,
272 .set_eeprom = atl_dev_set_eeprom,
275 .flow_ctrl_get = atl_flow_ctrl_get,
276 .flow_ctrl_set = atl_flow_ctrl_set,
279 .mac_addr_add = atl_add_mac_addr,
280 .mac_addr_remove = atl_remove_mac_addr,
281 .mac_addr_set = atl_set_default_mac_addr,
282 .set_mc_addr_list = atl_dev_set_mc_addr_list,
283 .rxq_info_get = atl_rxq_info_get,
284 .txq_info_get = atl_txq_info_get,
286 .reta_update = atl_reta_update,
287 .reta_query = atl_reta_query,
288 .rss_hash_update = atl_rss_hash_update,
289 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: full adapter reset via the B0-specific HW layer. */
292 static inline int32_t
293 atl_reset_hw(struct aq_hw_s *hw)
295 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt lines (write all-ones to the ITR mask-set LSW). */
299 atl_enable_intr(struct rte_eth_dev *dev)
301 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
303 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt lines (write all-ones to the ITR mask-clear LSW). */
307 atl_disable_intr(struct aq_hw_s *hw)
309 PMD_INIT_FUNC_TRACE();
310 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init: installs ops/burst functions, programs PCI IDs and MMIO
 * base into the HW struct, applies the hardcoded default config, allocates
 * the MAC address array, initializes FW ops, reads the permanent MAC,
 * clears stats and registers/enables the interrupt handler.
 * Secondary processes only get the function pointers and return early. */
314 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
316 struct atl_adapter *adapter =
317 (struct atl_adapter *)eth_dev->data->dev_private;
318 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
319 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
320 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
323 PMD_INIT_FUNC_TRACE();
325 eth_dev->dev_ops = &atl_eth_dev_ops;
326 eth_dev->rx_pkt_burst = &atl_recv_pkts;
327 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
328 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
330 /* For secondary processes, the primary process has done all the work */
331 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
334 /* Vendor and Device ID need to be set before init of shared code */
335 hw->device_id = pci_dev->id.device_id;
336 hw->vendor_id = pci_dev->id.vendor_id;
337 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
339 /* Hardware configuration - hardcode */
340 adapter->hw_cfg.is_lro = false;
341 adapter->hw_cfg.wol = false;
342 adapter->hw_cfg.is_rss = false;
343 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
345 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
351 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
352 adapter->hw_cfg.aq_rss.indirection_table_size =
353 HW_ATL_B0_RSS_REDIRECTION_MAX;
355 hw->aq_nic_cfg = &adapter->hw_cfg;
/* Keep interrupts masked until setup is complete. */
357 /* disable interrupt */
358 atl_disable_intr(hw);
360 /* Allocate memory for storing MAC addresses */
361 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
362 if (eth_dev->data->mac_addrs == NULL) {
363 PMD_INIT_LOG(ERR, "MAC Malloc failed");
367 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
371 /* Copy the permanent MAC address */
372 if (hw->aq_fw_ops->get_mac_permanent(hw,
373 eth_dev->data->mac_addrs->addr_bytes) != 0)
376 /* Reset the hw statistics */
377 atl_dev_stats_reset(eth_dev);
379 rte_intr_callback_register(intr_handle,
380 atl_dev_interrupt_handler, eth_dev);
382 /* enable uio/vfio intr/eventfd mapping */
383 rte_intr_enable(intr_handle);
385 /* enable support intr */
386 atl_enable_intr(eth_dev);
/* Per-port teardown (primary process only): closes the device if still
 * running, clears function pointers, unregisters the interrupt callback
 * and frees the MAC address array. */
392 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
394 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
395 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
398 PMD_INIT_FUNC_TRACE();
400 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
403 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
405 if (hw->adapter_stopped == 0)
406 atl_dev_close(eth_dev);
408 eth_dev->dev_ops = NULL;
409 eth_dev->rx_pkt_burst = NULL;
410 eth_dev->tx_pkt_burst = NULL;
412 /* disable uio intr before callback unregister */
413 rte_intr_disable(intr_handle);
414 rte_intr_callback_unregister(intr_handle,
415 atl_dev_interrupt_handler, eth_dev);
417 rte_free(eth_dev->data->mac_addrs);
418 eth_dev->data->mac_addrs = NULL;
/* PCI probe: delegate to the generic helper, allocating one atl_adapter
 * as device private data and running eth_atl_dev_init() on it. */
424 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
425 struct rte_pci_device *pci_dev)
427 return rte_eth_dev_pci_generic_probe(pci_dev,
428 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: generic helper invokes eth_atl_dev_uninit(). */
432 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
434 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure: no HW programming here; just flag that the link status
 * must be refreshed once the port is started. */
438 atl_dev_configure(struct rte_eth_dev *dev)
440 struct atl_interrupt *intr =
441 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
443 PMD_INIT_FUNC_TRACE();
445 /* set flag to update link status after init */
446 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
452 * Configure device link speed and setup link.
453 * It returns 0 on success.
/* dev_start sequence: reject fixed-speed requests, reset+init the HW,
 * set up queue interrupt vectors, init RX, start queues, program the
 * requested link-speed mask, wire up LSC/RXQ interrupts and re-enable
 * interrupts. Error paths (truncated in this excerpt) fall through to
 * atl_stop_queues() at the bottom. */
456 atl_dev_start(struct rte_eth_dev *dev)
458 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
459 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
460 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
461 uint32_t intr_vector = 0;
462 uint32_t *link_speeds;
467 PMD_INIT_FUNC_TRACE();
469 /* set adapter started */
470 hw->adapter_stopped = 0;
/* This PMD only autonegotiates; ETH_LINK_SPEED_FIXED is unsupported. */
472 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
474 "Invalid link_speeds for port %u, fix speed not supported",
479 /* disable uio/vfio intr/eventfd mapping */
480 rte_intr_disable(intr_handle);
482 /* reinitialize adapter
483 * this calls reset and start
485 status = atl_reset_hw(hw);
489 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
491 hw_atl_b0_hw_start(hw);
492 /* check and configure queue intr-vector mapping */
493 if ((rte_intr_cap_multiple(intr_handle) ||
494 !RTE_ETH_DEV_SRIOV(dev).active) &&
495 dev->data->dev_conf.intr_conf.rxq != 0) {
496 intr_vector = dev->data->nb_rx_queues;
497 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
498 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
499 ATL_MAX_INTR_QUEUE_NUM);
502 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
503 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
508 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
509 intr_handle->intr_vec = rte_zmalloc("intr_vec",
510 dev->data->nb_rx_queues * sizeof(int), 0);
511 if (intr_handle->intr_vec == NULL) {
512 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
513 " intr_vec", dev->data->nb_rx_queues);
518 /* initialize transmission unit */
521 /* This can fail when allocating mbufs for descriptor rings */
522 err = atl_rx_init(dev);
524 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
528 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
529 hw->fw_ver_actual >> 24,
530 (hw->fw_ver_actual >> 16) & 0xFF,
531 hw->fw_ver_actual & 0xFFFF);
532 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
534 err = atl_start_queues(dev);
536 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
540 err = hw->aq_fw_ops->update_link_status(hw);
545 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
547 link_speeds = &dev->data->dev_conf.link_speeds;
/* Translate ETH_LINK_SPEED_* request bits into the AQ rate mask;
 * AUTONEG means "everything the configured mask allows". */
551 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
552 speed = hw->aq_nic_cfg->link_speed_msk;
554 if (*link_speeds & ETH_LINK_SPEED_10G)
555 speed |= AQ_NIC_RATE_10G;
556 if (*link_speeds & ETH_LINK_SPEED_5G)
557 speed |= AQ_NIC_RATE_5G;
558 if (*link_speeds & ETH_LINK_SPEED_1G)
559 speed |= AQ_NIC_RATE_1G;
560 if (*link_speeds & ETH_LINK_SPEED_2_5G)
561 speed |= AQ_NIC_RATE_2G5;
562 if (*link_speeds & ETH_LINK_SPEED_100M)
563 speed |= AQ_NIC_RATE_100M;
566 err = hw->aq_fw_ops->set_link_speed(hw, speed);
570 if (rte_intr_allow_others(intr_handle)) {
571 /* check if lsc interrupt is enabled */
572 if (dev->data->dev_conf.intr_conf.lsc != 0)
573 atl_dev_lsc_interrupt_setup(dev, true);
575 atl_dev_lsc_interrupt_setup(dev, false);
577 rte_intr_callback_unregister(intr_handle,
578 atl_dev_interrupt_handler, dev);
579 if (dev->data->dev_conf.intr_conf.lsc != 0)
580 PMD_INIT_LOG(INFO, "lsc won't enable because of"
581 " no intr multiplex");
584 /* check if rxq interrupt is enabled */
585 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
586 rte_intr_dp_is_en(intr_handle))
587 atl_dev_rxq_interrupt_setup(dev);
589 /* enable uio/vfio intr/eventfd mapping */
590 rte_intr_enable(intr_handle);
592 /* resume enabled intr since hw reset */
593 atl_enable_intr(dev);
/* error path: stop already-started queues before returning */
598 atl_stop_queues(dev);
603 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* NOTE(review): the lvalue of the ATL_DEV_PRIVATE_TO_HW() line below
 * ('struct aq_hw_s *hw =') appears truncated in this listing. */
606 atl_dev_stop(struct rte_eth_dev *dev)
608 struct rte_eth_link link;
610 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
611 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
612 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
614 PMD_INIT_FUNC_TRACE();
616 /* disable interrupts */
617 atl_disable_intr(hw);
621 hw->adapter_stopped = 1;
623 atl_stop_queues(dev);
625 /* Clear stored conf */
626 dev->data->scattered_rx = 0;
629 /* Clear recorded link status */
630 memset(&link, 0, sizeof(link));
631 rte_eth_linkstatus_set(dev, &link);
633 if (!rte_intr_allow_others(intr_handle))
634 /* resume to the default handler */
635 rte_intr_callback_register(intr_handle,
636 atl_dev_interrupt_handler,
639 /* Clean datapath event and queue/vec mapping */
640 rte_intr_efd_disable(intr_handle);
641 if (intr_handle->intr_vec != NULL) {
642 rte_free(intr_handle->intr_vec);
643 intr_handle->intr_vec = NULL;
648 * Set device link up: enable tx.
/* Re-applies the configured speed mask, which brings the link up. */
651 atl_dev_set_link_up(struct rte_eth_dev *dev)
653 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
655 return hw->aq_fw_ops->set_link_speed(hw,
656 hw->aq_nic_cfg->link_speed_msk);
660 * Set device link down: disable tx.
/* A zero speed mask tells firmware to take the link down. */
663 atl_dev_set_link_down(struct rte_eth_dev *dev)
665 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
667 return hw->aq_fw_ops->set_link_speed(hw, 0);
671 * Reset and stop device.
/* dev_close: stop the port (truncated here) and release all queues. */
674 atl_dev_close(struct rte_eth_dev *dev)
676 PMD_INIT_FUNC_TRACE();
680 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of the port. */
684 atl_dev_reset(struct rte_eth_dev *dev)
688 ret = eth_atl_dev_uninit(dev);
692 ret = eth_atl_dev_init(dev);
/* stats_get: refresh firmware counters, then map DMA packet/byte counters
 * and error/drop counters into rte_eth_stats; per-queue counters come from
 * the software-maintained atl_sw_stats. */
699 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
701 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
702 struct aq_hw_s *hw = &adapter->hw;
703 struct atl_sw_stats *swstats = &adapter->sw_stats;
706 hw->aq_fw_ops->update_stats(hw);
708 /* Fill out the rte_eth_stats statistics structure */
709 stats->ipackets = hw->curr_stats.dma_pkt_rc;
710 stats->ibytes = hw->curr_stats.dma_oct_rc;
711 stats->imissed = hw->curr_stats.dpc;
712 stats->ierrors = hw->curr_stats.erpt;
714 stats->opackets = hw->curr_stats.dma_pkt_tc;
715 stats->obytes = hw->curr_stats.dma_oct_tc;
718 stats->rx_nombuf = swstats->rx_nombuf;
720 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
721 stats->q_ipackets[i] = swstats->q_ipackets[i];
722 stats->q_opackets[i] = swstats->q_opackets[i];
723 stats->q_ibytes[i] = swstats->q_ibytes[i];
724 stats->q_obytes[i] = swstats->q_obytes[i];
725 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used as xstats_reset): sync counters from firmware,
 * then zero both the cached HW snapshot and the software stats. */
731 atl_dev_stats_reset(struct rte_eth_dev *dev)
733 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
734 struct aq_hw_s *hw = &adapter->hw;
736 hw->aq_fw_ops->update_stats(hw);
738 /* Reset software totals */
739 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
741 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names: copy table names into the caller's array; with a NULL
 * array (truncated branch) just report the table size. */
745 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
746 struct rte_eth_xstat_name *xstats_names,
752 return RTE_DIM(atl_xstats_tbl);
754 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
755 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
756 atl_xstats_tbl[i].name);
/* xstats_get: read each counter out of the cached stats snapshot using the
 * byte offsets recorded in atl_xstats_tbl. */
762 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
765 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
766 struct aq_hw_s *hw = &adapter->hw;
772 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
774 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
775 atl_xstats_tbl[i].offset);
/* fw_version_get: format FW version as major.minor.build (8/8/16 bit
 * fields of the 32-bit version word); returns required buffer size
 * including the NUL terminator, per the ethdev contract. */
782 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
784 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
786 unsigned int ret = 0;
788 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
792 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
793 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
795 ret += 1; /* add string null-terminator */
/* dev_infos_get: advertise queue counts, buffer/frame limits, offload
 * capabilities, descriptor limits, RSS parameters and supported speeds. */
804 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
806 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
808 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
809 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
811 dev_info->min_rx_bufsize = 1024;
812 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
813 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
814 dev_info->max_vfs = pci_dev->max_vfs;
816 dev_info->max_hash_mac_addrs = 0;
817 dev_info->max_vmdq_pools = 0;
818 dev_info->vmdq_queue_num = 0;
820 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
822 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
825 dev_info->default_rxconf = (struct rte_eth_rxconf) {
826 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
829 dev_info->default_txconf = (struct rte_eth_txconf) {
830 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
833 dev_info->rx_desc_lim = rx_desc_lim;
834 dev_info->tx_desc_lim = tx_desc_lim;
836 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
837 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
838 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
840 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
841 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
842 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
843 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Report packet types the RX burst function can classify; only valid when
 * the default atl_recv_pkts burst handler is installed. */
846 static const uint32_t *
847 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
849 static const uint32_t ptypes[] = {
851 RTE_PTYPE_L2_ETHER_ARP,
852 RTE_PTYPE_L2_ETHER_VLAN,
862 if (dev->rx_pkt_burst == atl_recv_pkts)
868 /* return 0 means link status changed, -1 means not changed */
/* link_update: query firmware for current link; mbps == 0 means down.
 * The "wait" argument is ignored -- firmware polling is non-blocking. */
870 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
872 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
873 struct atl_interrupt *intr =
874 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
875 struct rte_eth_link link, old;
878 link.link_status = ETH_LINK_DOWN;
880 link.link_duplex = ETH_LINK_FULL_DUPLEX;
881 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
882 memset(&old, 0, sizeof(old));
884 /* load old link status */
885 rte_eth_linkstatus_get(dev, &old);
887 /* read current link status */
888 err = hw->aq_fw_ops->update_link_status(hw);
893 if (hw->aq_link_status.mbps == 0) {
894 /* write default (down) link status */
895 rte_eth_linkstatus_set(dev, &link);
896 if (link.link_status == old.link_status)
901 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
903 link.link_status = ETH_LINK_UP;
904 link.link_duplex = ETH_LINK_FULL_DUPLEX;
905 link.link_speed = hw->aq_link_status.mbps;
907 rte_eth_linkstatus_set(dev, &link);
909 if (link.link_status == old.link_status)
/* Enable L2 promiscuous mode in the receive packet filter. */
916 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
918 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
920 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
/* Disable L2 promiscuous mode in the receive packet filter. */
924 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
926 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
928 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* Accept all multicast frames regardless of the multicast filter. */
932 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
934 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
936 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/* Stop accepting all multicast -- but not while promiscuous mode is on,
 * since promiscuous implies all-multicast. */
940 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
942 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
944 if (dev->data->promiscuous == 1)
945 return; /* must remain in all_multicast mode */
947 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
951 * It clears the interrupt causes and enables the interrupt.
952 * It will be called once only during nic initialized.
955 * Pointer to struct rte_eth_dev.
960 * - On success, zero.
961 * - On failure, a negative value.
/* NOTE(review): in this excerpt the 'on' flag is unused; the handler just
 * logs the current link state. */
965 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
967 atl_dev_link_status_print(dev);
/* RXQ interrupt setup; body truncated in this excerpt. */
972 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and latch the pending interrupt cause, mask further interrupts,
 * and record whether a link-status update is needed. */
979 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
981 struct atl_interrupt *intr =
982 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
983 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
986 hw_atl_b0_hw_irq_read(hw, &cause);
988 atl_disable_intr(hw);
989 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
990 ATL_FLAG_NEED_LINK_UPDATE : 0;
996 * It gets and then prints the link status.
999 * Pointer to struct rte_eth_dev.
1002 * - On success, zero.
1003 * - On failure, a negative value.
/* Logs the cached link state (up + speed/duplex, or down) plus the PCI
 * address of the port. */
1006 atl_dev_link_status_print(struct rte_eth_dev *dev)
1008 struct rte_eth_link link;
1010 memset(&link, 0, sizeof(link));
1011 rte_eth_linkstatus_get(dev, &link);
1012 if (link.link_status) {
1013 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1014 (int)(dev->data->port_id),
1015 (unsigned int)link.link_speed,
1016 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1017 "full-duplex" : "half-duplex");
1019 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1020 (int)(dev->data->port_id));
1026 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1028 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1029 pci_dev->addr.domain,
1031 pci_dev->addr.devid,
1032 pci_dev->addr.function);
1036 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1040 * It executes link_update after knowing an interrupt occurred.
1043 * Pointer to struct rte_eth_dev.
1046 * - On success, zero.
1047 * - On failure, a negative value.
/* Deferred interrupt work: on a link event, refresh and log the link
 * state and fire the LSC callback; then re-enable HW + eventfd intrs. */
1050 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1051 struct rte_intr_handle *intr_handle)
1053 struct atl_interrupt *intr =
1054 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1056 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1057 atl_dev_link_update(dev, 0);
1058 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1059 atl_dev_link_status_print(dev);
1060 _rte_eth_dev_callback_process(dev,
1061 RTE_ETH_EVENT_INTR_LSC, NULL);
1064 atl_enable_intr(dev);
1065 rte_intr_enable(intr_handle);
1071 * Interrupt handler triggered by NIC for handling
1072 * specific interrupt.
1075 * Pointer to interrupt handle.
1077 * The address of parameter (struct rte_eth_dev *) regsitered before.
/* Top-level ISR callback: latch cause, then run the action handler. */
1083 atl_dev_interrupt_handler(void *param)
1085 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1087 atl_dev_interrupt_get_status(dev);
1088 atl_dev_interrupt_action(dev, dev->intr_handle);
/* Fixed SFP module EEPROM size exposed to get_eeprom_length(). */
1091 #define SFP_EEPROM_SIZE 0xff
1094 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1096 return SFP_EEPROM_SIZE;
/* Read module EEPROM via firmware; requires FW support and an exact
 * SFP_EEPROM_SIZE request with a valid buffer. */
1100 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1102 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1104 if (hw->aq_fw_ops->get_eeprom == NULL)
1107 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1110 return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
/* Write module EEPROM via firmware; same preconditions as get_eeprom. */
1114 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1116 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1118 if (hw->aq_fw_ops->set_eeprom == NULL)
1121 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1124 return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
1128 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1130 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1132 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1133 fc_conf->mode = RTE_FC_NONE;
1134 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1135 fc_conf->mode = RTE_FC_FULL;
1136 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1137 fc_conf->mode = RTE_FC_RX_PAUSE;
1138 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1139 fc_conf->mode = RTE_FC_TX_PAUSE;
/* flow_ctrl_set: translate RTE_FC_* mode to AQ_NIC_FC_* bits and push to
 * firmware only when the configuration actually changed. */
1145 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1147 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1151 if (hw->aq_fw_ops->set_flow_control == NULL)
1154 if (fc_conf->mode == RTE_FC_NONE)
1155 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1156 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1157 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1158 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1159 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1160 else if (fc_conf->mode == RTE_FC_FULL)
1161 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1163 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1164 return hw->aq_fw_ops->set_flow_control(hw);
/* Program (or clear) a unicast MAC filter slot: split the 6-byte address
 * into the high 16 bits (h) and low 32 bits (l), write both filter regs,
 * and set the slot's enable bit per 'enable'. A NULL mac_addr leaves
 * h/l at zero (used by the remove path). */
1170 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1171 u8 *mac_addr, bool enable)
1173 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1174 unsigned int h = 0U;
1175 unsigned int l = 0U;
1179 h = (mac_addr[0] << 8) | (mac_addr[1]);
1180 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1181 (mac_addr[4] << 8) | mac_addr[5];
/* Disable the slot while rewriting it to avoid a transiently bad filter. */
1184 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1185 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1186 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1189 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1191 err = aq_hw_err_from_flags(hw);
/* mac_addr_add: reject the all-zero address, then install the filter at
 * 'index' (pool is unused on this HW). */
1197 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1198 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1200 if (is_zero_ether_addr(mac_addr)) {
1201 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1205 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* mac_addr_remove: clear and disable the unicast filter slot. */
1209 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1211 atl_update_mac_addr(dev, index, NULL, false);
/* mac_addr_set: replace the default address in filter slot 0. */
1215 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1217 atl_remove_mac_addr(dev, 0);
1218 atl_add_mac_addr(dev, addr, 0, 0);
/* mtu_set: validate MTU against ETHER_MIN_MTU and the device's maximum
 * frame length (MTU + L2 header + CRC), then record the new max frame
 * size in the port config. */
1223 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1225 struct rte_eth_dev_info dev_info;
1226 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1228 atl_dev_info_get(dev, &dev_info);
1230 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1233 /* update max frame size */
1234 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* vlan_filter_set: add or remove 'vlan_id' from the HW VLAN filter table.
 * Table slots are tracked in cfg->vlan_filter[]; a slot value of 0 means
 * free. When every slot is free the HW is put in VLAN-promiscuous mode so
 * untracked VLANs still pass. */
1240 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1242 struct aq_hw_cfg_s *cfg =
1243 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1244 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1248 PMD_INIT_FUNC_TRACE();
/* Pass 1: look for an existing entry for this VLAN_ID. */
1250 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1251 if (cfg->vlan_filter[i] == vlan_id) {
1253 /* Disable VLAN filter. */
1254 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1256 /* Clear VLAN filter entry */
1257 cfg->vlan_filter[i] = 0;
1263 /* VLAN_ID was not found. So, nothing to delete. */
1264 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1267 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1268 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1271 /* Try to found free VLAN filter to add new VLAN_ID */
1272 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1273 if (cfg->vlan_filter[i] == 0)
1277 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1278 /* We have no free VLAN filter to add new VLAN_ID*/
1283 cfg->vlan_filter[i] = vlan_id;
1284 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1285 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1286 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1289 /* Enable VLAN promisc mode if vlan_filter empty */
1290 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1291 if (cfg->vlan_filter[i] != 0)
1295 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1301 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1303 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1304 struct aq_hw_cfg_s *cfg =
1305 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1308 PMD_INIT_FUNC_TRACE();
1310 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1311 if (cfg->vlan_filter[i])
1312 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1318 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1320 struct aq_hw_cfg_s *cfg =
1321 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1322 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1326 PMD_INIT_FUNC_TRACE();
1328 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1330 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1332 for (i = 0; i < dev->data->nb_rx_queues; i++)
1333 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1335 if (mask & ETH_VLAN_EXTEND_MASK)
1342 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1345 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1348 PMD_INIT_FUNC_TRACE();
1350 switch (vlan_type) {
1351 case ETH_VLAN_TYPE_INNER:
1352 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1354 case ETH_VLAN_TYPE_OUTER:
1355 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1358 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1366 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1368 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1370 PMD_INIT_FUNC_TRACE();
1372 if (queue_id > dev->data->nb_rx_queues) {
1373 PMD_DRV_LOG(ERR, "Invalid queue id");
1377 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1381 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1382 struct ether_addr *mc_addr_set,
1383 uint32_t nb_mc_addr)
1385 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1388 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1391 /* Update whole uc filters table */
1392 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1393 u8 *mac_addr = NULL;
1396 if (i < nb_mc_addr) {
1397 mac_addr = mc_addr_set[i].addr_bytes;
1398 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1399 (mac_addr[4] << 8) | mac_addr[5];
1400 h = (mac_addr[0] << 8) | mac_addr[1];
1403 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1404 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1405 HW_ATL_B0_MAC_MIN + i);
1406 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1407 HW_ATL_B0_MAC_MIN + i);
1408 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1409 HW_ATL_B0_MAC_MIN + i);
1416 atl_reta_update(struct rte_eth_dev *dev,
1417 struct rte_eth_rss_reta_entry64 *reta_conf,
1421 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1422 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1424 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1425 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1426 dev->data->nb_rx_queues - 1);
1428 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1433 atl_reta_query(struct rte_eth_dev *dev,
1434 struct rte_eth_rss_reta_entry64 *reta_conf,
1438 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1440 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1441 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1442 reta_conf->mask = ~0U;
1447 atl_rss_hash_update(struct rte_eth_dev *dev,
1448 struct rte_eth_rss_conf *rss_conf)
1450 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1451 struct aq_hw_cfg_s *cfg =
1452 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1453 static u8 def_rss_key[40] = {
1454 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1455 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1456 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1457 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1458 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1461 cfg->is_rss = !!rss_conf->rss_hf;
1462 if (rss_conf->rss_key) {
1463 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1464 rss_conf->rss_key_len);
1465 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1467 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1468 sizeof(def_rss_key));
1469 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1472 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1473 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1478 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1479 struct rte_eth_rss_conf *rss_conf)
1481 struct aq_hw_cfg_s *cfg =
1482 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1484 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1485 if (rss_conf->rss_key) {
1486 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1487 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1488 rss_conf->rss_key_len);
/* Register the PMD with the EAL: the PCI driver itself, its PCI ID
 * match table, and the kernel modules the device may be bound to.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor (runs before main): register the driver's two log types
 * and default both to NOTICE level.  A negative id from
 * rte_log_register() means registration failed, so the level is only
 * set when the id is valid.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}