1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 uint16_t vlan_id, int on);
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 uint16_t queue_id, int on);
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 enum rte_vlan_type vlan_type, uint16_t tpid);
66 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
67 struct rte_eth_fc_conf *fc_conf);
68 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
69 struct rte_eth_fc_conf *fc_conf);
71 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
74 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
75 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
76 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
77 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
78 struct rte_intr_handle *handle);
79 static void atl_dev_interrupt_handler(void *param);
82 static int atl_add_mac_addr(struct rte_eth_dev *dev,
83 struct ether_addr *mac_addr,
84 uint32_t index, uint32_t pool);
85 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
86 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
87 struct ether_addr *mac_addr);
89 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
90 struct ether_addr *mc_addr_set,
94 static int atl_reta_update(struct rte_eth_dev *dev,
95 struct rte_eth_rss_reta_entry64 *reta_conf,
97 static int atl_reta_query(struct rte_eth_dev *dev,
98 struct rte_eth_rss_reta_entry64 *reta_conf,
100 static int atl_rss_hash_update(struct rte_eth_dev *dev,
101 struct rte_eth_rss_conf *rss_conf);
102 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
103 struct rte_eth_rss_conf *rss_conf);
106 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
107 struct rte_pci_device *pci_dev);
108 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
110 static void atl_dev_info_get(struct rte_eth_dev *dev,
111 struct rte_eth_dev_info *dev_info);
113 int atl_logtype_init;
114 int atl_logtype_driver;
117 * The set of PCI devices this driver supports
/*
 * PCI IDs claimed by this PMD: Aquantia AQC10x family (D10x engineering
 * IDs, AQC1xx production, "S" and "E" variants). Terminated by a zeroed
 * sentinel entry as required by the rte_pci_id table convention.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
/*
 * PCI driver descriptor: requests BAR mapping, link-state-change interrupt
 * support, and IOVA-as-VA mode; probe/remove wire into the generic
 * rte_eth_dev_pci helpers below.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
153 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
154 | DEV_RX_OFFLOAD_IPV4_CKSUM \
155 | DEV_RX_OFFLOAD_UDP_CKSUM \
156 | DEV_RX_OFFLOAD_TCP_CKSUM \
157 | DEV_RX_OFFLOAD_JUMBO_FRAME)
159 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
160 | DEV_TX_OFFLOAD_IPV4_CKSUM \
161 | DEV_TX_OFFLOAD_UDP_CKSUM \
162 | DEV_TX_OFFLOAD_TCP_CKSUM \
163 | DEV_TX_OFFLOAD_TCP_TSO \
164 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor ring limits reported via dev_info (count and alignment). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
/* Tx descriptor ring limits, including per-packet segment caps for TSO. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
180 #define ATL_XSTATS_FIELD(name) { \
182 offsetof(struct aq_stats_s, name) \
185 struct atl_xstats_tbl_s {
/*
 * Extended-stats table: maps each xstat name to its offset inside
 * struct aq_stats_s (see ATL_XSTATS_FIELD). Entries are FW MAC counters:
 * unicast/multicast/broadcast packet and octet counts plus error counts,
 * for both rx (…rc/…rpr) and tx (…tc/…rpt) directions.
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
/*
 * ethdev ops vtable — the single point where the PMD exposes its
 * functionality to the rte_ethdev API layer. Queue ops (atl_rx_queue_* /
 * atl_tx_queue_* etc.) are implemented in the companion rxtx file.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure = atl_dev_configure,
	.dev_start = atl_dev_start,
	.dev_stop = atl_dev_stop,
	.dev_set_link_up = atl_dev_set_link_up,
	.dev_set_link_down = atl_dev_set_link_down,
	.dev_close = atl_dev_close,
	.dev_reset = atl_dev_reset,
	.promiscuous_enable = atl_dev_promiscuous_enable,
	.promiscuous_disable = atl_dev_promiscuous_disable,
	.allmulticast_enable = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,
	.link_update = atl_dev_link_update,
	.stats_get = atl_dev_stats_get,
	.xstats_get = atl_dev_xstats_get,
	.xstats_get_names = atl_dev_xstats_get_names,
	.stats_reset = atl_dev_stats_reset,
	/* xstats share the same reset handler as basic stats */
	.xstats_reset = atl_dev_stats_reset,
	.fw_version_get = atl_fw_version_get,
	.dev_infos_get = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
	.mtu_set = atl_dev_mtu_set,
	.vlan_filter_set = atl_vlan_filter_set,
	.vlan_offload_set = atl_vlan_offload_set,
	.vlan_tpid_set = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
	.rx_queue_start = atl_rx_queue_start,
	.rx_queue_stop = atl_rx_queue_stop,
	.rx_queue_setup = atl_rx_queue_setup,
	.rx_queue_release = atl_rx_queue_release,
	.tx_queue_start = atl_tx_queue_start,
	.tx_queue_stop = atl_tx_queue_stop,
	.tx_queue_setup = atl_tx_queue_setup,
	.tx_queue_release = atl_tx_queue_release,
	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
	.rx_queue_count = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,
	.flow_ctrl_get = atl_flow_ctrl_get,
	.flow_ctrl_set = atl_flow_ctrl_set,
	.mac_addr_add = atl_add_mac_addr,
	.mac_addr_remove = atl_remove_mac_addr,
	.mac_addr_set = atl_set_default_mac_addr,
	.set_mc_addr_list = atl_dev_set_mc_addr_list,
	.rxq_info_get = atl_rxq_info_get,
	.txq_info_get = atl_txq_info_get,
	.reta_update = atl_reta_update,
	.reta_query = atl_reta_query,
	.rss_hash_update = atl_rss_hash_update,
	.rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: full HW reset via the B0-specific reset sequence. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
	return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt sources (lower status word) on the adapter. */
atl_enable_intr(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupt sources; counterpart of atl_enable_intr(). */
atl_disable_intr(struct aq_hw_s *hw)
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/*
 * Per-device init: install ops and burst functions, hard-code the HW
 * configuration, initialize FW ops, read the permanent MAC address and
 * register/enable the PCI interrupt callback.
 */
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
	/* default flow control: both rx and tx pause enabled */
	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;
	hw->aq_nic_cfg = &adapter->hw_cfg;
	/* disable interrupt */
	atl_disable_intr(hw);
	/* Allocate memory for storing MAC addresses */
	/* NOTE(review): only one ETHER_ADDR_LEN slot is allocated here while
	 * dev_info advertises HW_ATL_B0_MAC_MAX addresses — confirm callers
	 * never index mac_addrs[] past 0.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);
	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);
	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);
	/* enable support intr */
	atl_enable_intr(eth_dev);
/*
 * Per-device teardown (primary process only): close the device if still
 * running, clear the burst/ops pointers, unregister the interrupt
 * callback and free the MAC address table.
 */
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	/* ensure queues/HW are stopped before freeing resources */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
/* PCI probe: delegate allocation + init to the generic ethdev-PCI helper. */
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: delegate teardown to the generic ethdev-PCI helper. */
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure op: nothing to program yet, just request a link update
 * once the device is started (handled by the interrupt/start paths). */
atl_dev_configure(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
440 * Configure device link speed and setup link.
441 * It returns 0 on success.
/*
 * dev_start op: reset and re-init the HW, set up queue interrupt vectors,
 * initialize rx rings, start queues, program the requested link speed and
 * re-enable interrupts. Fixed-speed configuration is rejected — the FW
 * only supports autoneg within a speed mask.
 */
atl_dev_start(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t *link_speeds;
	PMD_INIT_FUNC_TRACE();
	/* set adapter started */
	hw->adapter_stopped = 0;
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		"Invalid link_speeds for port %u, fix speed not supported",
	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);
	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		/* one interrupt vector per rx queue */
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
				     ATL_MAX_INTR_QUEUE_NUM);
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
	/* initialize transmission unit */
	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
	/* FW version is packed major.minor.build into a 32-bit word */
	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
	err = atl_start_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
	err = hw->aq_fw_ops->update_link_status(hw);
	/* link is considered up when FW reports a nonzero speed */
	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
	link_speeds = &dev->data->dev_conf.link_speeds;
	/* translate ethdev speed flags into the FW's rate mask */
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = hw->aq_nic_cfg->link_speed_msk;
	if (*link_speeds & ETH_LINK_SPEED_10G)
		speed |= AQ_NIC_RATE_10G;
	if (*link_speeds & ETH_LINK_SPEED_5G)
		speed |= AQ_NIC_RATE_5G;
	if (*link_speeds & ETH_LINK_SPEED_1G)
		speed |= AQ_NIC_RATE_1G;
	if (*link_speeds & ETH_LINK_SPEED_2_5G)
		speed |= AQ_NIC_RATE_2G5;
	if (*link_speeds & ETH_LINK_SPEED_100M)
		speed |= AQ_NIC_RATE_100M;
	err = hw->aq_fw_ops->set_link_speed(hw, speed);
	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		atl_dev_lsc_interrupt_setup(dev, false);
		/* no interrupt multiplexing: fall back to polling for LSC */
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);
	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);
	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);
	/* error path: roll back started queues */
	atl_stop_queues(dev);
591 * Stop device: disable rx and tx functions to allow for reconfiguring.
/*
 * dev_stop op: mask interrupts, stop queues, clear recorded link status
 * and release the rx-queue interrupt vector mapping so the device can be
 * reconfigured.
 */
atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	PMD_INIT_FUNC_TRACE();
	/* disable interrupts */
	atl_disable_intr(hw);
	hw->adapter_stopped = 1;
	atl_stop_queues(dev);
	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
636 * Set device link up: enable tx.
/* Bring the link up by re-advertising the full configured speed mask. */
atl_dev_set_link_up(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return hw->aq_fw_ops->set_link_speed(hw,
			hw->aq_nic_cfg->link_speed_msk);
648 * Set device link down: disable tx.
/* Bring the link down by advertising an empty (zero) speed mask. */
atl_dev_set_link_down(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return hw->aq_fw_ops->set_link_speed(hw, 0);
659 * Reset and stop device.
/* dev_close op: stop the device and release all queue resources. */
atl_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();
	atl_free_queues(dev);
/* dev_reset op: full software reset implemented as uninit + re-init. */
atl_dev_reset(struct rte_eth_dev *dev)
	ret = eth_atl_dev_uninit(dev);
	ret = eth_atl_dev_init(dev);
/*
 * stats_get op: refresh FW counters, then combine DMA-level HW counters
 * (packets/bytes/missed/errors) with software per-queue counters kept by
 * the rx/tx paths.
 */
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	/* pull fresh counters from firmware into hw->curr_stats */
	hw->aq_fw_ops->update_stats(hw);
	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;
	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->rx_nombuf = swstats->rx_nombuf;
	/* per-queue counters are tracked in software by the burst functions */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
/*
 * stats_reset / xstats_reset op: resync with FW, then zero both the
 * cached HW counters and the software per-queue counters.
 */
atl_dev_stats_reset(struct rte_eth_dev *dev)
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	hw->aq_fw_ops->update_stats(hw);
	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/*
 * xstats_get_names op: copy names from atl_xstats_tbl; when the caller
 * only probes for the count, return the table size.
 */
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
		return RTE_DIM(atl_xstats_tbl);
	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
			 atl_xstats_tbl[i].name);
/*
 * xstats_get op: read each counter out of hw->curr_stats at the byte
 * offset recorded in atl_xstats_tbl.
 */
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					  atl_xstats_tbl[i].offset);
/*
 * fw_version_get op: format the packed 32-bit FW version as
 * "major.minor.build" (8/8/16 bit fields) into the caller's buffer.
 */
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int ret = 0;
	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
	ret += 1; /* add string null-terminator */
/*
 * dev_infos_get op: advertise queue/MAC limits, offload capabilities,
 * descriptor limits, RSS parameters and the supported link speeds
 * (100M / 1G / 2.5G / 5G / 10G).
 */
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;
	/* no MAC hash filtering or VMDq support */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;
	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;
	/* hash key size reported in bytes, constant is in bits */
	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/*
 * dev_supported_ptypes_get op: return the static packet-type list, but
 * only when our own rx burst function is installed (otherwise the ptype
 * information would not be produced).
 */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
	if (dev->rx_pkt_burst == atl_recv_pkts)
856 /* return 0 means link status changed, -1 means not changed */
/*
 * link_update op: query the FW for the current link state and publish it
 * via rte_eth_linkstatus_set(). Returns 0 when the status changed,
 * -1 when unchanged (per the comment above the function).
 */
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	/* start from a "down" template */
	link.link_status = ETH_LINK_DOWN;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));
	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);
	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);
	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	/* FW reports speed directly in Mbps, matching link_speed units */
	link.link_speed = hw->aq_link_status.mbps;
	rte_eth_linkstatus_set(dev, &link);
	if (link.link_status == old.link_status)
/* promiscuous_enable op: turn on L2 promiscuous mode in the filter unit. */
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
/* promiscuous_disable op: turn off L2 promiscuous mode. */
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* allmulticast_enable op: accept all multicast frames. */
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/*
 * allmulticast_disable op: stop accepting all multicast — unless the port
 * is promiscuous, in which case all-multicast must stay on.
 */
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
939 * It clears the interrupt causes and enables the interrupt.
940 * It will be called once only during nic initialized.
943 * Pointer to struct rte_eth_dev.
948 * - On success, zero.
949 * - On failure, a negative value.
/* LSC interrupt setup: currently only logs the link status ('on' unused). */
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup: nothing to program here ('dev' unused). */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/*
 * Read and acknowledge the HW interrupt cause, mask further interrupts,
 * and record whether a link-change needs handling in intr->flags.
 */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw_atl_b0_hw_irq_read(hw, &cause);
	atl_disable_intr(hw);
	/* remember only the link cause; other causes carry no action here */
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
		ATL_FLAG_NEED_LINK_UPDATE : 0;
984 * It gets and then prints the link status.
987 * Pointer to struct rte_eth_dev.
990 * - On success, zero.
991 * - On failure, a negative value.
/*
 * Log the current link status (speed/duplex when up) and, in the debug
 * branch, the device's PCI address.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			    "full-duplex" : "half-duplex");
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		    pci_dev->addr.domain,
		    pci_dev->addr.devid,
		    pci_dev->addr.function);
	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1028 * It executes link_update after knowing an interrupt occurred.
1031 * Pointer to struct rte_eth_dev.
1034 * - On success, zero.
1035 * - On failure, a negative value.
/*
 * Interrupt bottom half: if a link change was flagged by
 * atl_dev_interrupt_get_status(), refresh/print link state and fire the
 * LSC callback; always re-enable HW and PCI interrupts before returning.
 */
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
		atl_dev_link_update(dev, 0);
		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
		atl_dev_link_status_print(dev);
		/* notify registered applications of the link change */
		_rte_eth_dev_callback_process(dev,
					      RTE_ETH_EVENT_INTR_LSC, NULL);
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);
1059 * Interrupt handler triggered by NIC for handling
1060 * specific interrupt.
1063 * Pointer to interrupt handle.
1065 * The address of parameter (struct rte_eth_dev *) regsitered before.
/*
 * Top-level ISR registered with the EAL: param is the rte_eth_dev
 * registered at init time; read the cause then run the action handler.
 */
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
1081 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1083 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1085 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1086 fc_conf->mode = RTE_FC_NONE;
1087 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1088 fc_conf->mode = RTE_FC_FULL;
1089 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1090 fc_conf->mode = RTE_FC_RX_PAUSE;
1091 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1092 fc_conf->mode = RTE_FC_TX_PAUSE;
/*
 * flow_ctrl_set op: translate the requested RTE_FC_* mode into the NIC's
 * AQ_NIC_FC_* bits and push the change to firmware, but only if the
 * configuration actually changed.
 */
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
	/* firmware variant without flow-control support */
	if (hw->aq_fw_ops->set_flow_control == NULL)
	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);
/*
 * Program (or clear) unicast filter slot 'index'. The 6-byte MAC is split
 * into a 16-bit high word (bytes 0-1) and 32-bit low word (bytes 2-5) as
 * expected by the L2 filter registers; the filter is disabled during the
 * update and re-enabled only when 'enable' is set.
 */
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];
	/* disable the slot before rewriting its address words */
	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
	hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
	err = aq_hw_err_from_flags(hw);
/*
 * mac_addr_add op: reject the all-zero address, then program the filter
 * slot via atl_update_mac_addr (pool is unused by this HW).
 */
atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
	if (is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* mac_addr_remove op: disable the unicast filter slot at 'index'. */
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	atl_update_mac_addr(dev, index, NULL, false);
/* mac_addr_set op: replace the default (slot 0) MAC address. */
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
/*
 * mtu_set op: validate the resulting frame size (MTU + L2 header + CRC)
 * against the device limits, then record it in the rxmode configuration.
 */
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	atl_dev_info_get(dev, &dev_info);
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/*
 * vlan_filter_set op: add or remove 'vlan_id' from the HW VLAN filter
 * table mirrored in cfg->vlan_filter[]. A zero entry marks a free slot
 * (so VLAN id 0 cannot be filtered explicitly). When the table becomes
 * empty, VLAN promiscuous mode is enabled so traffic still flows.
 */
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	/* look up an existing entry for this VLAN id */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			/* Disable VLAN filter. */
			hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
			/* Clear VLAN filter entry */
			cfg->vlan_filter[i] = 0;
	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
	/* Enable VLAN promisc mode if vlan_filter empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
/*
 * Enable/disable every currently-programmed VLAN filter slot (those with
 * a nonzero id in cfg->vlan_filter[]) without altering the table itself.
 */
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
/*
 * vlan_offload_set op: apply VLAN filter and strip settings from 'mask';
 * stripping is applied uniformly to every configured rx queue. Extended
 * (QinQ) mode is not supported by this HW.
 */
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
	if (mask & ETH_VLAN_EXTEND_MASK)
1295 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1298 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1301 PMD_INIT_FUNC_TRACE();
1303 switch (vlan_type) {
1304 case ETH_VLAN_TYPE_INNER:
1305 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1307 case ETH_VLAN_TYPE_OUTER:
1308 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1311 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1319 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1321 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1323 PMD_INIT_FUNC_TRACE();
1325 if (queue_id > dev->data->nb_rx_queues) {
1326 PMD_DRV_LOG(ERR, "Invalid queue id");
1330 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1334 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1335 struct ether_addr *mc_addr_set,
1336 uint32_t nb_mc_addr)
1338 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1341 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1344 /* Update whole uc filters table */
1345 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1346 u8 *mac_addr = NULL;
1349 if (i < nb_mc_addr) {
1350 mac_addr = mc_addr_set[i].addr_bytes;
1351 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1352 (mac_addr[4] << 8) | mac_addr[5];
1353 h = (mac_addr[0] << 8) | mac_addr[1];
1356 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1357 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1358 HW_ATL_B0_MAC_MIN + i);
1359 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1360 HW_ATL_B0_MAC_MIN + i);
1361 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1362 HW_ATL_B0_MAC_MIN + i);
1369 atl_reta_update(struct rte_eth_dev *dev,
1370 struct rte_eth_rss_reta_entry64 *reta_conf,
1374 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1375 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1377 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1378 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1379 dev->data->nb_rx_queues - 1);
1381 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1386 atl_reta_query(struct rte_eth_dev *dev,
1387 struct rte_eth_rss_reta_entry64 *reta_conf,
1391 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1393 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1394 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1395 reta_conf->mask = ~0U;
1400 atl_rss_hash_update(struct rte_eth_dev *dev,
1401 struct rte_eth_rss_conf *rss_conf)
1403 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1404 struct aq_hw_cfg_s *cfg =
1405 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1406 static u8 def_rss_key[40] = {
1407 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1408 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1409 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1410 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1411 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1414 cfg->is_rss = !!rss_conf->rss_hf;
1415 if (rss_conf->rss_key) {
1416 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1417 rss_conf->rss_key_len);
1418 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1420 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1421 sizeof(def_rss_key));
1422 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1425 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1426 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1431 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1432 struct rte_eth_rss_conf *rss_conf)
1434 struct aq_hw_cfg_s *cfg =
1435 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1437 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1438 if (rss_conf->rss_key) {
1439 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1440 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1441 rss_conf->rss_key_len);
/* Register the PMD with the PCI bus driver, expose its PCI id match table,
 * and declare the kernel modules the device may be bound to.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the driver's two log types and default both to
 * NOTICE level.  A negative return from rte_log_register means
 * registration failed, in which case the level is left untouched.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}