X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fatlantic%2Fatl_ethdev.c;h=f7bfac796c074eac9521731a3b8c32f6c138c467;hb=daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f;hp=8327863cd9b6c53b2d70ed6e61cfd5a9dd58cf25;hpb=6723c0fc7207ca4416822b170b1485a78aa47c7c;p=dpdk.git

diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 8327863cd9..f7bfac796c 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -3,7 +3,8 @@
  */

 #include <rte_string_fns.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
+#include <rte_alarm.h>

 #include "atl_ethdev.h"
 #include "atl_common.h"
@@ -14,19 +15,17 @@
 #include "hw_atl/hw_atl_b0_internal.h"

 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
-static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
-
 static int atl_dev_configure(struct rte_eth_dev *dev);
 static int atl_dev_start(struct rte_eth_dev *dev);
-static void atl_dev_stop(struct rte_eth_dev *dev);
+static int atl_dev_stop(struct rte_eth_dev *dev);
 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
-static void atl_dev_close(struct rte_eth_dev *dev);
+static int atl_dev_close(struct rte_eth_dev *dev);
 static int atl_dev_reset(struct rte_eth_dev *dev);
-static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
@@ -39,14 +38,11 @@ static int atl_dev_stats_get(struct rte_eth_dev *dev,
 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);
-static void atl_dev_stats_reset(struct rte_eth_dev *dev);
+static int atl_dev_stats_reset(struct rte_eth_dev *dev);

 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

-static void atl_dev_info_get(struct rte_eth_dev *dev,
-			     struct rte_eth_dev_info *dev_info);
-
 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -92,14 +88,14 @@ static void atl_dev_interrupt_handler(void *param);

 static int atl_add_mac_addr(struct rte_eth_dev *dev,
-			    struct ether_addr *mac_addr,
+			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
-					   struct ether_addr *mac_addr);
+					   struct rte_ether_addr *mac_addr);
 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-				    struct ether_addr *mc_addr_set,
+				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

 /* RSS */
@@ -119,12 +115,9 @@ static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

-static void atl_dev_info_get(struct rte_eth_dev *dev,
+static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

-int atl_logtype_init;
-int atl_logtype_driver;
-
 /*
  * The set of PCI devices this driver supports
  */
@@ -156,25 +149,27 @@ static const struct rte_pci_id pci_id_atl_map[] = {

 static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
 };

-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_JUMBO_FRAME \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define SFP_EEPROM_SIZE 0x100

 static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
@@ -190,14 +185,27 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
 };

+enum atl_xstats_type {
+	XSTATS_TYPE_MSM = 0,
+	XSTATS_TYPE_MACSEC,
+};
+
 #define ATL_XSTATS_FIELD(name) { \
	#name, \
-	offsetof(struct aq_stats_s, name) \
+	offsetof(struct aq_stats_s, name), \
+	XSTATS_TYPE_MSM \
+}
+
+#define ATL_MACSEC_XSTATS_FIELD(name) { \
+	#name, \
+	offsetof(struct macsec_stats, name), \
+	XSTATS_TYPE_MACSEC \
 }

 struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
+	enum atl_xstats_type type;
 };

 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
@@ -215,6 +223,38 @@ static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
+	/* Ingress Common Counters */
+	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
+	/* Ingress SA Counters */
+	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
+	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
+	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
+	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
+	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
+	/* Egress Common Counters */
+	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_too_long),
+	/* Egress SC Counters */
+	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
+	/* Egress SA Counters */
+	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
+	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
+	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
 };

 static const struct eth_dev_ops atl_eth_dev_ops = {
@@ -270,10 +310,6 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

-	.rx_queue_count = atl_rx_queue_count,
-	.rx_descriptor_status = atl_dev_rx_descriptor_status,
-	.tx_descriptor_status = atl_dev_tx_descriptor_status,
-
	/* EEPROM */
	.get_eeprom_length = atl_dev_get_eeprom_length,
	.get_eeprom = atl_dev_get_eeprom,
@@ -321,8 +357,7 @@ atl_disable_intr(struct aq_hw_s *hw)
 static int
 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct atl_adapter *adapter =
-		(struct atl_adapter *)eth_dev->data->dev_private;
+	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -331,6 +366,11 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
+
+	eth_dev->rx_queue_count = atl_rx_queue_count;
+	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
+
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
@@ -339,6 +379,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
@@ -362,11 +404,14 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)

	hw->aq_nic_cfg = &adapter->hw_cfg;

+	pthread_mutex_init(&hw->mbox_mutex, NULL);
+
	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
+					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
@@ -396,38 +441,6 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
	return err;
 }

-static int
-eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
-{
-	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	struct aq_hw_s *hw;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
-
-	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-
-	if (hw->adapter_stopped == 0)
-		atl_dev_close(eth_dev);
-
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	/* disable uio intr before callback unregister */
-	rte_intr_disable(intr_handle);
-	rte_intr_callback_unregister(intr_handle,
-				     atl_dev_interrupt_handler, eth_dev);
-
-	rte_free(eth_dev->data->mac_addrs);
-	eth_dev->data->mac_addrs = NULL;
-
-	return 0;
-}
-
 static int
 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
@@ -439,7 +452,7 @@ eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 static int
 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
+	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
 }

 static int
@@ -475,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
	/* set adapter started */
	hw->adapter_stopped = 0;

-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
@@ -552,9 +565,6 @@ atl_dev_start(struct rte_eth_dev *dev)

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

-	if (err)
-		goto error;
-
	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
@@ -590,7 +600,7 @@ error:
 /*
  * Stop device: disable rx and tx functions to allow for reconfiguring.
  */
-static void
+static int
 atl_dev_stop(struct rte_eth_dev *dev)
 {
	struct rte_eth_link link;
@@ -600,6 +610,7 @@ atl_dev_stop(struct rte_eth_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
+	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);
@@ -630,6 +641,8 @@ atl_dev_stop(struct rte_eth_dev *dev)
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
+
+	return 0;
 }

 /*
@@ -642,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

@@ -674,14 +687,33 @@ atl_dev_set_link_down(struct rte_eth_dev *dev)
 /*
  * Reset and stop device.
  */
-static void
+static int
 atl_dev_close(struct rte_eth_dev *dev)
 {
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct aq_hw_s *hw;
+	int ret;
+
	PMD_INIT_FUNC_TRACE();

-	atl_dev_stop(dev);
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ret = atl_dev_stop(dev);

	atl_free_queues(dev);
+
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     atl_dev_interrupt_handler, dev);
+
+	pthread_mutex_destroy(&hw->mbox_mutex);
+
+	return ret;
 }

 static int
@@ -689,7 +721,7 @@ atl_dev_reset(struct rte_eth_dev *dev)
 {
	int ret;

-	ret = eth_atl_dev_uninit(dev);
+	ret = atl_dev_close(dev);
	if (ret)
		return ret;

@@ -698,6 +730,207 @@
	return ret;
 }

+static int
+atl_dev_configure_macsec(struct rte_eth_dev *dev)
+{
+	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
+	struct macsec_msg_fw_request msg_macsec;
+	struct macsec_msg_fw_response response;
+
+	if (!aqcfg->common.macsec_enabled ||
+	    hw->aq_fw_ops->send_macsec_req == NULL)
+		return 0;
+
+	memset(&msg_macsec, 0, sizeof(msg_macsec));
+
+	/* Creating set of sc/sa structures from parameters provided by DPDK */
+
+	/* Configure macsec */
+	msg_macsec.msg_type = macsec_cfg_msg;
+	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
+	msg_macsec.cfg.interrupts_enabled = 1;
+
+	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
+
+	if (response.result)
+		return -1;
+
+	memset(&msg_macsec, 0, sizeof(msg_macsec));
+
+	/* Configure TX SC */
+
+	msg_macsec.msg_type = macsec_add_tx_sc_msg;
+	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
+	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
+
+	/* MAC addr for TX */
+	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
+	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
+	msg_macsec.txsc.sa_mask = 0x3f;
+
+	msg_macsec.txsc.da_mask = 0;
+	msg_macsec.txsc.tci = 0x0B;
+	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
+
+	/*
+	 * Creating SCI (Secure Channel Identifier).
+	 * SCI constructed from Source MAC and Port identifier
+	 */
+	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
+			       (msg_macsec.txsc.mac_sa[0] >> 16);
+	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
+
+	uint32_t port_identifier = 1;
+
+	msg_macsec.txsc.sci[1] = sci_hi_part;
+	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
+
+	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
+
+	if (response.result)
+		return -1;
+
+	memset(&msg_macsec, 0, sizeof(msg_macsec));
+
+	/* Configure RX SC */
+
+	msg_macsec.msg_type = macsec_add_rx_sc_msg;
+	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
+	msg_macsec.rxsc.replay_protect =
+		aqcfg->common.replay_protection_enabled;
+	msg_macsec.rxsc.anti_replay_window = 0;
+
+	/* MAC addr for RX */
+	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
+	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
+	msg_macsec.rxsc.da_mask = 0;//0x3f;
+
+	msg_macsec.rxsc.sa_mask = 0;
+
+	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
+
+	if (response.result)
+		return -1;
+
+	memset(&msg_macsec, 0, sizeof(msg_macsec));
+
+	/* Configure RX SC */
+
+	msg_macsec.msg_type = macsec_add_tx_sa_msg;
+	msg_macsec.txsa.index = aqcfg->txsa.idx;
+	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
+
+	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
+	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
+	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
+	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
+
+	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
+
+	if (response.result)
+		return -1;
+
+	memset(&msg_macsec, 0, sizeof(msg_macsec));
+
+	/* Configure RX SA */
+
+	msg_macsec.msg_type = macsec_add_rx_sa_msg;
+	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
+	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
+
+	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
+	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
+	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
+	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
+
+	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
+
+	if (response.result)
+		return -1;
+
+	return 0;
+}
+
+int atl_macsec_enable(struct rte_eth_dev *dev,
+		      uint8_t encr, uint8_t repl_prot)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	cfg->aq_macsec.common.macsec_enabled = 1;
+	cfg->aq_macsec.common.encryption_enabled = encr;
+	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
+
+	return 0;
+}
+
+int atl_macsec_disable(struct rte_eth_dev *dev)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	cfg->aq_macsec.common.macsec_enabled = 0;
+
+	return 0;
+}
+
+int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
+	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
+		RTE_ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
+			   uint8_t *mac, uint16_t pi)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
+	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
+		RTE_ETHER_ADDR_LEN);
+	cfg->aq_macsec.rxsc.pi = pi;
+
+	return 0;
+}
+
+int atl_macsec_select_txsa(struct rte_eth_dev *dev,
+			   uint8_t idx, uint8_t an,
+			   uint32_t pn, uint8_t *key)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	cfg->aq_macsec.txsa.idx = idx;
+	cfg->aq_macsec.txsa.pn = pn;
+	cfg->aq_macsec.txsa.an = an;
+
+	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
+	return 0;
+}
+
+int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
+			   uint8_t idx, uint8_t an,
+			   uint32_t pn, uint8_t *key)
+{
+	struct aq_hw_cfg_s *cfg =
+		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+	cfg->aq_macsec.rxsa.idx = idx;
+	cfg->aq_macsec.rxsa.pn = pn;
+	cfg->aq_macsec.rxsa.an = an;
+
+	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
+	return 0;
+}

 static int
 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -731,7 +964,7 @@ atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	return 0;
 }

-static void
+static int
 atl_dev_stats_reset(struct rte_eth_dev *dev)
 {
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
@@ -743,6 +976,28 @@ atl_dev_stats_reset(struct rte_eth_dev *dev)

	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
+
+	return 0;
+}
+
+static int
+atl_dev_xstats_get_count(struct rte_eth_dev *dev)
+{
+	struct atl_adapter *adapter =
+		(struct atl_adapter *)dev->data->dev_private;
+
+	struct aq_hw_s *hw = &adapter->hw;
+	unsigned int i, count = 0;
+
+	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
+		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
+			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
+			continue;
+
+		count++;
+	}
+
+	return count;
 }

 static int
@@ -751,32 +1006,62 @@ atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 unsigned int size)
 {
	unsigned int i;
+	unsigned int count = atl_dev_xstats_get_count(dev);

-	if (!xstats_names)
-		return RTE_DIM(atl_xstats_tbl);
-
-	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
-		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
-			RTE_ETH_XSTATS_NAME_SIZE);
+	if (xstats_names) {
+		for (i = 0; i < size && i < count; i++) {
+			snprintf(xstats_names[i].name,
+				RTE_ETH_XSTATS_NAME_SIZE, "%s",
+				atl_xstats_tbl[i].name);
+		}
+	}

-	return i;
+	return count;
 }

 static int
 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
 {
-	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
+	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
+	struct get_stats req = { 0 };
+	struct macsec_msg_fw_request msg = { 0 };
+	struct macsec_msg_fw_response resp = { 0 };
+	int err = -1;
	unsigned int i;
+	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
-		return 0;
+		return count;
+
+	if (hw->aq_fw_ops->send_macsec_req != NULL) {
+		req.ingress_sa_index = 0xff;
+		req.egress_sc_index = 0xff;
+		req.egress_sa_index = 0xff;

-	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
+		msg.msg_type = macsec_get_stats_msg;
+		msg.stats = req;
+
+		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
+	}
+
+	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;
-		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
+
+		switch (atl_xstats_tbl[i].type) {
+		case XSTATS_TYPE_MSM:
+			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
+					 atl_xstats_tbl[i].offset);
+			break;
+		case XSTATS_TYPE_MACSEC:
+			if (!err) {
+				stats[i].value =
+					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
+			}
+			break;
+		}
	}

	return i;
@@ -787,7 +1072,7 @@ atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
-	unsigned int ret = 0;
+	int ret = 0;
	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
@@ -795,16 +1080,17 @@ atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
+	if (ret < 0)
+		return -EINVAL;

	ret += 1; /* add string null-terminator */
-
-	if (fw_size < ret)
+	if (fw_size < (size_t)ret)
		return ret;

	return 0;
 }

-static void
+static int
 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -841,10 +1127,12 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
+
+	return 0;
 }

 static const uint32_t *
@@ -869,20 +1157,28 @@ atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	return NULL;
 }

+static void
+atl_dev_delayed_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+	atl_dev_configure_macsec(dev);
+}
+
+
 /* return 0 means link status changed, -1 means not changed */
 static int
 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct atl_interrupt *intr =
-		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
+	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
@@ -902,10 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
		return 0;
	}

-	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
-
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);
@@ -913,42 +1207,63 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
	if (link.link_status == old.link_status)
		return -1;

+	/* Driver has to update flow control settings on RX block
+	 * on any link event.
+	 * We should query FW whether it negotiated FC.
+	 */
+	if (hw->aq_fw_ops->get_flow_control) {
+		hw->aq_fw_ops->get_flow_control(hw, &fc);
+		hw_atl_b0_set_fc(hw, fc, 0U);
+	}
+
+	if (rte_eal_alarm_set(1000 * 1000,
+			      atl_dev_delayed_handler, (void *)dev) < 0)
+		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
+
	return 0;
 }

-static void
+static int
 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
+
+	return 0;
 }

-static void
+static int
 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
+
+	return 0;
 }

-static void
+static int
 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
+
+	return 0;
 }

-static void
+static int
 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
-		return; /* must remain in all_multicast mode */
+		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
+
+	return 0;
 }

 /**
@@ -990,8 +1305,9 @@ atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);
-	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
-		   ATL_FLAG_NEED_LINK_UPDATE : 0;
+
+	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
+		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
 }
@@ -1017,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1056,17 +1372,50 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,
 {
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct atl_adapter *adapter = dev->data->dev_private;
+	struct aq_hw_s *hw = &adapter->hw;
+
+	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
+		goto done;

-	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
-		atl_dev_link_update(dev, 0);
-		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
+	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
+
+	/* Notify userapp if link status changed */
+	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
-		_rte_eth_dev_callback_process(dev,
-			RTE_ETH_EVENT_INTR_LSC, NULL);
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	} else {
+		if (hw->aq_fw_ops->send_macsec_req == NULL)
+			goto done;
+
+		/* Check macsec Keys expired */
+		struct get_stats req = { 0 };
+		struct macsec_msg_fw_request msg = { 0 };
+		struct macsec_msg_fw_response resp = { 0 };
+
+		req.ingress_sa_index = 0x0;
+		req.egress_sc_index = 0x0;
+		req.egress_sa_index = 0x0;
+		msg.msg_type = macsec_get_stats_msg;
+		msg.stats = req;
+
+		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
+		if (err) {
+			PMD_DRV_LOG(ERR, "send_macsec_req fail");
+			goto done;
+		}
+		if (resp.stats.egress_threshold_expired ||
+		    resp.stats.ingress_threshold_expired ||
+		    resp.stats.egress_expired ||
+		    resp.stats.ingress_expired) {
+			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
+			rte_eth_dev_callback_process(dev,
+				RTE_ETH_EVENT_MACSEC, NULL);
+		}
	}
-
+done:
	atl_enable_intr(dev);
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);

	return 0;
 }
@@ -1092,7 +1441,6 @@ atl_dev_interrupt_handler(void *param)
	atl_dev_interrupt_action(dev, dev->intr_handle);
 }

-#define SFP_EEPROM_SIZE 0xff

 static int
 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
@@ -1113,6 +1461,9 @@ int atl_dev_get_eeprom(struct rte_eth_dev *dev,
	    eeprom->data == NULL)
		return -EINVAL;

+	if (eeprom->magic > 0x7F)
+		return -EINVAL;
+
	if (eeprom->magic)
		dev_addr = eeprom->magic;

@@ -1129,14 +1480,18 @@ int atl_dev_set_eeprom(struct rte_eth_dev *dev,
	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

-	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
+	    eeprom->data == NULL)
+		return -EINVAL;
+
+	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

-	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
-					 eeprom->data, eeprom->length);
+	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
+					 eeprom->length, eeprom->offset);
 }

 static int
@@ -1169,15 +1524,21 @@ static int
 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	u32 fc = AQ_NIC_FC_OFF;
+
+	if (hw->aq_fw_ops->get_flow_control == NULL)
+		return -ENOTSUP;
+
+	hw->aq_fw_ops->get_flow_control(hw, &fc);

-	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
-	else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
-	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
-	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+	if (fc == AQ_NIC_FC_OFF)
+		fc_conf->mode = RTE_ETH_FC_NONE;
+	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
+		fc_conf->mode = RTE_ETH_FC_FULL;
+	else if (fc & AQ_NIC_FC_RX)
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
+	else if (fc & AQ_NIC_FC_TX)
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;

	return 0;
 }
@@ -1192,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1235,10 +1596,10 @@ atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
 }

 static int
-atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
 {
-	if (is_zero_ether_addr(mac_addr)) {
+	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}
@@ -1253,7 +1614,7 @@ atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 }

 static int
-atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 {
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
@@ -1264,16 +1625,16 @@ static int
 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
	struct rte_eth_dev_info dev_info;
-	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	int ret;
+	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

-	atl_dev_info_get(dev, &dev_info);
+	ret = atl_dev_info_get(dev, &dev_info);
+	if (ret != 0)
+		return ret;

-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
	return 0;
 }

@@ -1366,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)

	PMD_INIT_FUNC_TRACE();

-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);

-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
@@ -1389,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
@@ -1420,7 +1781,7 @@ atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)

 static int
 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-			  struct ether_addr *mc_addr_set,
+			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
 {
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1532,17 +1893,23 @@ atl_rss_hash_conf_get(struct rte_eth_dev *dev,
	return 0;
 }

-RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+	if (strcmp(dev->device->driver->name, drv->driver.name))
+		return false;
+
+	return true;
+}

-RTE_INIT(atl_init_log)
+bool
+is_atlantic_supported(struct rte_eth_dev *dev)
 {
-	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
-	if (atl_logtype_init >= 0)
-		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
-	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
-	if (atl_logtype_driver >= 0)
-		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
+	return is_device_supported(dev, &rte_atl_pmd);
 }
+
+RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
+RTE_LOG_REGISTER_SUFFIX(atl_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(atl_logtype_driver, driver, NOTICE);