+/* Disable the unicast MAC filter at @index by programming a NULL address
+ * with enable=false through the common update helper.
+ */
+static void
+atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ atl_update_mac_addr(dev, index, NULL, false);
+}
+
+/* Replace the primary MAC address (filter slot 0).
+ *
+ * Removes the current entry at slot 0, then programs @addr in its place.
+ * Fix: the original ignored the result of atl_add_mac_addr() and always
+ * returned 0, hiding hardware programming failures from the caller —
+ * propagate it instead.
+ */
+static int
+atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ atl_remove_mac_addr(dev, 0);
+ return atl_add_mac_addr(dev, addr, 0, 0);
+}
+
+/* Set the device MTU.
+ *
+ * Validates that the resulting frame size (MTU + L2 header + CRC) fits
+ * within the device's reported max_rx_pktlen, then records the new frame
+ * size in the device Rx configuration. Returns 0 on success, -EINVAL on
+ * out-of-range MTU, or the error from atl_dev_info_get().
+ */
+static int
+atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info dev_info;
+ int ret;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ ret = atl_dev_info_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+ return -EINVAL;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return 0;
+}
+
+/* Add or remove a hardware VLAN filter entry for @vlan_id.
+ *
+ * The driver keeps a software shadow of the hardware VLAN filter table in
+ * cfg->vlan_filter[] (0 == free slot). Flow:
+ *   1. If @vlan_id is already in the table: disable+clear it when @on is 0,
+ *      otherwise nothing to do.
+ *   2. If it was not found and @on is 0, there is nothing to delete.
+ *   3. Otherwise find a free slot and program the new filter; -ENOMEM if
+ *      the table is full.
+ * On every exit path, VLAN promiscuous mode is (re)enabled iff the shadow
+ * table is completely empty.
+ *
+ * NOTE(review): vlan_id 0 cannot be filtered — it is indistinguishable
+ * from a free slot in the shadow table. Presumably intentional; confirm.
+ */
+static int
+atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = 0;
+ int i = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Look for an existing entry matching vlan_id. */
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] == vlan_id) {
+ if (!on) {
+ /* Disable VLAN filter. */
+ hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
+
+ /* Clear VLAN filter entry */
+ cfg->vlan_filter[i] = 0;
+ }
+ break;
+ }
+ }
+
+ /* VLAN_ID was not found. So, nothing to delete. */
+ if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
+ goto exit;
+
+ /* VLAN_ID already exist, or already removed above. Nothing to do. */
+ if (i != HW_ATL_B0_MAX_VLAN_IDS)
+ goto exit;
+
+ /* Try to found free VLAN filter to add new VLAN_ID */
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] == 0)
+ break;
+ }
+
+ if (i == HW_ATL_B0_MAX_VLAN_IDS) {
+ /* We have no free VLAN filter to add new VLAN_ID*/
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Program the new filter: shadow entry first, then HW action/id/enable. */
+ cfg->vlan_filter[i] = vlan_id;
+ hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
+ hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
+ hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
+
+exit:
+ /* Enable VLAN promisc mode if vlan_filter empty */
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] != 0)
+ break;
+ }
+
+ hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
+
+ return err;
+}
+
+/* Enable (@en != 0) or disable (@en == 0) all currently-programmed VLAN
+ * filters in hardware. Only slots with a non-zero shadow entry are
+ * touched, so empty filter slots stay disabled. Always returns 0.
+ */
+static int
+atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i])
+ hw_atl_rpf_vlan_flr_en_set(hw, en, i);
+ }
+ return 0;
+}
+
+/* Apply VLAN offload settings selected by @mask.
+ *
+ * - FILTER: toggles the programmed hardware VLAN filters on/off.
+ * - STRIP:  records the strip flag and programs it on every Rx queue.
+ * - EXTEND: not supported by this hardware; reported as -ENOTSUP.
+ * Returns 0 on success or the first/last error encountered.
+ */
+static int
+atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+
+ cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+
+ /* Propagate the strip setting to every configured Rx queue. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ ret = -ENOTSUP;
+
+ return ret;
+}
+
+/* Program the Ethertype (TPID) used to recognize inner or outer VLAN
+ * tags, depending on @vlan_type. Returns 0 on success or -ENOTSUP for
+ * any other VLAN type.
+ */
+static int
+atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type");
+ err = -ENOTSUP;
+ }
+
+ return err;
+}
+
+/* Enable/disable VLAN tag stripping on a single Rx queue.
+ *
+ * @queue_id must be a configured Rx queue (0 .. nb_rx_queues - 1).
+ * Fix: the original check used '>' and therefore accepted
+ * queue_id == nb_rx_queues, one past the last valid queue, and wrote
+ * to a non-existent queue's register.
+ */
+static void
+atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue id");
+ return;
+ }
+
+ hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
+}
+
+/* Replace the whole multicast address list.
+ *
+ * The multicast list occupies the unicast filter slots from
+ * HW_ATL_B0_MAC_MIN upward. Every slot in that range is rewritten:
+ * slots beyond @nb_mc_addr are cleared (mac_addr stays NULL, so the
+ * final enable writes 0). Each address is split into a 16-bit MSW
+ * (bytes 0-1) and 32-bit LSW (bytes 2-5) register pair.
+ * Returns -EINVAL if the list does not fit the available slots.
+ */
+static int
+atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 i;
+
+ if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
+ return -EINVAL;
+
+ /* Update whole uc filters table */
+ for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
+ u8 *mac_addr = NULL;
+ u32 l = 0, h = 0;
+
+ if (i < nb_mc_addr) {
+ mac_addr = mc_addr_set[i].addr_bytes;
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ h = (mac_addr[0] << 8) | mac_addr[1];
+ }
+
+ /* Disable the slot while its address registers are rewritten. */
+ hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
+ HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
+ HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
+ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ return 0;
+}
+
+/* Update the RSS redirection (RETA) table from @reta_conf and push the
+ * result to hardware. Each entry is clamped to the last valid Rx queue.
+ *
+ * NOTE(review): only reta_conf[0].reta[] is indexed, so this is correct
+ * only while the device indirection table has at most 64 entries (one
+ * rte_eth_rss_reta_entry64 group) and reta_conf->mask is ignored —
+ * confirm indirection_table_size <= RTE_RETA_GROUP_SIZE for this HW.
+ */
+static int
+atl_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i;
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
+ cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
+ dev->data->nb_rx_queues - 1);
+
+ hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
+ return 0;
+}
+
+/* Report the cached RSS redirection table into @reta_conf and mark every
+ * entry valid via the mask.
+ *
+ * NOTE(review): like atl_reta_update(), only reta_conf[0] is filled —
+ * assumes the indirection table fits in a single 64-entry group.
+ */
+static int
+atl_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i;
+ struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
+ reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
+ reta_conf->mask = ~0U;
+ return 0;
+}
+
+/* Configure RSS hashing: enable flag, hash key, and push both the
+ * redirection table and the key to hardware.
+ *
+ * When the caller supplies no key, a built-in 40-byte default key is
+ * used. Fix: the original memcpy'd rss_key_len caller-controlled bytes
+ * into the fixed-size hash_secret_key buffer with no bound check —
+ * an oversized key overflowed the config structure. Reject such keys
+ * with -EINVAL instead.
+ */
+static int
+atl_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ static u8 def_rss_key[40] = {
+ 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+ 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+ 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+ 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+ 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+ };
+
+ /* Refuse keys larger than the hardware key storage. */
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len > sizeof(cfg->aq_rss.hash_secret_key))
+ return -EINVAL;
+
+ cfg->is_rss = !!rss_conf->rss_hf;
+ if (rss_conf->rss_key) {
+ memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
+ } else {
+ memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
+ sizeof(def_rss_key));
+ cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
+ }
+
+ hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
+ return 0;
+}
+
+/* Report the current RSS configuration.
+ *
+ * rss_hf reflects whether RSS is enabled (all supported hash types, or
+ * none). If the caller provided a key buffer, the cached key is copied
+ * out. NOTE(review): assumes the caller's rss_key buffer is at least
+ * hash_secret_key_size bytes, per the rte_ethdev contract — confirm.
+ */
+static int
+atl_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
+ if (rss_conf->rss_key) {
+ rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
+ memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
+ rss_conf->rss_key_len);
+ }
+
+ return 0;
+}
+
+/* Return true when @dev is bound to the PCI driver @drv, by comparing
+ * driver names.
+ */
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+/* Public helper: true when @dev is driven by this (atlantic) PMD. */
+bool
+is_atlantic_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_atl_pmd);
+}
+
+/* Driver registration: PCI driver, supported device table, kernel module
+ * dependencies, and the init/driver log types (default level NOTICE).
+ */
+RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
+RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
+RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);