X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa%2Fdpaa_ethdev.c;h=f00279e004b185f7933ce7b3afdab9cb0de5e2ea;hb=f30e69b41f94;hp=8e7eb98247d43005efa465a8f6d93f91d2cd34ec;hpb=e4abd4ff183ca5cd141dae51e761739084ae6912;p=dpdk.git

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 8e7eb98247..f00279e004 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
@@ -204,10 +205,12 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 	struct rte_device *rdev = dev->device;
+	struct rte_eth_link *link = &dev->data->dev_link;
 	struct rte_dpaa_device *dpaa_dev;
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif;
 	struct rte_intr_handle *intr_handle;
+	int speed, duplex;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -259,6 +262,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 		dev->data->scattered_rx = 1;
 	}
 
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_config(dev,
+			eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
+			dpaa_write_fm_config_to_file();
+			DPAA_PMD_ERR("FM port configuration: Failed\n");
+			return -1;
+		}
+		dpaa_write_fm_config_to_file();
+	}
+
 	/* if the interrupts were configured on this devices*/
 	if (intr_handle && intr_handle->fd) {
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
@@ -281,6 +294,60 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 		}
 	}
+
+	/* Wait for link status to get updated */
+	if (!link->link_status)
+		sleep(1);
+
+	/* Configure link only if link is UP*/
+	if (link->link_status) {
+		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+			/* Start autoneg only if link is not in autoneg mode */
+			if (!link->link_autoneg)
+				dpaa_restart_link_autoneg(__fif->node_name);
+		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
+			case ETH_LINK_SPEED_10M_HD:
+				speed = ETH_SPEED_NUM_10M;
+				duplex = ETH_LINK_HALF_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_10M:
+				speed = ETH_SPEED_NUM_10M;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_100M_HD:
+				speed = ETH_SPEED_NUM_100M;
+				duplex = ETH_LINK_HALF_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_100M:
+				speed = ETH_SPEED_NUM_100M;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_1G:
+				speed = ETH_SPEED_NUM_1G;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_2_5G:
+				speed = ETH_SPEED_NUM_2_5G;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			case ETH_LINK_SPEED_10G:
+				speed = ETH_SPEED_NUM_10G;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			default:
+				speed = ETH_SPEED_NUM_NONE;
+				duplex = ETH_LINK_FULL_DUPLEX;
+				break;
+			}
+			/* Set link speed */
+			dpaa_update_link_speed(__fif->node_name, speed, duplex);
+		} else {
+			/* Manual autoneg - custom advertisement speed. */
+			printf("Custom Advertisement speeds not supported\n");
+		}
+	}
+
 	return 0;
 }
 
@@ -325,7 +392,7 @@ static void dpaa_interrupt_handler(void *param)
 	if (bytes_read < 0)
 		DPAA_PMD_ERR("Error reading eventfd\n");
 	dpaa_eth_link_update(dev, 0);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
@@ -334,6 +401,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (!(default_q || fmc_q))
+		dpaa_write_fm_config_to_file();
+
 	/* Change tx callback to the real one */
 	if (dpaa_intf->cgr_tx)
 		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
@@ -345,32 +415,57 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
 	struct fman_if *fif = dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
+	dev->data->dev_started = 0;
 
 	if (!fif->is_shared_mac)
 		fman_if_disable_rx(fif);
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+	return 0;
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
 {
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif;
 	struct rte_device *rdev = dev->device;
 	struct rte_dpaa_device *dpaa_dev;
 	struct rte_intr_handle *intr_handle;
+	struct rte_eth_link *link = &dev->data->dev_link;
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	int loop;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (!dpaa_intf) {
+		DPAA_PMD_WARN("Already closed or not started");
+		return -1;
+	}
+
+	/* DPAA FM deconfig */
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
+			DPAA_PMD_WARN("DPAA FM deconfig failed\n");
+	}
+
 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
 	intr_handle = &dpaa_dev->intr_handle;
 	__fif = container_of(fif, struct __fman_if, __if);
 
-	dpaa_eth_dev_stop(dev);
+	ret = dpaa_eth_dev_stop(dev);
+
+	/* Reset link to autoneg */
+	if (link->link_status && !link->link_autoneg)
+		dpaa_restart_link_autoneg(__fif->node_name);
 
 	if (intr_handle && intr_handle->fd &&
 	    dev->data->dev_conf.intr_conf.lsc != 0) {
@@ -379,6 +474,40 @@ static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
 					     dpaa_interrupt_handler,
 					     (void *)dev);
 	}
+
+	/* release configuration memory */
+	if (dpaa_intf->fc_conf)
+		rte_free(dpaa_intf->fc_conf);
+
+	/* Release RX congestion Groups */
+	if (dpaa_intf->cgr_rx) {
+		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
+			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
+
+		qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
+					 dpaa_intf->nb_rx_queues);
+	}
+
+	rte_free(dpaa_intf->cgr_rx);
+	dpaa_intf->cgr_rx = NULL;
+	/* Release TX congestion Groups */
+	if (dpaa_intf->cgr_tx) {
+		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
+			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
+
+		qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
+					 MAX_DPAA_CORES);
+		rte_free(dpaa_intf->cgr_tx);
+		dpaa_intf->cgr_tx = NULL;
+	}
+
+	rte_free(dpaa_intf->rx_queues);
+	dpaa_intf->rx_queues = NULL;
+
+	rte_free(dpaa_intf->tx_queues);
+	dpaa_intf->tx_queues = NULL;
+
+	return ret;
 }
 
 static int
@@ -432,12 +561,24 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+					| ETH_LINK_SPEED_10M
+					| ETH_LINK_SPEED_100M_HD
+					| ETH_LINK_SPEED_100M
+					| ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G
+		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+					| ETH_LINK_SPEED_10M
+					| ETH_LINK_SPEED_100M_HD
+					| ETH_LINK_SPEED_100M
+					| ETH_LINK_SPEED_1G
 					| ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G
+		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+					| ETH_LINK_SPEED_10M
+					| ETH_LINK_SPEED_100M_HD
+					| ETH_LINK_SPEED_100M
+					| ETH_LINK_SPEED_1G
 					| ETH_LINK_SPEED_2_5G
 					| ETH_LINK_SPEED_10G;
 	} else {
@@ -534,31 +675,35 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
-	int ret;
+	int ret, ioctl_version;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (fif->mac_type == fman_mac_1g)
-		link->link_speed = ETH_SPEED_NUM_1G;
-	else if (fif->mac_type == fman_mac_2_5g)
-		link->link_speed = ETH_SPEED_NUM_2_5G;
-	else if (fif->mac_type == fman_mac_10g)
-		link->link_speed = ETH_SPEED_NUM_10G;
-	else
-		DPAA_PMD_ERR("invalid link_speed: %s, %d",
-			     dpaa_intf->name, fif->mac_type);
+	ioctl_version = dpaa_get_ioctl_version_number();
+
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
-		ret = dpaa_get_link_status(__fif->node_name);
-		if (ret < 0)
+		ret = dpaa_get_link_status(__fif->node_name, link);
+		if (ret)
 			return ret;
-		link->link_status = ret;
 	} else {
 		link->link_status = dpaa_intf->valid;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	if (ioctl_version < 2) {
+		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = ETH_LINK_AUTONEG;
+
+		if (fif->mac_type == fman_mac_1g)
+			link->link_speed = ETH_SPEED_NUM_1G;
+		else if (fif->mac_type == fman_mac_2_5g)
+			link->link_speed = ETH_SPEED_NUM_2_5G;
+		else if (fif->mac_type == fman_mac_10g)
+			link->link_speed = ETH_SPEED_NUM_10G;
+		else
+			DPAA_PMD_ERR("invalid link_speed: %s, %d",
+				     dpaa_intf->name, fif->mac_type);
+	}
 
 	DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
 		      link->link_status ? "Up" : "Down");
@@ -977,7 +1122,8 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 					rxq->fqid, ret);
 		}
 	}
-
+	/* Enable main queue to receive error packets also by default */
+	fman_if_set_err_fqid(fif, rxq->fqid);
 	return 0;
 }
 
@@ -1147,7 +1293,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
 		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
 	else
-		dpaa_eth_dev_stop(dev);
+		return dpaa_eth_dev_stop(dev);
 	return 0;
 }
 
@@ -1290,6 +1436,41 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
+			DPAA_PMD_ERR("FM port configuration: Failed\n");
+			return -1;
+		}
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+	} else {
+		DPAA_PMD_ERR("Function not supported\n");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+	/* dpaa does not support rss_key, so length should be 0*/
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+	return 0;
+}
+
 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
 				      uint16_t queue_id)
 {
@@ -1373,7 +1554,6 @@ static struct eth_dev_ops dpaa_devops = {
 	.tx_queue_setup = dpaa_eth_tx_queue_setup,
 	.rx_queue_release = dpaa_eth_rx_queue_release,
 	.tx_queue_release = dpaa_eth_tx_queue_release,
-	.rx_queue_count = dpaa_dev_rx_queue_count,
 	.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
 	.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
 	.rxq_info_get = dpaa_rxq_info_get,
@@ -1405,6 +1585,8 @@ static struct eth_dev_ops dpaa_devops = {
 
 	.rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
 	.rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
+	.rss_hash_update = dpaa_dev_rss_hash_update,
+	.rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
 };
 
 static bool
@@ -1424,7 +1606,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
 }
 
 int
-rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
 {
 	struct rte_eth_dev *dev;
 
@@ -1699,7 +1881,18 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	if (default_q) {
 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
 	} else if (fmc_q) {
-		num_rx_fqs = 1;
+		num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
+						dev_vspids,
+						DPAA_MAX_NUM_PCD_QUEUES);
+		if (num_rx_fqs < 0) {
+			DPAA_PMD_ERR("%s FMC initializes failed!",
+				dpaa_intf->name);
+			goto free_rx;
+		}
+		if (!num_rx_fqs) {
+			DPAA_PMD_WARN("%s is not configured by FMC.",
+				dpaa_intf->name);
+		}
 	} else {
 		/* FMCLESS mode, load balance to multiple cores.*/
 		num_rx_fqs = rte_lcore_count();
@@ -1835,11 +2028,19 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
 
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
-		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+		[DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+	if (ret) {
+		DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
+		goto free_tx;
+	}
 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
-	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
-		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+		[DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+	if (ret) {
+		DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
+		goto free_tx;
+	}
 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
 #endif
 
@@ -1856,6 +2057,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Populate ethdev structure */
 	eth_dev->dev_ops = &dpaa_devops;
+	eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
 	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
@@ -1883,8 +2085,10 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 		fman_intf->mac_addr.addr_bytes[5]);
 
 	if (!fman_intf->is_shared_mac) {
+		/* Configure error packet handling */
+		fman_if_receive_rx_errors(fman_intf,
+			FM_FD_RX_STATUS_ERR_MASK);
 		/* Disable RX mode */
-		fman_if_discard_rx_errors(fman_intf);
 		fman_if_disable_rx(fman_intf);
 		/* Disable promiscuous mode */
 		fman_if_promiscuous_disable(fman_intf);
@@ -1914,70 +2118,6 @@ free_rx:
 	return ret;
 }
 
-static int
-dpaa_dev_uninit(struct rte_eth_dev *dev)
-{
-	struct dpaa_if *dpaa_intf = dev->data->dev_private;
-	int loop;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
-
-	if (!dpaa_intf) {
-		DPAA_PMD_WARN("Already closed or not started");
-		return -1;
-	}
-
-	/* DPAA FM deconfig */
-	if (!(default_q || fmc_q)) {
-		if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
-			DPAA_PMD_WARN("DPAA FM deconfig failed\n");
-	}
-
-	dpaa_eth_dev_close(dev);
-
-	/* release configuration memory */
-	if (dpaa_intf->fc_conf)
-		rte_free(dpaa_intf->fc_conf);
-
-	/* Release RX congestion Groups */
-	if (dpaa_intf->cgr_rx) {
-		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
-			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
-					 dpaa_intf->nb_rx_queues);
-	}
-
-	rte_free(dpaa_intf->cgr_rx);
-	dpaa_intf->cgr_rx = NULL;
-
-	/* Release TX congestion Groups */
-	if (dpaa_intf->cgr_tx) {
-		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
-			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
-					 MAX_DPAA_CORES);
-		rte_free(dpaa_intf->cgr_tx);
-		dpaa_intf->cgr_tx = NULL;
-	}
-
-	rte_free(dpaa_intf->rx_queues);
-	dpaa_intf->rx_queues = NULL;
-
-	rte_free(dpaa_intf->tx_queues);
-	dpaa_intf->tx_queues = NULL;
-
-	dev->dev_ops = NULL;
-	dev->rx_pkt_burst = NULL;
-	dev->tx_pkt_burst = NULL;
-
-	return 0;
-}
-
 static int
 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 	       struct rte_dpaa_device *dpaa_dev)
@@ -2079,6 +2219,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 	if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
 	/* Invoke PMD device initialization function */
 	diag = dpaa_dev_init(eth_dev);
 	if (diag == 0) {
@@ -2094,15 +2236,15 @@ static int
 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_eth_dev *eth_dev;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev = dpaa_dev->eth_dev;
-	dpaa_dev_uninit(eth_dev);
-
-	rte_eth_dev_release_port(eth_dev);
+	dpaa_eth_dev_close(eth_dev);
+	ret = rte_eth_dev_release_port(eth_dev);
 
-	return 0;
+	return ret;
 }
 
 static void __attribute__((destructor(102))) dpaa_finish(void)