X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa%2Fdpaa_ethdev.c;h=b9bf9d2966e5a2cfdeb701c491af1edb5a7a9fcb;hb=9fda31c3229ca6e036cae80392578ed6e5a51119;hp=c15e2b5462f87a3de97e1502863138fdfa11ef6d;hpb=2cf9264f2da3e6d90f987ef4e6416bd78446f51e;p=dpdk.git

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c15e2b5462..b9bf9d2966 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -29,7 +29,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -39,6 +39,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -46,36 +47,40 @@
 #include
 #include
 #include
+#include
+
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
+static int fmc_q = 1;	/* Indicates the use of static fmc for distribution */
 static int default_q;	/* use default queue - FMC is not executed*/
 /* At present we only allow up to 4 push mode queues as default - as each of
  * this queue need dedicated portal and we are short of portals.
@@ -161,8 +166,6 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
-		return -EINVAL;
 	/*
 	 * Refuse mtu that requires the support of scattered packets
 	 * when this feature has not been enabled before.
@@ -181,15 +184,6 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 	}
 
-	if (frame_size > RTE_ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
-	else
-		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
 	fman_if_set_maxfrm(dev->process_private, frame_size);
 
 	return 0;
@@ -202,16 +196,19 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 	struct rte_device *rdev = dev->device;
+	struct rte_eth_link *link = &dev->data->dev_link;
 	struct rte_dpaa_device *dpaa_dev;
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif;
 	struct rte_intr_handle *intr_handle;
+	uint32_t max_rx_pktlen;
+	int speed, duplex;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-	intr_handle = &dpaa_dev->intr_handle;
+	intr_handle = dpaa_dev->intr_handle;
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	/* Rx offloads which are enabled by default */
@@ -230,41 +227,42 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			    tx_offloads, dev_tx_offloads_nodis);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		uint32_t max_len;
-
-		DPAA_PMD_DEBUG("enabling jumbo");
-
-		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-		    DPAA_MAX_RX_PKT_LEN)
-			max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-		else {
-			DPAA_PMD_INFO("enabling jumbo override conf max len=%d " "supported is %d",
-				dev->data->dev_conf.rxmode.max_rx_pkt_len,
-				DPAA_MAX_RX_PKT_LEN);
-			max_len = DPAA_MAX_RX_PKT_LEN;
-		}
-
-		fman_if_set_maxfrm(dev->process_private, max_len);
-		dev->data->mtu = max_len
-			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
+	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+			RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
+		DPAA_PMD_INFO("enabling jumbo override conf max len=%d " "supported is %d",
+			max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
+		max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
+
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
 	}
 
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_config(dev,
+			eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
+			dpaa_write_fm_config_to_file();
+			DPAA_PMD_ERR("FM port configuration: Failed\n");
+			return -1;
+		}
+		dpaa_write_fm_config_to_file();
+	}
+
 	/* if the interrupts were configured on this devices*/
-	if (intr_handle && intr_handle->fd) {
+	if (intr_handle && rte_intr_fd_get(intr_handle)) {
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			rte_intr_callback_register(intr_handle,
 					   dpaa_interrupt_handler,
 					   (void *)dev);
-		ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
+		ret = dpaa_intr_enable(__fif->node_name,
+				       rte_intr_fd_get(intr_handle));
 		if (ret) {
 			if (dev->data->dev_conf.intr_conf.lsc != 0) {
 				rte_intr_callback_unregister(intr_handle,
@@ -279,6 +277,60 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 		}
 	}
+
+	/* Wait for link status to get updated */
+	if (!link->link_status)
+		sleep(1);
+
+	/* Configure link only if link is UP*/
+	if (link->link_status) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+			/* Start autoneg only if link is not in autoneg mode */
+			if (!link->link_autoneg)
+				dpaa_restart_link_autoneg(__fif->node_name);
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			default:
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
+				break;
+			}
+			/* Set link speed */
+			dpaa_update_link_speed(__fif->node_name, speed, duplex);
+		} else {
+			/* Manual autoneg - custom advertisement speed. */
+			printf("Custom Advertisement speeds not supported\n");
+		}
+	}
+
 	return 0;
 }
 
@@ -317,13 +369,14 @@ static void dpaa_interrupt_handler(void *param)
 	int bytes_read;
 
 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-	intr_handle = &dpaa_dev->intr_handle;
+	intr_handle = dpaa_dev->intr_handle;
 
-	bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
+	bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
+			  sizeof(uint64_t));
 	if (bytes_read < 0)
 		DPAA_PMD_ERR("Error reading eventfd\n");
 	dpaa_eth_link_update(dev, 0);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
@@ -332,6 +385,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (!(default_q || fmc_q))
+		dpaa_write_fm_config_to_file();
+
 	/* Change tx callback to the real one */
 	if (dpaa_intf->cgr_tx)
 		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
@@ -343,39 +399,93 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
 	struct fman_if *fif = dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
+	dev->data->dev_started = 0;
 
-	fman_if_disable_rx(fif);
+	if (!fif->is_shared_mac)
+		fman_if_disable_rx(fif);
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+	return 0;
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
 {
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif;
 	struct rte_device *rdev = dev->device;
 	struct rte_dpaa_device *dpaa_dev;
 	struct rte_intr_handle *intr_handle;
+	struct rte_eth_link *link = &dev->data->dev_link;
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	int loop;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (!dpaa_intf) {
+		DPAA_PMD_WARN("Already closed or not started");
+		return -1;
+	}
+
+	/* DPAA FM deconfig */
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
+			DPAA_PMD_WARN("DPAA FM deconfig failed\n");
+	}
+
 	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-	intr_handle = &dpaa_dev->intr_handle;
+	intr_handle = dpaa_dev->intr_handle;
 	__fif = container_of(fif, struct __fman_if, __if);
 
-	dpaa_eth_dev_stop(dev);
+	ret = dpaa_eth_dev_stop(dev);
+
+	/* Reset link to autoneg */
+	if (link->link_status && !link->link_autoneg)
+		dpaa_restart_link_autoneg(__fif->node_name);
 
-	if (intr_handle && intr_handle->fd &&
+	if (intr_handle && rte_intr_fd_get(intr_handle) &&
 	    dev->data->dev_conf.intr_conf.lsc != 0) {
 		dpaa_intr_disable(__fif->node_name);
 		rte_intr_callback_unregister(intr_handle,
 					     dpaa_interrupt_handler, (void *)dev);
 	}
+
+	/* release configuration memory */
+	if (dpaa_intf->fc_conf)
+		rte_free(dpaa_intf->fc_conf);
+
+	/* Release RX congestion Groups */
+	if (dpaa_intf->cgr_rx) {
+		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
+			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
+	}
+
+	rte_free(dpaa_intf->cgr_rx);
+	dpaa_intf->cgr_rx = NULL;
+	/* Release TX congestion Groups */
+	if (dpaa_intf->cgr_tx) {
+		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
+			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
+		rte_free(dpaa_intf->cgr_tx);
+		dpaa_intf->cgr_tx = NULL;
+	}
+
+	rte_free(dpaa_intf->rx_queues);
+	dpaa_intf->rx_queues = NULL;
+
+	rte_free(dpaa_intf->tx_queues);
+	dpaa_intf->tx_queues = NULL;
+
+	return ret;
 }
 
 static int
@@ -403,9 +513,11 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
 
 	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", svr_ver, fman_ip_rev);
-	ret += 1; /* add the size of '\0' */
+	if (ret < 0)
+		return -EINVAL;
 
-	if (fw_size < (uint32_t)ret)
+	ret += 1; /* add the size of '\0' */
+	if (fw_size < (size_t)ret)
 		return ret;
 	else
 		return 0;
@@ -425,18 +537,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -469,13 +593,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -502,14 +625,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -525,37 +648,48 @@
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-				int wait_to_complete __rte_unused)
+				int wait_to_complete)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
-	int ret;
+	int ret, ioctl_version;
+	uint8_t count;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (fif->mac_type == fman_mac_1g)
-		link->link_speed = ETH_SPEED_NUM_1G;
-	else if (fif->mac_type == fman_mac_2_5g)
-		link->link_speed = ETH_SPEED_NUM_2_5G;
-	else if (fif->mac_type == fman_mac_10g)
-		link->link_speed = ETH_SPEED_NUM_10G;
-	else
-		DPAA_PMD_ERR("invalid link_speed: %s, %d",
-			     dpaa_intf->name, fif->mac_type);
+	ioctl_version = dpaa_get_ioctl_version_number();
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
-		ret = dpaa_get_link_status(__fif->node_name);
-		if (ret < 0)
-			return ret;
-		link->link_status = ret;
+		for (count = 0; count <= MAX_REPEAT_TIME; count++) {
+			ret = dpaa_get_link_status(__fif->node_name, link);
+			if (ret)
+				return ret;
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
+			    wait_to_complete)
+				rte_delay_ms(CHECK_INTERVAL);
+			else
+				break;
+		}
 	} else {
 		link->link_status = dpaa_intf->valid;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	if (ioctl_version < 2) {
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
+
+		if (fif->mac_type == fman_mac_1g)
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
+		else if (fif->mac_type == fman_mac_2_5g)
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
+		else if (fif->mac_type == fman_mac_10g)
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
+		else
+			DPAA_PMD_ERR("invalid link_speed: %s, %d",
+				     dpaa_intf->name, fif->mac_type);
+	}
 
 	DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
 		      link->link_status ? "Up" : "Down");
@@ -661,8 +795,8 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 
 static int
 dpaa_xstats_get_names_by_id(
 	struct rte_eth_dev *dev,
-	struct rte_eth_xstat_name *xstats_names,
 	const uint64_t *ids,
+	struct rte_eth_xstat_name *xstats_names,
 	unsigned int limit)
 {
 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
@@ -719,6 +853,55 @@ static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if_ic_params icp;
+	uint32_t fd_offset;
+	uint32_t bp_size;
+
+	memset(&icp, 0, sizeof(icp));
+	/* set ICEOF for to the default value , which is 0*/
+	icp.iciof = DEFAULT_ICIOF;
+	icp.iceof = DEFAULT_RX_ICEOF;
+	icp.icsz = DEFAULT_ICSZ;
+	fman_if_set_ic_params(dev->process_private, &icp);
+
+	fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+	fman_if_set_fdoff(dev->process_private, fd_offset);
+
+	/* Buffer pool size should be equal to Dataroom Size*/
+	bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
+
+	fman_if_set_bp(dev->process_private,
+		       dpaa_intf->bp_info->mp->size,
+		       dpaa_intf->bp_info->bpid, bp_size);
+}
+
+static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
+					     int8_t vsp_id, uint32_t bpid)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
+
+	if (fif->num_profiles) {
+		if (vsp_id < 0)
+			vsp_id = fif->base_profile_id;
+	} else {
+		if (vsp_id < 0)
+			vsp_id = 0;
+	}
+
+	if (dpaa_intf->vsp_bpid[vsp_id] &&
+		bpid != dpaa_intf->vsp_bpid[vsp_id]) {
+		DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
+
+		return -1;
+	}
+
+	return 0;
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			    uint16_t nb_desc,
@@ -733,6 +916,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	u32 flags = 0;
 	int ret;
 	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	uint32_t max_rx_pktlen;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -754,17 +938,37 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
 			queue_idx, rxq->fqid);
 
+	if (!fif->num_profiles) {
+		if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+			dpaa_intf->bp_info->mp != mp) {
+			DPAA_PMD_WARN("Multiple pools on same interface not"
+				      " supported");
+			return -EINVAL;
+		}
+	} else {
+		if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+			return -EINVAL;
+		}
+	}
+
+	if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+	    dpaa_intf->bp_info->mp != mp) {
+		DPAA_PMD_WARN("Multiple pools on same interface not supported");
+		return -EINVAL;
+	}
+
+	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+		VLAN_TAG_SIZE;
 	/* Max packet can fit in single buffer */
-	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+	if (max_rx_pktlen <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
-		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-			buffsz * DPAA_SGT_MAX_ENTRIES) {
-			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
+		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
+			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
 				"MaxSGlist %d",
-				dev->data->dev_conf.rxmode.max_rx_pkt_len,
-				buffsz * DPAA_SGT_MAX_ENTRIES);
+				max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
 			rte_errno = EOVERFLOW;
 			return -rte_errno;
 		}
 	}
@@ -772,43 +976,45 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
 			" larger than a single mbuf (%u) and scattered"
 			" mode has not been requested",
-			dev->data->dev_conf.rxmode.max_rx_pkt_len,
-			buffsz - RTE_PKTMBUF_HEADROOM);
+			max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
 	}
 
-	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
-		struct fman_if_ic_params icp;
-		uint32_t fd_offset;
-		uint32_t bp_size;
+	dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
 
-		if (!mp->pool_data) {
-			DPAA_PMD_ERR("Not an offloaded buffer pool!");
-			return -1;
+	/* For shared interface, it's done in kernel, skip.*/
+	if (!fif->is_shared_mac)
+		dpaa_fman_if_pool_setup(dev);
+
+	if (fif->num_profiles) {
+		int8_t vsp_id = rxq->vsp_id;
+
+		if (vsp_id >= 0) {
+			ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+					DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+					fif);
+			if (ret) {
+				DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+				return ret;
+			}
+		} else {
+			DPAA_PMD_INFO("Base profile is associated to"
+				" RXQ fqid:%d\r\n", rxq->fqid);
+			if (fif->is_shared_mac) {
+				DPAA_PMD_ERR("Fatal: Base profile is associated"
					" to shared interface on DPDK.");
+				return -EINVAL;
+			}
+			dpaa_intf->vsp_bpid[fif->base_profile_id] =
+				DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
 		}
-		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
-
-		memset(&icp, 0, sizeof(icp));
-		/* set ICEOF for to the default value , which is 0*/
-		icp.iciof = DEFAULT_ICIOF;
-		icp.iceof = DEFAULT_RX_ICEOF;
-		icp.icsz = DEFAULT_ICSZ;
-		fman_if_set_ic_params(fif, &icp);
-
-		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
-		fman_if_set_fdoff(fif, fd_offset);
-
-		/* Buffer pool size should be equal to Dataroom Size*/
-		bp_size = rte_pktmbuf_data_room_size(mp);
-		fman_if_set_bp(fif, mp->size,
-			       dpaa_intf->bp_info->bpid, bp_size);
-		dpaa_intf->valid = 1;
-		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
-				dpaa_intf->name, fd_offset,
-				fman_if_get_fdoff(fif));
+	} else {
+		dpaa_intf->vsp_bpid[0] =
+			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
 	}
+
+	dpaa_intf->valid = 1;
 	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
-		fman_if_get_sg_enable(fif),
-		dev->data->dev_conf.rxmode.max_rx_pkt_len);
+		fman_if_get_sg_enable(fif), max_rx_pktlen);
 	/* checking if push mode only, no error check for now */
 	if (!rxq->is_static &&
 	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
@@ -868,26 +1074,38 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		rxq->qp = qp;
 
 		/* Set up the device interrupt handler */
-		if (!dev->intr_handle) {
+		if (dev->intr_handle == NULL) {
 			struct rte_dpaa_device *dpaa_dev;
 			struct rte_device *rdev = dev->device;
 
 			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
-			dev->intr_handle = &dpaa_dev->intr_handle;
-			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
-					dpaa_push_mode_max_queue, 0);
-			if (!dev->intr_handle->intr_vec) {
+			dev->intr_handle = dpaa_dev->intr_handle;
+			if (rte_intr_vec_list_alloc(dev->intr_handle,
+					NULL, dpaa_push_mode_max_queue)) {
 				DPAA_PMD_ERR("intr_vec alloc failed");
 				return -ENOMEM;
 			}
-			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
-			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+			if (rte_intr_nb_efd_set(dev->intr_handle,
+					dpaa_push_mode_max_queue))
+				return -rte_errno;
+
+			if (rte_intr_max_intr_set(dev->intr_handle,
+					dpaa_push_mode_max_queue))
+				return -rte_errno;
 		}
 
-		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
-		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
-		dev->intr_handle->efds[queue_idx] = q_fd;
+		if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
+			return -rte_errno;
+
+		if (rte_intr_vec_list_index_set(dev->intr_handle,
						queue_idx, queue_idx + 1))
+			return -rte_errno;
+
+		if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
					    q_fd))
+			return -rte_errno;
+
		rxq->q_fd = q_fd;
 	}
 	rxq->bp_array = rte_dpaa_bpid_info;
@@ -907,7 +1125,8 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
					rxq->fqid, ret);
 		}
 	}
-
+	/* Enable main queue to receive error packets also by default */
+	fman_if_set_err_fqid(fif, rxq->fqid);
 	return 0;
 }
 
@@ -1005,12 +1224,6 @@ dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
 	return 0;
 }
 
-static
-void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static
 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			    uint16_t nb_desc __rte_unused,
@@ -1044,23 +1257,17 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
-static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static uint32_t
-dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa_dev_rx_queue_count(void *rx_queue)
 {
-	struct dpaa_if *dpaa_intf = dev->data->dev_private;
-	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+	struct qman_fq *rxq = rx_queue;
 	u32 frm_cnt = 0;
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
-		DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
-			       rx_queue_id, frm_cnt);
+		DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
			       rx_queue, frm_cnt);
 	}
 	return frm_cnt;
 }
@@ -1075,9 +1282,9 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
-		dpaa_eth_dev_stop(dev);
+		return dpaa_eth_dev_stop(dev);
 	return 0;
 }
 
@@ -1091,7 +1298,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1121,10 +1328,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		   fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		   fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
					 fc_conf->low_water,
@@ -1168,11 +1375,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1220,6 +1427,41 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
+			DPAA_PMD_ERR("FM port configuration: Failed\n");
+			return -1;
+		}
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+	} else {
+		DPAA_PMD_ERR("Function not supported\n");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+	/* dpaa does not support rss_key, so length should be 0*/
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+	return 0;
+}
+
 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
				      uint16_t queue_id)
 {
@@ -1260,12 +1502,19 @@ dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct qman_fq *rxq;
+	int ret;
 
 	rxq = dev->data->rx_queues[queue_id];
 
 	qinfo->mp = dpaa_intf->bp_info->mp;
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_desc;
+
+	/* Report the HW Rx buffer length to user */
+	ret = fman_if_get_maxfrm(dev->process_private);
+	if (ret > 0)
+		qinfo->rx_buf_size = ret;
+
 	qinfo->conf.rx_free_thresh = 1;
 	qinfo->conf.rx_drop_en = 1;
 	qinfo->conf.rx_deferred_start = 0;
@@ -1301,9 +1550,6 @@ static struct eth_dev_ops dpaa_devops = {
 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
-	.rx_queue_release	  = dpaa_eth_rx_queue_release,
-	.tx_queue_release	  = dpaa_eth_tx_queue_release,
-	.rx_queue_count		  = dpaa_dev_rx_queue_count,
 	.rx_burst_mode_get	  = dpaa_dev_rx_burst_mode_get,
 	.tx_burst_mode_get	  = dpaa_dev_tx_burst_mode_get,
 	.rxq_info_get		  = dpaa_rxq_info_get,
@@ -1335,6 +1581,8 @@ static struct eth_dev_ops dpaa_devops = {
 
 	.rx_queue_intr_enable	  = dpaa_dev_queue_intr_enable,
 	.rx_queue_intr_disable	  = dpaa_dev_queue_intr_disable,
+	.rss_hash_update	  = dpaa_dev_rss_hash_update,
+	.rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
 };
 
 static bool
@@ -1354,7 +1602,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
 }
 
 int
-rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
 {
 	struct rte_eth_dev *dev;
 
@@ -1392,10 +1640,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1418,16 +1666,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 		}
 	};
 
-	if (fqid) {
+	if (fmc_q || default_q) {
 		ret = qman_reserve_fqid(fqid);
 		if (ret) {
-			DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+			DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
				     fqid, ret);
 			return -EINVAL;
 		}
-	} else {
-		flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
 	}
+
 	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
 	ret = qman_create_fq(fqid, flags, fq);
 	if (ret) {
@@ -1602,7 +1849,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	struct fman_if_bpool *bp, *tmp_bp;
 	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
 	uint32_t cgrid_tx[MAX_DPAA_CORES];
-	char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];
+	uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
+	int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
+	int8_t vsp_id = -1;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1619,30 +1868,49 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->ifid = dev_id;
 	dpaa_intf->cfg = cfg;
 
+	memset((char *)dev_rx_fqids, 0,
		sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
+
+	memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
+
 	/* Initialize Rx FQ's */
 	if (default_q) {
 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+	} else if (fmc_q) {
+		num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
						dev_vspids,
						DPAA_MAX_NUM_PCD_QUEUES);
+		if (num_rx_fqs < 0) {
+			DPAA_PMD_ERR("%s FMC initializes failed!",
				dpaa_intf->name);
+			goto free_rx;
+		}
+		if (!num_rx_fqs) {
+			DPAA_PMD_WARN("%s is not configured by FMC.",
				dpaa_intf->name);
+		}
 	} else {
-		if (getenv("DPAA_NUM_RX_QUEUES"))
-			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
-		else
-			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+		/* FMCLESS mode, load balance to multiple cores.*/
+		num_rx_fqs = rte_lcore_count();
 	}
 
-
 	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
	 * queues.
	 */
-	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
+	if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
 		DPAA_PMD_ERR("Invalid number of RX queues\n");
 		return -EINVAL;
 	}
 
-	dpaa_intf->rx_queues = rte_zmalloc(NULL,
-		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
-	if (!dpaa_intf->rx_queues) {
-		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
-		return -ENOMEM;
+	if (num_rx_fqs > 0) {
+		dpaa_intf->rx_queues = rte_zmalloc(NULL,
			sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+		if (!dpaa_intf->rx_queues) {
+			DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+			return -ENOMEM;
+		}
+	} else {
+		dpaa_intf->rx_queues = NULL;
 	}
 
 	memset(cgrid, 0, sizeof(cgrid));
@@ -1661,7 +1929,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* If congestion control is enabled globally*/
-	if (td_threshold) {
+	if (num_rx_fqs > 0 && td_threshold) {
 		dpaa_intf->cgr_rx = rte_zmalloc(NULL, sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
 		if (!dpaa_intf->cgr_rx) {
@@ -1680,12 +1948,22 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 		dpaa_intf->cgr_rx = NULL;
 	}
 
+	if (!fmc_q && !default_q) {
+		ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
					    num_rx_fqs, 0);
+		if (ret < 0) {
+			DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
+			goto free_rx;
+		}
+	}
+
 	for (loop = 0; loop < num_rx_fqs; loop++) {
 		if (default_q)
 			fqid = cfg->rx_def;
 		else
-			fqid = DPAA_PCD_FQID_START + fman_intf->mac_idx *
-				DPAA_PCD_FQID_MULTIPLIER + loop;
+			fqid = dev_rx_fqids[loop];
+
+		vsp_id = dev_vspids[loop];
 
 		if (dpaa_intf->cgr_rx)
 			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1695,6 +1973,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
			fqid);
 		if (ret)
 			goto free_rx;
+		dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
 	}
 	dpaa_intf->nb_rx_queues = num_rx_fqs;
@@ -1745,11 +2024,19 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
 
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
-		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
			[DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+	if (ret) {
+		DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
+		goto free_tx;
+	}
 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
-	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
-		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+	ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
			[DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+	if (ret) {
+		DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
+		goto free_tx;
+	}
 	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
 #endif
@@ -1766,6 +2053,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Populate ethdev structure */
 	eth_dev->dev_ops = &dpaa_devops;
+	eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
 	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
 	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
@@ -1782,22 +2070,27 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* copy the primary mac address */
 	rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
-	rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);
-
-	DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);
-
-	/* Disable RX mode */
-	fman_if_discard_rx_errors(fman_intf);
-	fman_if_disable_rx(fman_intf);
-	/* Disable promiscuous mode */
-	fman_if_promiscuous_disable(fman_intf);
-	/* Disable multicast */
-	fman_if_reset_mcast_filter_table(fman_intf);
-	/* Reset interface statistics */
-	fman_if_stats_reset(fman_intf);
-	/* Disable SG by default */
-	fman_if_set_sg(fman_intf, 0);
-	fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+
+	RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
		dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
+
+	if (!fman_intf->is_shared_mac) {
+		/* Configure error packet handling */
+		fman_if_receive_rx_errors(fman_intf,
			FM_FD_RX_STATUS_ERR_MASK);
+		/* Disable RX mode */
+		fman_if_disable_rx(fman_intf);
+		/* Disable promiscuous mode */
+		fman_if_promiscuous_disable(fman_intf);
+		/* Disable multicast */
+		fman_if_reset_mcast_filter_table(fman_intf);
+		/* Reset interface statistics */
+		fman_if_stats_reset(fman_intf);
+		/* Disable SG by default */
+		fman_if_set_sg(fman_intf, 0);
+		fman_if_set_maxfrm(fman_intf,
				   RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+	}
 
 	return 0;
 
@@ -1816,65 +2109,7 @@ free_rx:
 }
 
 static int
-dpaa_dev_uninit(struct rte_eth_dev *dev)
-{
-	struct dpaa_if *dpaa_intf = dev->data->dev_private;
-	int loop;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
-
-	if (!dpaa_intf) {
-		DPAA_PMD_WARN("Already closed or not started");
-		return -1;
-	}
-
-	dpaa_eth_dev_close(dev);
-
-	/* release configuration memory */
-	if (dpaa_intf->fc_conf)
-		rte_free(dpaa_intf->fc_conf);
-
-	/* Release RX congestion Groups */
-	if (dpaa_intf->cgr_rx) {
-		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
-			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
					 dpaa_intf->nb_rx_queues);
-	}
-
-	rte_free(dpaa_intf->cgr_rx);
-	dpaa_intf->cgr_rx = NULL;
-
-	/* Release TX congestion Groups */
-	if (dpaa_intf->cgr_tx) {
-		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
-			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
					 MAX_DPAA_CORES);
-		rte_free(dpaa_intf->cgr_tx);
-		dpaa_intf->cgr_tx = NULL;
-	}
-
-	rte_free(dpaa_intf->rx_queues);
-	dpaa_intf->rx_queues = NULL;
-
-	rte_free(dpaa_intf->tx_queues);
-	dpaa_intf->tx_queues = NULL;
-
-	dev->dev_ops = NULL;
-	dev->rx_pkt_burst = NULL;
-	dev->tx_pkt_burst = NULL;
-
-	return 0;
-}
-
-static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
 {
 	int diag;
@@ -1920,6 +2155,13 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
 		default_q = 1;
 	}
 
+	if (!(default_q || fmc_q)) {
+		if (dpaa_fm_init()) {
+			DPAA_PMD_ERR("FM init failed\n");
+			return -1;
+		}
+	}
+
 	/* disabling the default push mode for LS1043 */
 	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
 		dpaa_push_mode_max_queue = 0;
@@ -1982,15 +2224,52 @@ static int
 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_eth_dev *eth_dev;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev = dpaa_dev->eth_dev;
-	dpaa_dev_uninit(eth_dev);
+	dpaa_eth_dev_close(eth_dev);
+	ret = rte_eth_dev_release_port(eth_dev);
 
-	rte_eth_dev_release_port(eth_dev);
+	return ret;
+}
 
-	return 0;
+static void __attribute__((destructor(102))) dpaa_finish(void)
+{
+	/* For secondary, primary will do all the cleanup */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
+	if (!(default_q || fmc_q)) {
+		unsigned int i;
+
+		for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+			if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
+				struct rte_eth_dev *dev = &rte_eth_devices[i];
+				struct dpaa_if *dpaa_intf =
					dev->data->dev_private;
+				struct fman_if *fif =
					dev->process_private;
+				if (dpaa_intf->port_handle)
+					if (dpaa_fm_deconfig(dpaa_intf, fif))
+						DPAA_PMD_WARN("DPAA FM "
							"deconfig failed\n");
+				if (fif->num_profiles) {
+					if (dpaa_port_vsp_cleanup(dpaa_intf,
								  fif))
+						DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
+				}
+			}
+		}
+		if (is_global_init)
+			if (dpaa_fm_term())
+				DPAA_PMD_WARN("DPAA FM term failed\n");
+
+		is_global_init = 0;
+
+		DPAA_PMD_INFO("DPAA fman cleaned up");
+	}
 }
 
 static struct rte_dpaa_driver rte_dpaa_pmd = {
@@ -2001,4 +2280,4 @@ static struct rte_dpaa_driver rte_dpaa_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
-RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);