X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa%2Fdpaa_ethdev.c;h=ad28c110d70ac3edd09f976ec7fec0b4ea43d2b6;hb=9039c8125730;hp=b1fac8fa74093e77572be89cd397afad9152f727;hpb=4762b3d419c357bd00012835e6fd430cde799dca;p=dpdk.git

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index b1fac8fa74..ad28c110d7 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -15,6 +15,7 @@
 #include <sys/types.h>
 #include <sys/syscall.h>
 
+#include <rte_string_fns.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
@@ -123,7 +124,7 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
-static void
+static int
 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
 
 static inline void
@@ -145,13 +146,13 @@ static int
 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
-	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
 				+ VLAN_TAG_SIZE;
 	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
 		return -EINVAL;
 	/*
 	 * Refuse mtu that requires the support of scattered packets
@@ -171,7 +172,7 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 	}
 
-	if (frame_size > ETHER_MAX_LEN)
+	if (frame_size > RTE_ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads &=
 						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -229,7 +230,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 		fman_if_set_maxfrm(dpaa_intf->fif, max_len);
 		dev->data->mtu = max_len
-			- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
 	}
 
 	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
@@ -329,8 +330,8 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
-			      struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
@@ -345,13 +346,15 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_vmdq_pools = ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
-	if (dpaa_intf->fif->mac_type == fman_mac_1g)
+	if (dpaa_intf->fif->mac_type == fman_mac_1g) {
 		dev_info->speed_capa = ETH_LINK_SPEED_1G;
-	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+	} else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
 		dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
-	else
+	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, dpaa_intf->fif->mac_type);
+		return -EINVAL;
+	}
 
 	dev_info->rx_offload_capa = dev_rx_offloads_sup |
 					dev_rx_offloads_nodis;
@@ -359,6 +362,8 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 			dev_tx_offloads_nodis;
 	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
 	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+
+	return 0;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -439,10 +444,9 @@ dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
 	if (xstats_names != NULL)
 		for (i = 0; i < stat_cnt; i++)
-			snprintf(xstats_names[i].name,
-				 sizeof(xstats_names[i].name),
-				 "%s",
-				 dpaa_xstats_strings[i].name);
+			strlcpy(xstats_names[i].name,
+				dpaa_xstats_strings[i].name,
+				sizeof(xstats_names[i].name));
 
 	return stat_cnt;
 }
 
@@ -510,22 +514,26 @@ dpaa_xstats_get_names_by_id(
 	return limit;
 }
 
-static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_promiscuous_enable(dpaa_intf->fif);
+
+	return 0;
 }
 
-static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_promiscuous_disable(dpaa_intf->fif);
+
+	return 0;
 }
 
 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
@@ -934,7 +942,7 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 
 static int
 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
-			     struct ether_addr *addr,
+			     struct rte_ether_addr *addr,
 			     uint32_t index,
 			     __rte_unused uint32_t pool)
 {
@@ -964,7 +972,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
 
 static int
 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
-		       struct ether_addr *addr)
+		       struct rte_ether_addr *addr)
 {
 	int ret;
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -1214,7 +1222,7 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 static int
 dpaa_dev_init(struct rte_eth_dev *eth_dev)
 {
-	int num_cores, num_rx_fqs, fqid;
+	int num_rx_fqs, fqid;
 	int loop, ret = 0;
 	int dev_id;
 	struct rte_dpaa_device *dpaa_device;
@@ -1319,23 +1327,22 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->nb_rx_queues = num_rx_fqs;
 
 	/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
-	num_cores = rte_lcore_count();
 	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
-		num_cores, MAX_CACHELINE);
+		MAX_DPAA_CORES, MAX_CACHELINE);
 	if (!dpaa_intf->tx_queues) {
 		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
 		ret = -ENOMEM;
 		goto free_rx;
 	}
 
-	for (loop = 0; loop < num_cores; loop++) {
+	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
 		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
 					 fman_intf);
 		if (ret)
 			goto free_tx;
 		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
 	}
-	dpaa_intf->nb_tx_queues = num_cores;
+	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
 
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
 	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
@@ -1364,17 +1371,17 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
-		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
 						"store MAC addresses",
-				ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+				RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
 		ret = -ENOMEM;
 		goto free_tx;
 	}
 
 	/* copy the primary mac address */
-	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+	rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
 
 	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
 		dpaa_device->name,
@@ -1396,7 +1403,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	fman_if_stats_reset(fman_intf);
 	/* Disable SG by default */
 	fman_if_set_sg(fman_intf, 0);
-	fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
+	fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
 
 	return 0;
 
@@ -1470,6 +1477,16 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
+		RTE_PKTMBUF_HEADROOM) {
+		DPAA_PMD_ERR(
"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); + + return -1; + } + /* In case of secondary process, the device is already configured * and no further action is required, except portal initialization * and verifying secondary attachment to port name. @@ -1484,7 +1501,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, return 0; } - if (!is_global_init) { + if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) { /* One time load of Qman/Bman drivers */ ret = qman_global_init(); if (ret) { @@ -1530,20 +1547,29 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, } } - eth_dev = rte_eth_dev_allocate(dpaa_dev->name); - if (eth_dev == NULL) - return -ENOMEM; + /* In case of secondary process, the device is already configured + * and no further action is required, except portal initialization + * and verifying secondary attachment to port name. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); + if (!eth_dev) + return -ENOMEM; + } else { + eth_dev = rte_eth_dev_allocate(dpaa_dev->name); + if (eth_dev == NULL) + return -ENOMEM; - eth_dev->data->dev_private = rte_zmalloc( - "ethdev private structure", - sizeof(struct dpaa_if), - RTE_CACHE_LINE_SIZE); - if (!eth_dev->data->dev_private) { - DPAA_PMD_ERR("Cannot allocate memzone for port data"); - rte_eth_dev_release_port(eth_dev); - return -ENOMEM; + eth_dev->data->dev_private = rte_zmalloc( + "ethdev private structure", + sizeof(struct dpaa_if), + RTE_CACHE_LINE_SIZE); + if (!eth_dev->data->dev_private) { + DPAA_PMD_ERR("Cannot allocate memzone for port data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } } - eth_dev->device = &dpaa_dev->device; dpaa_dev->eth_dev = eth_dev;