static struct rte_dpaa_driver rte_dpaa_pmd;
-static void
+static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
static inline void
return 0;
}
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ if (dpaa_intf->fif->mac_type == fman_mac_1g) {
dev_info->speed_capa = ETH_LINK_SPEED_1G;
- else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
- else
+ } else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, dpaa_intf->fif->mac_type);
+ return -EINVAL;
+ }
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
dev_tx_offloads_nodis;
dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+
+ return 0;
}
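With dev_infos_get now returning a status instead of void, a bad MAC type is reported as -EINVAL rather than being silently ignored. On ethdev versions where the public API carries the same change, an application can check the result; a minimal sketch, not part of the patch (port_id is assumed to be a valid, probed port):

	struct rte_eth_dev_info dev_info;
	int ret;

	/* Sketch only: propagate the driver's error to the caller. */
	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0) {
		printf("Cannot get info for port %u: %s\n",
		       port_id, rte_strerror(-ret));
		return ret;
	}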
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
return limit;
}
-static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_enable(dpaa_intf->fif);
+
+ return 0;
}
-static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_disable(dpaa_intf->fif);
+
+ return 0;
}
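The promiscuous callbacks follow the same pattern: they now report success explicitly so the ethdev layer can pass a failure back to the application. A minimal application-side sketch, assuming an ethdev API where rte_eth_promiscuous_enable() also returns int:

	int ret;

	/* Sketch only, not part of the patch. */
	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0)
		printf("Cannot enable promiscuous mode on port %u: %s\n",
		       port_id, rte_strerror(-ret));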
static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
- int num_cores, num_rx_fqs, fqid;
+ int num_rx_fqs, fqid;
int loop, ret = 0;
int dev_id;
struct rte_dpaa_device *dpaa_device;
dpaa_intf->nb_rx_queues = num_rx_fqs;
/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
- num_cores = rte_lcore_count();
dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
- num_cores, MAX_CACHELINE);
+ MAX_DPAA_CORES, MAX_CACHELINE);
if (!dpaa_intf->tx_queues) {
DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
ret = -ENOMEM;
goto free_rx;
}
- for (loop = 0; loop < num_cores; loop++) {
+ for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
fman_intf);
if (ret)
goto free_tx;
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
}
- dpaa_intf->nb_tx_queues = num_cores;
+ dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
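Allocating MAX_DPAA_CORES Tx FQs instead of rte_lcore_count() keeps the queue layout independent of how many lcores a given process was launched with, so primary and secondary processes see the same structure. A hypothetical datapath-side sketch of per-core queue selection (the indexing policy is an assumption, not taken from the patch):

	/* Hypothetical sketch: one Tx FQ per possible core, indexed by the
	 * running lcore (rte_lcore_id() from rte_lcore.h). */
	struct qman_fq *txq =
		&dpaa_intf->tx_queues[rte_lcore_id() % MAX_DPAA_CORES];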
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
dpaa_debug_queue_init(&dpaa_intf->debug_queues[
PMD_INIT_FUNC_TRACE();
+ if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
+ RTE_PKTMBUF_HEADROOM) {
+ DPAA_PMD_ERR(
+ "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
+ RTE_PKTMBUF_HEADROOM,
+ DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
+
+ return -1;
+ }
+
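This guard fails the probe when the build-time mbuf headroom cannot hold the DPAA hardware annotation plus the frame-descriptor PTA area. Since all three values are compile-time constants, an equivalent build-time check could look roughly like the sketch below (not part of the patch):

	/* Sketch only: same condition as the runtime check above. */
	_Static_assert(DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE <=
		       RTE_PKTMBUF_HEADROOM,
		       "RTE_PKTMBUF_HEADROOM too small for DPAA annotation area");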
/* In case of secondary process, the device is already configured
* and no further action is required, except portal initialization
* and verifying secondary attachment to port name.
return 0;
}
- if (!is_global_init) {
+ if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
/* One time load of Qman/Bman drivers */
ret = qman_global_init();
if (ret) {
}
}
- eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
- if (eth_dev == NULL)
- return -ENOMEM;
+ /* In case of secondary process, the device is already configured
+ * and no further action is required, except portal initialization
+ * and verifying secondary attachment to port name.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+ if (!eth_dev)
+ return -ENOMEM;
+ } else {
+ eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
- eth_dev->data->dev_private = rte_zmalloc(
- "ethdev private structure",
- sizeof(struct dpaa_if),
- RTE_CACHE_LINE_SIZE);
- if (!eth_dev->data->dev_private) {
- DPAA_PMD_ERR("Cannot allocate memzone for port data");
- rte_eth_dev_release_port(eth_dev);
- return -ENOMEM;
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa_if),
+ RTE_CACHE_LINE_SIZE);
+ if (!eth_dev->data->dev_private) {
+ DPAA_PMD_ERR("Cannot allocate memzone for port data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
}
-
eth_dev->device = &dpaa_dev->device;
dpaa_dev->eth_dev = eth_dev;
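The split above is the standard multi-process pattern: the primary process allocates the port and its private data, while a secondary process attaches to the already-created port by name through rte_eth_dev_attach_secondary() and reuses the shared data. The secondary path still has to set up anything process-local; a rough sketch of what typically follows, with burst function names assumed rather than shown in this excerpt:

	/* Sketch only: process-local wiring a secondary process still needs
	 * after attaching; dpaa_eth_queue_rx/tx are assumed datapath symbols. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
		return 0;
	}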