#include <sys/types.h>
#include <sys/syscall.h>
+#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup;
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
+static int default_q; /* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues as default - as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
static struct rte_dpaa_driver rte_dpaa_pmd;
-static void
+static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
static inline void
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
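+ /* largest payload that fits in a single Rx buffer (data room minus headroom) */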
+ uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
- if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+ return -EINVAL;
+ /*
+ * Refuse mtu that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size > buffsz) {
+ DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ /* check <seg size> * <max_seg> >= max_frame */
+ if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
+ (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
+ DPAA_PMD_ERR("Too big to fit for Max SG list %d",
+ buffsz * DPAA_SGT_MAX_ENTRIES);
return -EINVAL;
- if (frame_size > ETHER_MAX_LEN)
+ }
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
PMD_INIT_FUNC_TRACE();
/* Rx offloads validation */
- if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
- DPAA_PMD_ERR(
- "Rx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- rx_offloads,
- dev_rx_offloads_sup | dev_rx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_WARN(
"Rx offloads non configurable - requested 0x%" PRIx64
}
/* Tx offloads validation */
- if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
- DPAA_PMD_ERR(
- "Tx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- tx_offloads,
- dev_tx_offloads_sup | dev_tx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_tx_offloads_nodis & ~tx_offloads) {
DPAA_PMD_WARN(
"Tx offloads non configurable - requested 0x%" PRIx64
}
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ DPAA_PMD_DEBUG("enabling jumbo");
+
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- DPAA_MAX_RX_PKT_LEN) {
- fman_if_set_maxfrm(dpaa_intf->fif,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- return 0;
- } else {
- return -1;
+ DPAA_MAX_RX_PKT_LEN)
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ else {
+ DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+ "supported is %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ DPAA_MAX_RX_PKT_LEN);
+ max_len = DPAA_MAX_RX_PKT_LEN;
}
+
+ fman_if_set_maxfrm(dpaa_intf->fif, max_len);
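+ /* keep the reported MTU consistent with the programmed max frame length */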
+ dev->data->mtu = max_len
+ - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ DPAA_PMD_DEBUG("enabling scatter mode");
+ fman_if_set_sg(dpaa_intf->fif, 1);
+ dev->data->scattered_rx = 1;
}
+
return 0;
}
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
- /*todo -= add more types */
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV4_EXT,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_SCTP
return 0;
}
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
- dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G);
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g) {
+ dev_info->speed_capa = ETH_LINK_SPEED_1G;
+ } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
+ } else {
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+ return -EINVAL;
+ }
+
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
dev_tx_offloads_nodis;
dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+
+ return 0;
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
- "%s",
- dpaa_xstats_strings[i].name);
+ strlcpy(xstats_names[i].name,
+ dpaa_xstats_strings[i].name,
+ sizeof(xstats_names[i].name));
return stat_cnt;
}
return limit;
}
-static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_enable(dpaa_intf->fif);
+
+ return 0;
}
-static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_disable(dpaa_intf->fif);
+
+ return 0;
}
static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
struct qm_mcc_initfq opts = {0};
u32 flags = 0;
int ret;
+ u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, rxq->fqid);
+
+ /* Max packet can fit in single buffer */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+ ;
+ } else if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ buffsz * DPAA_SGT_MAX_ENTRIES) {
+ DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+ "MaxSGlist %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ } else {
+ DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz - RTE_PKTMBUF_HEADROOM);
+ }
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
struct fman_if_ic_params icp;
fman_if_set_bp(dpaa_intf->fif, mp->size,
dpaa_intf->bp_info->bpid, bp_size);
dpaa_intf->valid = 1;
- DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
- dpaa_intf->name, fd_offset,
- fman_if_get_fdoff(dpaa_intf->fif));
+ DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
}
+ DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+ fman_if_get_sg_enable(dpaa_intf->fif),
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
/* checking if push mode only, no error check for now */
if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
dpaa_push_queue_idx++;
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
ret = qman_init_fq(rxq, flags, &opts);
- if (ret)
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
- " ret: %d", rxq->fqid, ret);
- rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
- rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
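+ /* LS1043A family uses the non-prefetch Rx pull callback */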
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+ } else {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ }
+
rxq->is_static = true;
}
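+ /* cache the buffer pool (bpid) info table for use by the Rx path */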
+ rxq->bp_array = rte_dpaa_bpid_info;
dev->data->rx_queues[queue_idx] = rxq;
/* configure the CGR size as per the desc size */
return 0;
}
-int __rte_experimental
+int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
u16 ch_id,
struct qm_mcc_initfq opts = {0};
if (dpaa_push_mode_max_queue)
- DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+ DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
+ "PUSH mode already enabled for first %d queues.\n"
"To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
dpaa_push_mode_max_queue);
ret = qman_init_fq(rxq, flags, &opts);
if (ret) {
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
- rxq->fqid, ret);
+ DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
return ret;
}
return ret;
}
-int __rte_experimental
+int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id)
{
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
return 0;
}
static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
+ struct rte_ether_addr *addr,
uint32_t index,
__rte_unused uint32_t pool)
{
static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
int ret;
struct dpaa_if *dpaa_intf = dev->data->dev_private;
return is_device_supported(dev, &rte_dpaa_pmd);
}
-int __rte_experimental
+int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
struct rte_eth_dev *dev;
{
struct qm_mcc_initfq opts = {0};
int ret;
- u32 flags = 0;
+ u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
struct qm_mcc_initcgr cgr_opts = {
.we_mask = QM_CGR_WE_CS_THRES |
QM_CGR_WE_CSTD_EN |
PMD_INIT_FUNC_TRACE();
- ret = qman_reserve_fqid(fqid);
- if (ret) {
- DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
- fqid, ret);
- return -EINVAL;
+ if (fqid) {
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+ } else {
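+ /* no fqid given - let QMan allocate the FQID dynamically */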
+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
}
-
- DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
- ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
+ ret = qman_create_fq(fqid, flags, fq);
if (ret) {
- DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
fqid, ret);
return ret;
}
&cgr_opts);
if (ret) {
DPAA_PMD_WARN(
- "rx taildrop init fail on rx fqid %d (ret=%d)",
- fqid, ret);
+ "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
+ fq->fqid, ret);
goto without_cgr;
}
opts.we_mask |= QM_INITFQ_WE_CGID;
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
without_cgr:
- ret = qman_init_fq(fq, flags, &opts);
+ ret = qman_init_fq(fq, 0, &opts);
if (ret)
- DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+ DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
return ret;
}
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
- DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+ DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret)
- DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+ DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
return ret;
}
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
- int num_cores, num_rx_fqs, fqid;
+ int num_rx_fqs, fqid;
int loop, ret = 0;
int dev_id;
struct rte_dpaa_device *dpaa_device;
PMD_INIT_FUNC_TRACE();
+ dpaa_intf = eth_dev->data->dev_private;
/* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = &dpaa_devops;
+ /* Plugging of UCODE burst API not supported in Secondary */
+ eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+ eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
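+ /* reuse the FQ lookup table already set up by the primary process */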
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ qman_set_fq_lookup_table(
+ dpaa_intf->rx_queues->qman_fq_lookup_table);
+#endif
return 0;
+ }
dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
dev_id = dpaa_device->id.dev_id;
dpaa_intf->cfg = cfg;
/* Initialize Rx FQ's */
- if (getenv("DPAA_NUM_RX_QUEUES"))
- num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
- else
+ if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
-
- /* if push mode queues to be enabled. Currenly we are allowing only
- * one queue per thread.
- */
- if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
- dpaa_push_mode_max_queue =
- atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
- if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
- dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ } else {
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
}
+
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
}
for (loop = 0; loop < num_rx_fqs; loop++) {
- fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
- DPAA_PCD_FQID_MULTIPLIER + loop;
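+ /* in default mode (no FMC) all Rx traffic lands on the default queue */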
+ if (default_q)
+ fqid = cfg->rx_def;
+ else
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
dpaa_intf->nb_rx_queues = num_rx_fqs;
/* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */
- num_cores = rte_lcore_count();
dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
- num_cores, MAX_CACHELINE);
+ MAX_DPAA_CORES, MAX_CACHELINE);
if (!dpaa_intf->tx_queues) {
DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
ret = -ENOMEM;
goto free_rx;
}
- for (loop = 0; loop < num_cores; loop++) {
+ for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
fman_intf);
if (ret)
goto free_tx;
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
}
- dpaa_intf->nb_tx_queues = num_cores;
+ dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
dpaa_debug_queue_init(&dpaa_intf->debug_queues[
/* reset bpool list, initialize bpool dynamically */
list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
list_del(&bp->node);
- free(bp);
+ rte_free(bp);
}
/* Populate ethdev structure */
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
- ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+ RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
if (eth_dev->data->mac_addrs == NULL) {
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+ RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
ret = -ENOMEM;
goto free_tx;
}
/* copy the primary mac address */
- ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
dpaa_device->name,
fman_if_reset_mcast_filter_table(fman_intf);
/* Reset interface statistics */
fman_if_stats_reset(fman_intf);
+ /* Disable SG by default */
+ fman_if_set_sg(fman_intf, 0);
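+ /* reset max frame size to standard Ethernet + VLAN until configured otherwise */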
+ fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
return 0;
rte_free(dpaa_intf->tx_queues);
dpaa_intf->tx_queues = NULL;
- /* free memory for storing MAC addresses */
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
}
static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
int diag;
PMD_INIT_FUNC_TRACE();
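+ /* the DPAA annotation area must fit within the mbuf headroom */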
+ if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
+ RTE_PKTMBUF_HEADROOM) {
+ DPAA_PMD_ERR(
+ "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
+ RTE_PKTMBUF_HEADROOM,
+ DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
+
+ return -1;
+ }
+
/* In case of secondary process, the device is already configured
* and no further action is required, except portal initialization
* and verifying secondary attachment to port name.
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
if (!eth_dev)
return -ENOMEM;
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->dev_ops = &dpaa_devops;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
- if (!is_global_init) {
+ if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
/* One time load of Qman/Bman drivers */
ret = qman_global_init();
if (ret) {
return ret;
}
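+ /* if the FMC tool has not been run (no /tmp/fmc.bin), use default queue mode */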
+ if (access("/tmp/fmc.bin", F_OK) == -1) {
+ RTE_LOG(INFO, PMD,
+ "* FMC not configured.Enabling default mode\n");
+ default_q = 1;
+ }
+
+ /* disabling the default push mode for LS1043 */
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ dpaa_push_mode_max_queue = 0;
+
+ /* If push mode queues are to be enabled - currently we allow
+ * only one queue per thread.
+ */
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
is_global_init = 1;
}
}
}
- eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
- if (eth_dev == NULL)
- return -ENOMEM;
+ /* In case of secondary process, the device is already configured
+ * and no further action is required, except portal initialization
+ * and verifying secondary attachment to port name.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+ if (!eth_dev)
+ return -ENOMEM;
+ } else {
+ eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
- eth_dev->data->dev_private = rte_zmalloc(
- "ethdev private structure",
- sizeof(struct dpaa_if),
- RTE_CACHE_LINE_SIZE);
- if (!eth_dev->data->dev_private) {
- DPAA_PMD_ERR("Cannot allocate memzone for port data");
- rte_eth_dev_release_port(eth_dev);
- return -ENOMEM;
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa_if),
+ RTE_CACHE_LINE_SIZE);
+ if (!eth_dev->data->dev_private) {
+ DPAA_PMD_ERR("Cannot allocate memzone for port data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
}
-
eth_dev->device = &dpaa_dev->device;
- eth_dev->device->driver = &dpaa_drv->driver;
dpaa_dev->eth_dev = eth_dev;
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
- if (diag == 0)
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
+ }
rte_eth_dev_release_port(eth_dev);
return diag;
eth_dev = dpaa_dev->eth_dev;
dpaa_dev_uninit(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return 0;