#include <sys/types.h>
#include <sys/syscall.h>
+#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup;
struct dpaa_if *dpaa_intf = dev->data->dev_private;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
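+ /* usable data area of the smallest configured Rx buffer, with the
+ * mbuf headroom already excluded; e.g. for MTU 1500, frame_size is
+ * 1500 + 14 (ETH) + 4 (CRC) + 4 (VLAN) = 1522 bytes
+ */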
+ uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
+ /*
+ * Refuse an MTU that requires scattered-packet support when
+ * that feature has not been enabled beforehand.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size > buffsz) {
+ DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ /* check <seg size> * <max_seg> >= max_frame */
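+ /* e.g. if DPAA_SGT_MAX_ENTRIES is 16 and each buffer holds 2048
+ * bytes, frames up to 32KB can be received
+ */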
+ if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
+ (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
+ DPAA_PMD_ERR("Too big to fit for Max SG list %d",
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ return -EINVAL;
+ }
+
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads &=
+ dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
}
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ DPAA_PMD_DEBUG("enabling jumbo");
+
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- DPAA_MAX_RX_PKT_LEN) {
- fman_if_set_maxfrm(dpaa_intf->fif,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- return 0;
- } else {
- return -1;
+ DPAA_MAX_RX_PKT_LEN)
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ else {
+ DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+ "supported is %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ DPAA_MAX_RX_PKT_LEN);
+ max_len = DPAA_MAX_RX_PKT_LEN;
}
+
+ fman_if_set_maxfrm(dpaa_intf->fif, max_len);
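+ /* keep the stored MTU consistent with the max frame just programmed */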
+ dev->data->mtu = max_len
+ - ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
}
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ DPAA_PMD_DEBUG("enabling scatter mode");
+ fman_if_set_sg(dpaa_intf->fif, 1);
+ dev->data->scattered_rx = 1;
+ }
+
return 0;
}
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
- /*todo -= add more types */
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV4_EXT,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_SCTP
dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
- dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G);
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ dev_info->speed_capa = ETH_LINK_SPEED_1G;
+ else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
- "%s",
- dpaa_xstats_strings[i].name);
+ strlcpy(xstats_names[i].name,
+ dpaa_xstats_strings[i].name,
+ sizeof(xstats_names[i].name));
return stat_cnt;
}
struct qm_mcc_initfq opts = {0};
u32 flags = 0;
int ret;
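+ /* per-mbuf payload capacity of this pool, headroom excluded */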
+ u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
queue_idx, rxq->fqid);
+ /* Max packet can fit in single buffer */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
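+ /* fits in one buffer, nothing more to check */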
+ ;
+ } else if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ buffsz * DPAA_SGT_MAX_ENTRIES) {
+ DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+ "MaxSGlist %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ } else {
+ DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz);
+ }
+
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
struct fman_if_ic_params icp;
uint32_t fd_offset;
fman_if_set_bp(dpaa_intf->fif, mp->size,
dpaa_intf->bp_info->bpid, bp_size);
dpaa_intf->valid = 1;
- DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
- dpaa_intf->name, fd_offset,
- fman_if_get_fdoff(dpaa_intf->fif));
+ DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
}
+ DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+ fman_if_get_sg_enable(dpaa_intf->fif),
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
/* checking if push mode only, no error check for now */
if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
dpaa_push_queue_idx++;
"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
return ret;
}
- rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
- rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+ } else {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ }
+
rxq->is_static = true;
}
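+ /* cache the bpid-indexed pool info table for the Rx callbacks */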
+ rxq->bp_array = rte_dpaa_bpid_info;
dev->data->rx_queues[queue_idx] = rxq;
/* configure the CGR size as per the desc size */
struct qm_mcc_initfq opts = {0};
if (dpaa_push_mode_max_queue)
- DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+ DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
+ "PUSH mode already enabled for first %d queues.\n"
"To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
dpaa_push_mode_max_queue);
{
struct qm_mcc_initfq opts = {0};
int ret;
- u32 flags = 0;
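+ /* Rx FQs are dequeue-only; the PMD never enqueues to them */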
+ u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
struct qm_mcc_initcgr cgr_opts = {
.we_mask = QM_CGR_WE_CS_THRES |
QM_CGR_WE_CSTD_EN |
PMD_INIT_FUNC_TRACE();
- ret = qman_reserve_fqid(fqid);
- if (ret) {
- DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
- fqid, ret);
- return -EINVAL;
+ if (fqid) {
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+ } else {
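+ /* no preferred FQID given, let QMan allocate one dynamically */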
+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
}
-
DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
- ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ ret = qman_create_fq(fqid, flags, fq);
if (ret) {
DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
fqid, ret);
if (ret) {
DPAA_PMD_WARN(
"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
- fqid, ret);
+ fq->fqid, ret);
goto without_cgr;
}
opts.we_mask |= QM_INITFQ_WE_CGID;
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
without_cgr:
- ret = qman_init_fq(fq, flags, &opts);
+ ret = qman_init_fq(fq, 0, &opts);
if (ret)
DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
return ret;
PMD_INIT_FUNC_TRACE();
+ dpaa_intf = eth_dev->data->dev_private;
/* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = &dpaa_devops;
+ /* Plugging of UCODE burst API not supported in Secondary */
+ eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+ eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ qman_set_fq_lookup_table(
+ dpaa_intf->rx_queues->qman_fq_lookup_table);
+#endif
return 0;
+ }
dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
dev_id = dpaa_device->id.dev_id;
}
- /* if push mode queues to be enabled. Currenly we are allowing only
- * one queue per thread.
- */
- if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
- dpaa_push_mode_max_queue =
- atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
- if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
- dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
- }
-
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
if (default_q)
fqid = cfg->rx_def;
else
- fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
DPAA_PCD_FQID_MULTIPLIER + loop;
if (dpaa_intf->cgr_rx)
/* reset bpool list, initialize bpool dynamically */
list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
list_del(&bp->node);
- free(bp);
+ rte_free(bp);
}
/* Populate ethdev structure */
fman_if_reset_mcast_filter_table(fman_intf);
/* Reset interface statistics */
fman_if_stats_reset(fman_intf);
+ /* Disable SG by default */
+ fman_if_set_sg(fman_intf, 0);
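+ /* default max frame: standard Ethernet (1518) plus one VLAN tag */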
+ fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
return 0;
rte_free(dpaa_intf->tx_queues);
dpaa_intf->tx_queues = NULL;
- /* free memory for storing MAC addresses */
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
}
static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
int diag;
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
if (!eth_dev)
return -ENOMEM;
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->dev_ops = &dpaa_devops;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
default_q = 1;
}
+ /* disabling the default push mode for LS1043 */
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ dpaa_push_mode_max_queue = 0;
+
+ /* Check if push mode queues are to be enabled. Currently we
+ * allow only one queue per thread.
+ */
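+ /* e.g. DPAA_PUSH_QUEUES_NUMBER=4 enables push mode on the first
+ * four queues
+ */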
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
is_global_init = 1;
}
}
eth_dev->device = &dpaa_dev->device;
- eth_dev->device->driver = &dpaa_drv->driver;
dpaa_dev->eth_dev = eth_dev;
/* Invoke PMD device initialization function */
return 0;
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return diag;
}
eth_dev = dpaa_dev->eth_dev;
dpaa_dev_uninit(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return 0;