/* * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*
*/
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
DEV_TX_OFFLOAD_MULTI_SEGS;
/* enable timestamp in mbuf */
-enum pmd_dpaa2_ts dpaa2_enable_ts;
+bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
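+/* The per-port flags above are indexed by port_id and set in
+ * dpaa2_eth_dev_configure() when DEV_RX_OFFLOAD_TIMESTAMP is requested
+ * (or unconditionally on IEEE1588 builds).
+ */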
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-int dpaa2_logtype_pmd;
-
-void
-rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
-{
- dpaa2_enable_ts = enable;
-}
-
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = dev->process_private;
- int ret;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
- goto next_mask;
+ return -ENOTSUP;
}
if (dev->data->dev_conf.rxmode.offloads &
if (ret < 0)
DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
}
-next_mask:
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND)
- DPAA2_PMD_INFO("VLAN extend offload not supported");
- }
- return 0;
+ return ret;
}
static int
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
+ dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
+ /* use the same burst size as Rx for best performance */
+ dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
+
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
+ dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
+
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G;
+ }
+
return 0;
}
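+
+/*
+ * Example (sketch, not part of the driver): applications can pick up the
+ * tuned defaults above through the generic ethdev query; "port_id" and
+ * "nb_rxd" are assumed application-side names.
+ *
+ *	struct rte_eth_dev_info dev_info;
+ *
+ *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
+ *		nb_rxd = dev_info.default_rxportconf.ring_size;
+ */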
+static int
+dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ int ret = -EINVAL;
+ unsigned int i;
+ const struct burst_info {
+ uint64_t flags;
+ const char *output;
+ } rx_offload_map[] = {
+ {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+ {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+ {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+ {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+ {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+ {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
+ {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+ };
+
+ /* Report the first enabled Rx offload in mode->info */
+ for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+ if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ rx_offload_map[i].output);
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
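+
+/*
+ * Example (sketch): the string filled in above is what applications see
+ * through the generic burst-mode query; "port_id" is an assumed variable.
+ *
+ *	struct rte_eth_burst_mode mode;
+ *
+ *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
+ *		printf("Rx burst mode: %s\n", mode.info);
+ */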
+
+static int
+dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ int ret = -EINVAL;
+ unsigned int i;
+ const struct burst_info {
+ uint64_t flags;
+ const char *output;
+ } tx_offload_map[] = {
+ {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ };
+
+ /* Report the first enabled Tx offload in mode->info */
+ for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+ if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ tx_offload_map[i].output);
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
int rx_l4_csum_offload = false;
int tx_l3_csum_offload = false;
int tx_l4_csum_offload = false;
- int ret;
+ int ret, tc_index;
PMD_INIT_FUNC_TRACE();
}
if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
- ret = dpaa2_setup_flow_dist(dev,
- eth_conf->rx_adv_conf.rss_conf.rss_hf);
- if (ret) {
- DPAA2_PMD_ERR("Unable to set flow distribution."
- "Check queue config");
- return ret;
+ for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+ ret = dpaa2_setup_flow_dist(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf,
+ tc_index);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Unable to set flow distribution on tc%d."
+ " Check queue config", tc_index);
+ return ret;
+ }
}
}
return ret;
}
+#if !defined(RTE_LIBRTE_IEEE1588)
if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
- dpaa2_enable_ts = true;
+#endif
+ dpaa2_enable_ts[dev->data->port_id] = true;
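+ /* Note: with RTE_LIBRTE_IEEE1588 the condition above is compiled out,
+  * so the per-port flag is always set on PTP-enabled builds.
+  */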
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
tx_l3_csum_offload = true;
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
- /* update the current status */
- dpaa2_dev_link_update(dev, 0);
-
return 0;
}
uint16_t rx_queue_id,
uint16_t nb_rx_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
dev, rx_queue_id, mb_pool, rx_conf);
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ DPAA2_PMD_ERR("%p:Rx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
bpid = mempool_to_bpid(mb_pool);
ret = dpaa2_attach_bp_list(priv,
dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
dpaa2_q->bp_array = rte_dpaa2_bpid_info;
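+ /* Default to an effectively unbounded depth; the taildrop setup below
+  * records the real nb_rx_desc when a CGR is available.
+  */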
+ dpaa2_q->nb_desc = UINT16_MAX;
+ dpaa2_q->offloads = rx_conf->offloads;
/*Get the flow id from given VQ id*/
flow_id = dpaa2_q->flow_id;
struct dpni_taildrop taildrop;
taildrop.enable = 1;
-
+ dpaa2_q->nb_desc = nb_rx_desc;
/* A private CGR will use the tail drop length as nb_rx_desc.
* For the remaining cases we can use the standard byte-based tail
* drop. There is no HW restriction, but the number of CGRs is
* limited,
DPNI_CP_CONGESTION_GROUP,
DPNI_QUEUE_RX,
dpaa2_q->tc_index,
- flow_id, &taildrop);
+ dpaa2_q->cgid, &taildrop);
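+ /* the congestion-group scope is keyed by the CG id, not the flow id */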
} else {
/*enabling per rx queue congestion control */
taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
dpaa2_q->tc_index,
- flow_id, &taildrop);
+ dpaa2_q->cgid, &taildrop);
} else {
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
+ uint16_t nb_tx_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
+ const struct rte_eth_txconf *tx_conf)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
PMD_INIT_FUNC_TRACE();
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ DPAA2_PMD_ERR("%p:Tx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
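+ /* Default depth; overwritten with nb_tx_desc once the Tx CGR is
+  * configured below.
+  */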
+ dpaa2_q->nb_desc = UINT16_MAX;
+ dpaa2_q->offloads = tx_conf->offloads;
+
/* Return if queue already configured */
if (dpaa2_q->flow_id != 0xffff) {
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
flow_id = 0;
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
- tc_id, flow_id, options, &tx_flow_cfg);
+ tc_id, flow_id, options, &tx_flow_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx flow: "
- "tc_id=%d, flow=%d err=%d",
- tc_id, flow_id, ret);
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
return -1;
}
if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
+ dpaa2_q->nb_desc = nb_tx_desc;
+
cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
- cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+ cong_notif_cfg.threshold_entry = nb_tx_desc;
/* Notify that the queue is not congested when the number of frames
* in the queue drops below this threshold (kept a few frames under
* the entry threshold to provide hysteresis).
*/
- cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+ cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
cong_notif_cfg.message_ctx = 0;
cong_notif_cfg.message_iova =
(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
struct qbman_fq_query_np_rslt state;
uint32_t frame_cnt = 0;
- PMD_INIT_FUNC_TRACE();
-
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_PMD_ERR("Failure in affining portal");
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d",
+ rte_gettid());
return -EINVAL;
}
}
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
frame_cnt = qbman_fq_state_frame_count(&state);
- DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
rx_queue_id, frame_cnt);
}
return frame_cnt;
return -EINVAL;
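+ /* Enable (rather than clear) the jumbo offload for frames above
+  * RTE_ETHER_MAX_LEN.
+  */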
if (frame_size > RTE_ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads &=
+ dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
/* changing tx burst function to start enqueues */
dev->tx_pkt_burst = dpaa2_dev_tx;
dev->data->dev_link.link_status = state.up;
+ dev->data->dev_link.link_speed = state.rate;
if (state.up)
DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev_data *data = dev->data;
+ struct dpaa2_dev_priv *priv = data->dev_private;
struct rte_eth_conf *eth_conf = &data->dev_conf;
- int ret;
+ int ret, tc_index;
PMD_INIT_FUNC_TRACE();
if (rss_conf->rss_hf) {
- ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
- if (ret) {
- DPAA2_PMD_ERR("Unable to set flow dist");
- return ret;
+ for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+ ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
+ tc_index);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
+ tc_index);
+ return ret;
+ }
}
} else {
- ret = dpaa2_remove_flow_dist(dev, 0);
- if (ret) {
- DPAA2_PMD_ERR("Unable to remove flow dist");
- return ret;
+ for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+ ret = dpaa2_remove_flow_dist(dev, tc_index);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Unable to remove flow dist on tc%d",
+ tc_index);
+ return ret;
+ }
}
}
eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
return ret;
}
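+
+/*
+ * Example (sketch): an RSS update from the application now fans out to
+ * every Rx traffic class; "port_id" is an assumed variable.
+ *
+ *	struct rte_eth_rss_conf rss_conf = {
+ *		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
+ *	};
+ *
+ *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
+ */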
+static void
+dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct dpaa2_queue *rxq;
+
+ rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_desc;
+
+ qinfo->conf.rx_free_thresh = 1;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = 0;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+static void
+dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct dpaa2_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_desc;
+ qinfo->conf.tx_thresh.pthresh = 0;
+ qinfo->conf.tx_thresh.hthresh = 0;
+ qinfo->conf.tx_thresh.wthresh = 0;
+
+ qinfo->conf.tx_free_thresh = 0;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = 0;
+}
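+
+/*
+ * Example (sketch): these callbacks back the generic per-queue queries;
+ * "port_id" is an assumed variable.
+ *
+ *	struct rte_eth_rxq_info rx_info;
+ *	struct rte_eth_txq_info tx_info;
+ *
+ *	rte_eth_rx_queue_info_get(port_id, 0, &rx_info);
+ *	rte_eth_tx_queue_info_get(port_id, 0, &tx_info);
+ */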
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
.tx_queue_release = dpaa2_dev_tx_queue_release,
+ .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
+ .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
.rx_queue_count = dpaa2_dev_rx_queue_count,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
.flow_ctrl_set = dpaa2_flow_ctrl_set,
.rss_hash_update = dpaa2_dev_rss_hash_update,
.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
.filter_ctrl = dpaa2_dev_flow_ctrl,
+ .rxq_info_get = dpaa2_rxq_info_get,
+ .txq_info_get = dpaa2_txq_info_get,
#if defined(RTE_LIBRTE_IEEE1588)
.timesync_enable = dpaa2_timesync_enable,
.timesync_disable = dpaa2_timesync_disable,
DPAA2_PMD_ERR("Memory allocation failed for dpni device");
return -1;
}
- dpni_dev->regs = rte_mcp_ptr_list[0];
+ dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
eth_dev->process_private = (void *)dpni_dev;
/* For secondary processes, the primary has done all the work */
}
priv->num_rx_tc = attr.num_rx_tcs;
+ priv->qos_entries = attr.qos_entries;
+ priv->fs_entries = attr.fs_entries;
+ priv->dist_queues = attr.num_queues;
+
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
priv->max_cgs = attr.num_cgs;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
/* Init fields w.r.t. classification */
- memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ memset(&priv->extract.qos_key_extract, 0,
+ sizeof(struct dpaa2_key_extract));
priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
if (!priv->extract.qos_extract_param) {
DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
" classificaiton ", ret);
goto init_err;
}
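+ /* Mark all IP address extract offsets invalid; they are expected to be
+  * filled in when flow rules add IP address fields to the key (an
+  * assumption based on the field naming).
+  */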
+ priv->extract.qos_key_extract.key_info.ipv4_src_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.qos_key_extract.key_info.ipv6_src_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+
for (i = 0; i < MAX_TCS; i++) {
- memset(&priv->extract.fs_key_cfg[i], 0,
- sizeof(struct dpkg_profile_cfg));
- priv->extract.fs_extract_param[i] =
+ memset(&priv->extract.tc_key_extract[i], 0,
+ sizeof(struct dpaa2_key_extract));
+ priv->extract.tc_extract_param[i] =
(size_t)rte_malloc(NULL, 256, 64);
- if (!priv->extract.fs_extract_param[i]) {
+ if (!priv->extract.tc_extract_param[i]) {
DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
ret);
goto init_err;
}
+ priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
+ IP_ADDRESS_OFFSET_INVALID;
+ priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
+ IP_ADDRESS_OFFSET_INVALID;
}
ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
eth_dev->process_private = NULL;
rte_free(dpni);
- for (i = 0; i < MAX_TCS; i++) {
- if (priv->extract.fs_extract_param[i])
- rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
- }
+ for (i = 0; i < MAX_TCS; i++)
+ rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
if (priv->extract.qos_extract_param)
rte_free((void *)(size_t)priv->extract.qos_extract_param);
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
DRIVER_LOOPBACK_MODE "=<int> "
DRIVER_NO_PREFETCH_MODE "=<int>");
-RTE_INIT(dpaa2_pmd_init_log)
-{
- dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
- if (dpaa2_logtype_pmd >= 0)
- rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);