#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>
#define DRIVER_LOOPBACK_MODE "drv_loopback"
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
}
if (on)
- ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
- priv->token, vlan_id);
+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
+ vlan_id, 0, 0, 0);
else
ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
priv->token, vlan_id);
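/* Usage sketch (not part of this patch): the generic ethdev call below
 * is what reaches the handler above; port 0 and VLAN 100 are
 * illustrative assumptions.
 */
int rc = rte_eth_dev_vlan_filter(0 /* port_id */, 100, 1 /* on */);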
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
+ dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
+ /* use the same burst size as Rx for best performance */
+ dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
+
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
+ dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
+
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G;
+ }
+
return 0;
}
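/* Usage sketch (not part of this patch): applications can pick up the
 * tuned defaults populated above via the standard query; port 0 is
 * illustrative and the int return assumes a recent ethdev API.
 */
struct rte_eth_dev_info dev_info;

if (rte_eth_dev_info_get(0 /* port_id */, &dev_info) == 0) {
	uint16_t nb_rxd = dev_info.default_rxportconf.ring_size;
	uint16_t burst = dev_info.default_rxportconf.burst_size;
	/* ...size Rx rings with nb_rxd and poll in bursts of "burst"... */
}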
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
- /* update the current status */
- dpaa2_dev_link_update(dev, 0);
-
return 0;
}
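/* Usage sketch (not part of this patch): requesting VLAN filtering at
 * configure time so the branch above applies it; a single Rx/Tx queue
 * pair and port 0 are illustrative.
 */
struct rte_eth_conf port_conf = { 0 };

port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
int rc = rte_eth_dev_configure(0 /* port_id */, 1, 1, &port_conf);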
DPNI_CP_CONGESTION_GROUP,
DPNI_QUEUE_RX,
dpaa2_q->tc_index,
- flow_id, &taildrop);
+ dpaa2_q->cgid, &taildrop);
} else {
/*enabling per rx queue congestion control */
taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
dpaa2_q->tc_index,
- flow_id, &taildrop);
+ dpaa2_q->cgid, &taildrop);
} else {
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
struct qbman_fq_query_np_rslt state;
uint32_t frame_cnt = 0;
- PMD_INIT_FUNC_TRACE();
-
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_PMD_ERR("Failure in affining portal");
+ DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
+ rte_gettid());
return -EINVAL;
}
}
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
frame_cnt = qbman_fq_state_frame_count(&state);
- DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
rx_queue_id, frame_cnt);
}
return frame_cnt;
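/* Usage sketch (not part of this patch): the handler above backs this
 * generic query; a negative return indicates an error.
 */
int in_flight = rte_eth_rx_queue_count(0 /* port_id */, 0 /* queue */);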
return -EINVAL;
if (frame_size > RTE_ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads &=
+ dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
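/* Usage sketch (not part of this patch): with the |= fix above, setting
 * a jumbo MTU now sets DEV_RX_OFFLOAD_JUMBO_FRAME instead of clearing
 * it; 9000 bytes is an illustrative value.
 */
int rc = rte_eth_dev_set_mtu(0 /* port_id */, 9000);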
return -1;
}
- ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
- priv->token, addr->addr_bytes);
+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
+ addr->addr_bytes, 0, 0, 0);
if (ret)
DPAA2_PMD_ERR("Adding the MAC address failed: err = %d", ret);
/* changing tx burst function to start enqueues */
dev->tx_pkt_burst = dpaa2_dev_tx;
dev->data->dev_link.link_status = state.up;
+ dev->data->dev_link.link_speed = state.rate;
if (state.up)
DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
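/* Usage sketch (not part of this patch): with link_speed now filled in
 * above, the standard link query reports the negotiated rate; port 0
 * is illustrative.
 */
struct rte_eth_link link;

rte_eth_link_get_nowait(0 /* port_id */, &link);
printf("link speed: %u Mbps\n", link.link_speed);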
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
- uint16_t dpcon_id,
+ struct dpaa2_dpcon_dev *dpcon,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
uint8_t flow_id = dpaa2_ethq->flow_id;
struct dpni_queue cfg;
- uint8_t options;
+ uint8_t options, priority;
int ret;
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
else
return -EINVAL;
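+ /* Scale the eventdev priority (0 = highest, 255 = lowest) onto the
+ * DPCON's supported levels; e.g. ev.priority == 255 with a DPCON
+ * exposing 2 priorities gives (255 / 255) * (2 - 1) = 1. Note this
+ * computation assumes a non-zero ev.priority.
+ */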
+ priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
+ (dpcon->num_priorities - 1);
+
memset(&cfg, 0, sizeof(struct dpni_queue));
options = DPNI_QUEUE_OPT_DEST;
cfg.destination.type = DPNI_DEST_DPCON;
- cfg.destination.id = dpcon_id;
- cfg.destination.priority = queue_conf->ev.priority;
+ cfg.destination.id = dpcon->dpcon_id;
+ cfg.destination.priority = priority;
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
DPAA2_PMD_ERR("Memory allocation failed for dpni device");
return -1;
}
- dpni_dev->regs = rte_mcp_ptr_list[0];
+ dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
eth_dev->process_private = (void *)dpni_dev;
/* For secondary processes, the primary has done all the work */
goto init_err;
}
+ /* TODO: To enable soft parser support, the DPAA2 driver needs to
+ * integrate with an external entity that supplies the byte code for
+ * the software sequence, which is then offloaded to the H/W through
+ * the MC interface. For now it is assumed that the driver has obtained
+ * the byte code by some means, and that code is offloaded to the H/W.
+ */
+ if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
+ WRIOP_SS_INITIALIZER(priv);
+ ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
+ ret);
+ return ret;
+ }
+
+ ret = dpaa2_eth_enable_wriop_soft_parser(priv,
+ DPNI_SS_INGRESS);
+ if (ret < 0) {
+ DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
+ ret);
+ return ret;
+ }
+ }
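/* Usage sketch (not part of this patch): the soft parser path above is
 * opt-in via the environment, e.g.
 *
 *     DPAA2_ENABLE_SOFT_PARSER=1 ./dpdk-app ...
 *
 * where dpdk-app stands for any application bound to this PMD.
 */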
RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
return 0;
init_err: