DEV_TX_OFFLOAD_MULTI_SEGS;
/* enable timestamp in mbuf */
-enum pmd_dpaa2_ts dpaa2_enable_ts;
+bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-void
-rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
-{
- dpaa2_enable_ts = enable;
-}
-
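With the global enum replaced by a per-port array, the removed setter has no one-line equivalent in this hunk. A minimal sketch of what a per-port replacement could look like, assuming a hypothetical rte_pmd_dpaa2_set_timestamp_per_port() helper (name and signature are illustrative, not part of this patch):

void
rte_pmd_dpaa2_set_timestamp_per_port(uint16_t port_id, bool enable)
{
	/* hypothetical helper: bound-check the port before indexing */
	if (port_id < RTE_MAX_ETHPORTS)
		dpaa2_enable_ts[port_id] = enable;
}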
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = dev->process_private;
- int ret;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
- goto next_mask;
+ return -ENOTSUP;
}
if (dev->data->dev_conf.rxmode.offloads &
if (ret < 0)
DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
}
-next_mask:
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND)
- DPAA2_PMD_INFO("VLAN extend offload not supported");
- }
- return 0;
+ return ret;
}
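Because the function now returns -ENOTSUP instead of silently falling through, callers can detect the missing capability. An illustrative application-side check via the standard rte_eth_dev_vlan_filter() API, assuming DEV_RX_OFFLOAD_VLAN_FILTER was enabled at configure time so ethdev forwards the PMD's return value:

	int ret = rte_eth_dev_vlan_filter(port_id, vlan_id, 1);

	if (ret == -ENOTSUP)
		printf("port %u: HW VLAN filtering not available\n", port_id);
	else if (ret < 0)
		printf("port %u: VLAN filter add failed: %d\n", port_id, ret);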
static int
return 0;
}
+static int
+dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ int ret = -EINVAL;
+ unsigned int i;
+ const struct burst_info {
+ uint64_t flags;
+ const char *output;
+ } rx_offload_map[] = {
+ {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+ {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+ {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+ {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+ {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+ {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
+ {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+ };
+
+ /* Update Rx offload info */
+ for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+ if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
+ /* append every enabled offload, not just the first match */
+ strlcat(mode->info, rx_offload_map[i].output,
+ sizeof(mode->info));
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int
+dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ int ret = -EINVAL;
+ unsigned int i;
+ const struct burst_info {
+ uint64_t flags;
+ const char *output;
+ } tx_offload_map[] = {
+ {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ };
+
+ /* Update Tx offload info */
+ for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+ if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
+ strlcat(mode->info, tx_offload_map[i].output,
+ sizeof(mode->info));
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
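Once wired into the ops table (see the .rx_burst_mode_get/.tx_burst_mode_get entries below), these callbacks are reachable through the generic ethdev API. A usage sketch, assuming port_id names an initialized dpaa2 port:

	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
		printf("Rx burst mode:%s\n", mode.info);
	if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
		printf("Tx burst mode:%s\n", mode.info);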
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
#if !defined(RTE_LIBRTE_IEEE1588)
if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
#endif
- dpaa2_enable_ts = true;
+ dpaa2_enable_ts[dev->data->port_id] = true;
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
tx_l3_csum_offload = true;
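With a per-port flag, the fast path can gate timestamping on the receiving port alone rather than on a device-global setting. A simplified sketch of the check as the Rx burst path might perform it (the timestamp extraction itself is elided here):

	/* per-port check in the Rx path; mbuf->port is set during parsing */
	if (dpaa2_enable_ts[mbuf->port]) {
		/* read the HW annotation and attach the timestamp here */
	}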
uint16_t rx_queue_id,
uint16_t nb_rx_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
dev, rx_queue_id, mb_pool, rx_conf);
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ DPAA2_PMD_ERR("%p:Rx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
bpid = mempool_to_bpid(mb_pool);
ret = dpaa2_attach_bp_list(priv,
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
+ uint16_t nb_tx_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
+ const struct rte_eth_txconf *tx_conf)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
PMD_INIT_FUNC_TRACE();
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ DPAA2_PMD_ERR("%p:Tx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
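Both queue setup paths now fail fast instead of silently ignoring a deferred-start request. Illustrative caller-side effect, assuming the usual init flow provides dev_info, nb_rxd and mb_pool (the same pattern applies to rte_eth_tx_queue_setup()):

	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;

	rxconf.rx_deferred_start = 1;	/* not supported by dpaa2 */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
				     &rxconf, mb_pool);
	/* ret == -EINVAL here, where the flag was previously ignored */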
/* Return if queue already configured */
if (dpaa2_q->flow_id != 0xffff) {
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
- cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+ cong_notif_cfg.threshold_entry = nb_tx_desc;
/* Notify that the queue is not congested when the data in
* the queue is below this threshold.
*/
- cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+ cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
cong_notif_cfg.message_ctx = 0;
cong_notif_cfg.message_iova =
(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
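The congestion thresholds now track the configured ring size instead of fixed constants, keeping a 24-frame hysteresis band. Worked numbers, assuming nb_tx_desc = 512:

	/* congestion asserts once 512 frames are queued (threshold_entry ==
	 * nb_tx_desc) and clears when the queue drains below 488
	 * (threshold_exit == nb_tx_desc - 24); the 24-frame gap keeps the
	 * congestion state from flapping around a single watermark */
	uint32_t entry_thr = 512;	/* nb_tx_desc */
	uint32_t exit_thr = 512 - 24;	/* 488 */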
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
.tx_queue_release = dpaa2_dev_tx_queue_release,
+ .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
+ .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
.rx_queue_count = dpaa2_dev_rx_queue_count,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
.flow_ctrl_set = dpaa2_flow_ctrl_set,