X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=2cde55e7ccc4c5fb9e0ee9a37eb9b25aa44a00db;hb=f1453604557ce04b75fdd8bcc5824d46835a26f6;hp=4c6585f354c1c0e5ad8101423c97048a5b676f8e;hpb=c720c5f6bb7e438adcc43be64b931dd194d762b7;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 4c6585f354..2cde55e7cc 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -26,6 +26,7 @@
 #include <dpaa2_hw_dpio.h>
 #include <mc/fsl_dpmng.h>
 #include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
 #include <fsl_qbman_debug.h>
 
 #define DRIVER_LOOPBACK_MODE "drv_loopback"
@@ -44,6 +45,7 @@ static uint64_t dev_rx_offloads_sup =
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_RSS_HASH |
 		DEV_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
@@ -117,7 +119,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -127,8 +129,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	}
 
 	if (on)
-		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
-				       priv->token, vlan_id);
+		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
+				       vlan_id, 0, 0, 0);
 	else
 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
 					  priv->token, vlan_id);
@@ -144,7 +146,7 @@ static int
 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -182,7 +184,7 @@ dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
 	      uint16_t tpid)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 	int ret = -ENOTSUP;
 
 	PMD_INIT_FUNC_TRACE();
@@ -220,8 +222,7 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
 		     size_t fw_size)
 {
 	int ret;
-	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 	struct mc_soc_version mc_plat_info = {0};
 	struct mc_version mc_ver_info = {0};
 
@@ -292,7 +293,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
-	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
+	if (priv->tx_conf_en)
+		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
+	else
+		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
 	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
 			  RTE_CACHE_LINE_SIZE);
 	if (!mc_q) {
@@ -327,6 +331,28 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			goto fail_tx;
 	}
 
+	if (priv->tx_conf_en) {
+		/*Setup tx confirmation queues*/
+		for (i = 0; i < priv->nb_tx_queues; i++) {
+			mc_q->eth_data = dev->data;
+			mc_q->tc_index = i;
+			mc_q->flow_id = 0;
+			priv->tx_conf_vq[i] = mc_q++;
+			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+			dpaa2_q->q_storage =
+				rte_malloc("dq_storage",
+					sizeof(struct queue_storage_info_t),
+					RTE_CACHE_LINE_SIZE);
+			if (!dpaa2_q->q_storage)
+				goto fail_tx_conf;
+
+			memset(dpaa2_q->q_storage, 0,
+			       sizeof(struct queue_storage_info_t));
+			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+				goto fail_tx_conf;
+		}
+	}
+
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
 		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
@@ -336,6 +362,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	}
 
 	return 0;
+fail_tx_conf:
+	i -= 1;
+	while (i >= 0) {
+		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+		rte_free(dpaa2_q->q_storage);
+		priv->tx_conf_vq[i--] = NULL;
+	}
+	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
@@ -379,6 +413,14 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
+		if (priv->tx_conf_en) {
+			/* cleanup tx conf queue storage */
+			for (i = 0; i < priv->nb_tx_queues; i++) {
+				dpaa2_q = (struct dpaa2_queue *)
+						priv->tx_conf_vq[i];
+				rte_free(dpaa2_q->q_storage);
+			}
+		}
 		/*free memory for all queues (RX+TX) */
 		rte_free(priv->rx_vq[0]);
 		priv->rx_vq[0] = NULL;
@@ -389,7 +431,7 @@ static int
 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
@@ -420,12 +462,17 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
-				priv->token, eth_conf->rxmode.max_rx_pkt_len);
+				priv->token, eth_conf->rxmode.max_rx_pkt_len
+				- RTE_ETHER_CRC_LEN);
 			if (ret) {
 				DPAA2_PMD_ERR(
 					"Unable to set mtu. check config");
 				return ret;
 			}
+			dev->data->mtu =
+				dev->data->dev_conf.rxmode.max_rx_pkt_len -
+				RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
+				VLAN_TAG_SIZE;
 		} else {
 			return -1;
 		}
@@ -524,7 +571,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 struct rte_mempool *mb_pool)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct dpaa2_queue *dpaa2_q;
 	struct dpni_queue cfg;
 	uint8_t options = 0;
@@ -670,10 +717,13 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
 		priv->tx_vq[tx_queue_id];
-	struct fsl_mc_io *dpni = priv->hw;
+	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
+		priv->tx_conf_vq[tx_queue_id];
+	struct fsl_mc_io *dpni = dev->process_private;
 	struct dpni_queue tx_conf_cfg;
 	struct dpni_queue tx_flow_cfg;
 	uint8_t options = 0, flow_id;
+	struct dpni_queue_id qid;
 	uint32_t tc_id;
 	int ret;
 
@@ -704,9 +754,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (tx_queue_id == 0) {
 		/*Set tx-conf and error configuration*/
-		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
-						    priv->token,
-						    DPNI_CONF_DISABLE);
+		if (priv->tx_conf_en)
+			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+							    priv->token,
+							    DPNI_CONF_AFFINE);
+		else
+			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+							    priv->token,
+							    DPNI_CONF_DISABLE);
 		if (ret) {
 			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
 				      "err=%d", ret);
@@ -715,6 +770,15 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	dpaa2_q->tc_index = tc_id;
 
+	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+			     DPNI_QUEUE_TX, dpaa2_q->tc_index,
+			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+	if (ret) {
+		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
+		return -1;
+	}
+	dpaa2_q->fqid = qid.fqid;
+
 	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
 		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
@@ -748,6 +812,31 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
 	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+
+	if (priv->tx_conf_en) {
+		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
+		options = options | DPNI_QUEUE_OPT_USER_CTX;
+		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
+		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
+			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
+		if (ret) {
+			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
+			      "tc_index=%d, flow=%d err=%d",
+			      dpaa2_tx_conf_q->tc_index,
+			      dpaa2_tx_conf_q->flow_id, ret);
+			return -1;
+		}
+
+		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
+		if (ret) {
+			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
+			return -1;
+		}
+		dpaa2_tx_conf_q->fqid = qid.fqid;
+	}
 	return 0;
 }
 
@@ -756,7 +845,8 @@ dpaa2_dev_rx_queue_release(void *q __rte_unused)
 {
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni =
+		(struct fsl_mc_io *)priv->eth_dev->process_private;
 	uint8_t options = 0;
 	int ret;
 	struct dpni_queue cfg;
@@ -854,7 +944,7 @@ dpaa2_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *dev = param;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int ret;
 	int irq_index = DPNI_IRQ_INDEX;
 	unsigned int status = 0, clear = 0;
@@ -893,7 +983,7 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
 {
 	int err = 0;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int irq_index = DPNI_IRQ_INDEX;
 	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
 
@@ -923,7 +1013,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 	struct rte_dpaa2_device *dpaa2_dev;
 	struct rte_eth_dev_data *data = dev->data;
 	struct dpaa2_dev_priv *priv = data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct dpni_queue cfg;
 	struct dpni_error_cfg err_cfg;
 	uint16_t qdid;
@@ -1016,7 +1106,7 @@ static void
 dpaa2_dev_stop(struct rte_eth_dev *dev)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int ret;
 	struct rte_eth_link link;
 	struct rte_intr_handle *intr_handle = dev->intr_handle;
@@ -1056,7 +1146,7 @@ static void
 dpaa2_dev_close(struct rte_eth_dev *dev)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int ret;
 	struct rte_eth_link link;
 
@@ -1081,7 +1171,7 @@ dpaa2_dev_promiscuous_enable(
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1107,7 +1197,7 @@ dpaa2_dev_promiscuous_disable(
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1137,7 +1227,7 @@ dpaa2_dev_allmulticast_enable(
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1158,7 +1248,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1183,7 +1273,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
 				+ VLAN_TAG_SIZE;
 
@@ -1199,7 +1289,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > RTE_ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.offloads &=
+		dev->data->dev_conf.rxmode.offloads |=
 			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
@@ -1211,7 +1301,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * Maximum Ethernet header length
 	 */
 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
-					frame_size);
+					frame_size - RTE_ETHER_CRC_LEN);
 	if (ret) {
 		DPAA2_PMD_ERR("Setting the max frame length failed");
 		return -1;
@@ -1228,7 +1318,7 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1237,8 +1327,8 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
-				priv->token, addr->addr_bytes);
+	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
+				addr->addr_bytes, 0, 0, 0);
 	if (ret)
 		DPAA2_PMD_ERR(
 			"error: Adding the MAC ADDR failed: err = %d", ret);
@@ -1251,7 +1341,7 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct rte_eth_dev_data *data = dev->data;
 	struct rte_ether_addr *macaddr;
 
@@ -1277,7 +1367,7 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1301,7 +1391,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
 			struct rte_eth_stats *stats)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int32_t retcode;
 	uint8_t page0 = 0, page1 = 1, page2 = 2;
 	union dpni_statistics value;
@@ -1381,7 +1471,7 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		     unsigned int n)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int32_t retcode;
 	union dpni_statistics value[5] = {};
 	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
@@ -1461,7 +1551,8 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 
 	if (!ids) {
 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
-		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+		struct fsl_mc_io *dpni =
+			(struct fsl_mc_io *)dev->process_private;
 		int32_t retcode;
 		union dpni_statistics value[5] = {};
 
@@ -1543,7 +1634,7 @@ static int
 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int retcode;
 	int i;
 	struct dpaa2_queue *dpaa2_q;
@@ -1586,7 +1677,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct rte_eth_link link;
 	struct dpni_link_state state = {0};
 
@@ -1634,7 +1725,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
 	struct dpni_link_state state = {0};
 
 	priv = dev->data->dev_private;
-	dpni = (struct fsl_mc_io *)priv->hw;
+	dpni = (struct fsl_mc_io *)dev->process_private;
 
 	if (dpni == NULL) {
 		DPAA2_PMD_ERR("dpni is NULL");
@@ -1690,7 +1781,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	priv = dev->data->dev_private;
-	dpni = (struct fsl_mc_io *)priv->hw;
+	dpni = (struct fsl_mc_io *)dev->process_private;
 
 	if (dpni == NULL) {
 		DPAA2_PMD_ERR("Device has not yet been configured");
@@ -1744,7 +1835,7 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	PMD_INIT_FUNC_TRACE();
 
 	priv = dev->data->dev_private;
-	dpni = (struct fsl_mc_io *)priv->hw;
+	dpni = (struct fsl_mc_io *)dev->process_private;
 
 	if (dpni == NULL || fc_conf == NULL) {
 		DPAA2_PMD_ERR("device not configured");
@@ -1800,7 +1891,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	PMD_INIT_FUNC_TRACE();
 
 	priv = dev->data->dev_private;
-	dpni = (struct fsl_mc_io *)priv->hw;
+	dpni = (struct fsl_mc_io *)dev->process_private;
 
 	if (dpni == NULL) {
 		DPAA2_PMD_ERR("dpni is NULL");
@@ -1916,15 +2007,15 @@ dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id,
-		uint16_t dpcon_id,
+		struct dpaa2_dpcon_dev *dpcon,
 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
 	uint8_t flow_id = dpaa2_ethq->flow_id;
 	struct dpni_queue cfg;
-	uint8_t options;
+	uint8_t options, priority;
 	int ret;
 
 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
@@ -1936,11 +2027,14 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 	else
 		return -EINVAL;
 
+	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
+		   (dpcon->num_priorities - 1);
+
 	memset(&cfg, 0, sizeof(struct dpni_queue));
 	options = DPNI_QUEUE_OPT_DEST;
 	cfg.destination.type = DPNI_DEST_DPCON;
-	cfg.destination.id = dpcon_id;
-	cfg.destination.priority = queue_conf->ev.priority;
+	cfg.destination.id = dpcon->dpcon_id;
+	cfg.destination.priority = priority;
 
 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
 		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
@@ -2001,7 +2095,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id)
 {
 	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
 	uint8_t flow_id = dpaa2_ethq->flow_id;
 	struct dpni_queue cfg;
@@ -2100,6 +2194,15 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.rss_hash_update = dpaa2_dev_rss_hash_update,
 	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
 	.filter_ctrl = dpaa2_dev_flow_ctrl,
+#if defined(RTE_LIBRTE_IEEE1588)
+	.timesync_enable = dpaa2_timesync_enable,
+	.timesync_disable = dpaa2_timesync_disable,
+	.timesync_read_time = dpaa2_timesync_read_time,
+	.timesync_write_time = dpaa2_timesync_write_time,
+	.timesync_adjust_time = dpaa2_timesync_adjust_time,
+	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
+#endif
 };
 
 /* Populate the mac address from physically available (u-boot/firmware) and/or
@@ -2221,6 +2324,14 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
+	if (!dpni_dev) {
+		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
+		return -1;
+	}
+	dpni_dev->regs = rte_mcp_ptr_list[0];
+	eth_dev->process_private = (void *)dpni_dev;
+
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		/* In case of secondary, only burst and ops API need to be
@@ -2241,14 +2352,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
 
 	hw_id = dpaa2_dev->object_id;
-
-	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
-	if (!dpni_dev) {
-		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
-		return -1;
-	}
-
-	dpni_dev->regs = rte_mcp_ptr_list[0];
 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
 	if (ret) {
 		DPAA2_PMD_ERR(
@@ -2300,6 +2403,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	priv->max_mac_filters = attr.mac_filter_entries;
 	priv->max_vlan_filters = attr.vlan_filter_entries;
 	priv->flags = 0;
+#if defined(RTE_LIBRTE_IEEE1588)
+	priv->tx_conf_en = 1;
+#else
+	priv->tx_conf_en = 0;
+#endif
 
 	/* Allocate memory for hardware structure for queues */
 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
@@ -2332,7 +2440,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* ... tx buffer layout ... */
 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	if (priv->tx_conf_en) {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+		layout.pass_timestamp = true;
+	} else {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	}
 	layout.pass_frame_status = 1;
 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
 				     DPNI_QUEUE_TX, &layout);
@@ -2343,7 +2457,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* ... tx-conf and error buffer layout ... */
 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	if (priv->tx_conf_en) {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+		layout.pass_timestamp = true;
+	} else {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	}
 	layout.pass_frame_status = 1;
 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
 				     DPNI_QUEUE_TX_CONFIRM, &layout);
@@ -2386,6 +2506,37 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 		}
 	}
 
+	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
+					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
+					+ VLAN_TAG_SIZE);
+	if (ret) {
+		DPAA2_PMD_ERR("Unable to set mtu. check config");
+		goto init_err;
+	}
+
+	/*TODO To enable soft parser support DPAA2 driver needs to integrate
+	 * with external entity to receive byte code for software sequence
+	 * and same will be offload to the H/W using MC interface.
+	 * Currently it is assumed that DPAA2 driver has byte code by some
+	 * mean and same if offloaded to H/W.
+	 */
+	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
+		WRIOP_SS_INITIALIZER(priv);
+		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
+		if (ret < 0) {
+			DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
+				      ret);
+			return ret;
+		}
+
+		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
+							 DPNI_SS_INGRESS);
+		if (ret < 0) {
+			DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
+				      ret);
+			return ret;
+		}
+	}
 	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
 	return 0;
 init_err:
@@ -2397,7 +2548,7 @@ static int
 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private;
 	int i, ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2424,6 +2575,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
 
 	/* Free the allocated memory for ethernet private data and dpni*/
 	priv->hw = NULL;
+	eth_dev->process_private = NULL;
 	rte_free(dpni);
 
 	for (i = 0; i < MAX_TCS; i++) {
@@ -2447,6 +2599,7 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 		struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_eth_dev *eth_dev;
+	struct dpaa2_dev_priv *dev_priv;
 	int diag;
 
 	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
@@ -2463,20 +2616,25 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
 		if (!eth_dev)
 			return -ENODEV;
-		eth_dev->data->dev_private = rte_zmalloc(
-						"ethdev private structure",
-						sizeof(struct dpaa2_dev_priv),
-						RTE_CACHE_LINE_SIZE);
-		if (eth_dev->data->dev_private == NULL) {
+		dev_priv = rte_zmalloc("ethdev private structure",
+				       sizeof(struct dpaa2_dev_priv),
+				       RTE_CACHE_LINE_SIZE);
+		if (dev_priv == NULL) {
			DPAA2_PMD_CRIT(
 				"Unable to allocate memory for private data");
 			rte_eth_dev_release_port(eth_dev);
 			return -ENOMEM;
 		}
+		eth_dev->data->dev_private = (void *)dev_priv;
+		/* Store a pointer to eth_dev in dev_private */
+		dev_priv->eth_dev = eth_dev;
+		dev_priv->tx_conf_en = 0;
 	} else {
 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
-		if (!eth_dev)
+		if (!eth_dev) {
+			DPAA2_PMD_DEBUG("returning enodev");
 			return -ENODEV;
+		}
 	}
 
 	eth_dev->device = &dpaa2_dev->device;
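
Note on the recurring change above (a hedged sketch, not part of the patch): the diff moves the `fsl_mc_io` handle from `priv->hw`, which sits inside `dev_private` in shared memory common to all DPDK processes, to `eth_dev->process_private`, which is local to each process. That lets primary and secondary processes each keep their own mapping of the MC portal. The helper name below is hypothetical; `struct fsl_mc_io`, its `regs` field, and the per-process portal address are taken from the patch itself.

/* Sketch: per-process MC I/O setup, mirroring what dpaa2_dev_init() does
 * in the patch above. Assumes the DPDK 19.x driver-side ethdev header. */
#include <errno.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <fsl_mc_sys.h>		/* struct fsl_mc_io */

static int
example_set_process_private(struct rte_eth_dev *eth_dev, void *mcp_addr)
{
	struct fsl_mc_io *mc_io = rte_malloc(NULL, sizeof(*mc_io), 0);

	if (!mc_io)
		return -ENOMEM;
	mc_io->regs = mcp_addr;		  /* this process's portal mapping */
	eth_dev->process_private = mc_io; /* never placed in shared memory */
	return 0;
}

Because every ethdev op in the patch now fetches the handle via dev->process_private (or priv->eth_dev->process_private where only the queue is available), a secondary process can issue MC commands through its own portal without touching the primary's pointer.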