/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_ethdev.h"
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
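/*
 * NOTE: the two helpers above copy struct rte_eth_link with a single
 * 64-bit compare-and-set.  This assumes the link structure packs into
 * one 64-bit word (true for this DPDK release); rte_atomic64_cmpset()
 * returns 0, and the helpers report failure, when another thread
 * updated the word between the read and the set.
 */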
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
	     dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}
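/*
 * NOTE: all Rx and Tx queue structures above are carved out of a single
 * contiguous rte_malloc() region (mc_q).  The error paths therefore only
 * free the per-queue q_storage/cscn allocations and release the whole
 * region once, through priv->rx_vq[0].
 */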
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Check for correct configuration */
	if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
	    data->nb_rx_queues > 1) {
		PMD_INIT_LOG(ERR, "Distribution is not enabled, "
			     "but Rx queues are more than 1\n");
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Return in case number of Rx queues is 1 */
		if (data->nb_rx_queues == 1)
			return 0;
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "Unable to set flow distribution; "
				     "please check queue config\n");
			return ret;
		}
	}
	return 0;
}
/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		     dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the tc id and flow id from given VQ id */
	flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - the last 6 bits represent annotation, context
		 * and data stashing; setting 01 01 00 (0x14) enables
		 * 1 line of data and 1 line of annotation.
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
			     rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in setting taildrop:"
				     " err = %d\n", ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
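/*
 * NOTE: CONG_THRESHOLD_RX_Q used above is a per-queue byte threshold
 * (a constant assumed to come from dpaa2_ethdev.h); once the queue
 * backlog crosses it, further enqueues are tail-dropped so that one
 * slow queue cannot exhaust the buffer pool shared by all queues.
 */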
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	if (priv->num_tc == 1) {
		tc_id = 0;
		flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
	} else {
		tc_id = tx_queue_id;
		flow_id = 0;
	}

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow_id=%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (priv->flags & DPAA2_TX_CGR_SUPPORT) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
		/* Notify about congestion when the queue size is 32 KB */
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			PMD_INIT_LOG(ERR,
			   "Error in setting tx congestion notification: = %d",
			   -ret);
			return -ret;
		}
	}

	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}
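/*
 * NOTE: with congestion notification enabled above, hardware writes a
 * congestion state change notification (CSCN) record into
 * dpaa2_q->cscn when the Tx queue crosses the enter/exit thresholds;
 * the Tx burst path can poll that memory and back off instead of
 * enqueuing into an already congested queue.
 */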
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to get qdid: ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to get flow "
				     "information Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum: Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum: Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum: Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum: Error = %d\n", ret);
		return ret;
	}

	/* On checksum errors, send frames to normal path and record the
	 * error in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior:"
			     " code = %d\n", ret);
		return ret;
	}

	return 0;
}
/**
 * This routine disables all traffic on the adapter by disabling the
 * underlying DPNI object.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}
}
static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
}
static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
	return 0;
}
static
void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL");
		return;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, &value);
	if (retcode)
		goto err;

	stats->ierrors = value.page_2.ingress_discarded_frames;
	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return;

err:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
	return;
}
static
void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
	return;
}
/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "error: dpni is NULL");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
	return 0;
}
static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.stats_reset = dpaa2_dev_stats_reset,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
};
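/*
 * NOTE: the Rx/Tx burst handlers are intentionally not part of
 * eth_dev_ops; they are installed directly on the rte_eth_dev
 * (rx_pkt_burst/tx_pkt_burst) in dpaa2_dev_init() below, since DPDK
 * dispatches fast-path calls through those pointers without going
 * through the ops table.
 */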
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int i, ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_tc = attr.num_tcs;
	for (i = 0; i < attr.num_tcs; i++) {
		priv->num_dist_per_tc[i] = attr.num_queues;
		break;
	}

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	priv->flags |= DPAA2_TX_CGR_SUPPORT;
	PMD_INIT_LOG(INFO, "Enable the tx congestion control support");

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed: Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}
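/*
 * NOTE: rte_fslmc_vfio_dmamap() (called at the end of dpaa2_dev_init)
 * maps the process's DPDK memory segments through VFIO so the QBMan
 * hardware can DMA to and from mbufs directly; the assumption here is
 * that all memory segments are finalized before the first device is
 * initialized.
 */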
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int diag;

	snprintf(ethdev_name, sizeof(ethdev_name), "dpni-%d",
		 dpaa2_dev->object_id);

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa2_dev->device;
	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}
static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
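/*
 * RTE_PMD_REGISTER_DPAA2 ties this driver to DPNI objects on the fslmc
 * bus, so each dpni object discovered during rte_eal_init() surfaces as
 * a regular ethdev port.  A minimal sketch of bringing such a port up
 * with the generic ethdev API (port_id, queue counts and "mbuf_pool"
 * are application-side assumptions, not part of this driver):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *	};
 *	uint16_t q;
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	for (q = 0; q < nb_rxq; q++)
 *		rte_eth_rx_queue_setup(port_id, q, 0, 0, NULL, mbuf_pool);
 *	for (q = 0; q < nb_txq; q++)
 *		rte_eth_tx_queue_setup(port_id, q, 0, 0, NULL);
 *	rte_eth_dev_start(port_id);
 */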