/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_ethdev.h"

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status will be stored.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
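
/*
 * Note on the two helpers above: they rely on struct rte_eth_link fitting
 * in a single 64-bit word so that rte_atomic64_cmpset() can copy it in one
 * atomic operation. The destination's current value is passed as the
 * "expected" value, so the cmpset only returns 0 (failure) if another
 * thread updated the link word concurrently.
 */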

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}
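
/*
 * Queue bookkeeping: one rte_malloc'd array of dpaa2_queue structures backs
 * every queue. The first nb_rx_queues entries are handed out as Rx queues
 * (each with its own dequeue storage), the remainder as Tx queues. Only
 * rx_vq[0], the base of the array, therefore needs to be freed at teardown.
 */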

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = DPNI_NEW_FLOW_ID;
		priv->tx_vq[i] = mc_q++;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
	     dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Check for correct configuration */
	if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
	    data->nb_rx_queues > 1) {
		PMD_INIT_LOG(ERR, "Distribution is not enabled, "
			     "but more than one Rx queue is configured\n");
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Return in case number of Rx queues is 1 */
		if (data->nb_rx_queues == 1)
			return 0;
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution, "
				     "please check queue config\n");
			return ret;
		}
	}
	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		     dev, rx_queue_id, mb_pool, rx_conf);

	/* Attach the mempool's buffer pool to this interface if needed */
	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The last 6 bits select annotation, context and data
		 * stashing; setting 01 01 00 (0x14) enables 1 line of data
		 * and 1 line of annotation stashing.
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
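
/*
 * Tx queue mapping: with a single traffic class every Tx queue becomes a
 * flow within TC 0; with multiple traffic classes each Tx queue gets its
 * own TC with flow 0. DPNI_NEW_FLOW_ID marks a queue that has not been
 * configured yet, which is what makes the setup below idempotent.
 */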

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	if (priv->num_tc == 1) {
		tc_id = 0;
		flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
	} else {
		tc_id = tx_queue_id;
		flow_id = 0;
	}

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow=%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}
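
/*
 * Queue release is a no-op here: queue memory lives in the single array
 * allocated by dpaa2_alloc_rx_tx_queues() and is only freed as a whole in
 * dpaa2_dev_uninit().
 */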

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_rx)
		return ptypes;
	return NULL;
}
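
/*
 * dev_start sequence: enable the DPNI object, cache the Tx queuing
 * destination ID (qdid) and the per-queue frame queue IDs (fqid) that the
 * Rx/Tx burst routines need, enable L3/L4 checksum validation and
 * generation, and route erroneous frames to the normal path with the error
 * recorded in the frame annotation.
 */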

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in getting qdid: ErrorCode = %d\n",
			     ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in getting flow "
				     "information: Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting RX L3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting RX L4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting TX L3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting TX L4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	/* On checksum errors, send the frames to the normal path and record
	 * the error in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior: "
			     "code = %d\n", ret);
		return ret;
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter (by disabling the DPNI
 * object) and clears the recorded link status.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}
}

static void
dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU %d configured for the device\n", mtu);
	return 0;
}
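
/*
 * DPNI statistics are read by page: page 0 carries the ingress frame/byte
 * counters, page 1 the egress counters, and page 2 the discard counters.
 * They map onto rte_eth_stats as ipackets/ibytes (page 0), opackets/obytes
 * (page 1) and ierrors/oerrors/imissed (page 2) respectively.
 */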

static void
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL");
		return;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, &value);
	if (retcode)
		goto err;

	stats->ierrors = value.page_2.ingress_discarded_frames;
	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return;

err:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
			retcode);
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "error: dpni is NULL");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
	return 0;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.stats_reset = dpaa2_dev_stats_reset,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
};
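
/*
 * Per-device initialization (primary process only): open and reset the
 * DPNI object through the MC portal, size the Rx/Tx queue arrays from the
 * DPNI attributes, program the Tx and Tx-confirm buffer layouts to carry
 * frame status, then plug in the ops table and the burst routines.
 */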

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int i, ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_tc = attr.num_tcs;
	for (i = 0; i < attr.num_tcs; i++) {
		priv->num_dist_per_tc[i] = attr.num_queues;
		break;
	}

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed: Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

	eth_dev->rx_pkt_burst = dpaa2_dev_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int diag;

	sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa2_dev->device;
	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);