/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"

static struct rte_dpaa2_driver rte_dpaa2_pmd;

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev  Pointer to the structure rte_eth_dev to read from.
 * @param link Pointer to the buffer to be saved with the link status.
 * @return Zero on success, negative value on failure.
 */
static inline int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev  Pointer to the structure rte_eth_dev to write to.
 * @param link Pointer to the buffer holding the link status to save.
 * @return Zero on success, negative value on failure.
 */
static inline int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}

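/* Allocate the software queue structures (and per-queue dequeue storage
 * for the Rx queues) that back the DPNI Rx/Tx queues. Called from
 * dpaa2_dev_init(); on any allocation failure, everything allocated so
 * far is rolled back.
 */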
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *mc_q, *mcq;
	struct dpaa2_queue *dpaa2_q;
	uint32_t tot_queues;
	uint32_t vq_id;
	uint16_t dist_idx;
	int i;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		dpaa2_q->q_storage->dq_storage[0] = rte_malloc(NULL,
			DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
			RTE_CACHE_LINE_SIZE);
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->flow_id = DPNI_NEW_FLOW_ID;
		priv->tx_vq[i] = mc_q++;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
	     dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		rte_free(dpaa2_q->q_storage->dq_storage[0]);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

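/* Validate the requested configuration: multiple Rx queues are only
 * supported when RSS (flow distribution) is enabled, in which case the
 * distribution is programmed from the configured rss_hf hash fields.
 */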
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Check for correct configuration */
	if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
	    data->nb_rx_queues > 1) {
		PMD_INIT_LOG(ERR, "Distribution is not enabled, "
			     "but Rx queues are more than 1\n");
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Return in case number of Rx queues is 1 */
		if (data->nb_rx_queues == 1)
			return 0;
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "unable to set flow distribution,"
				     " please check queue config\n");
			return ret;
		}
	}
	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		     dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the tc id and flow id from the given VQ id */
	flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	/* If this is an ls2088 or rev2 device, enable stashing */
	if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The last 6 bits select annotation, context and data
		 * stashing; 01 01 00 (0x14) enables 1 line of annotation
		 * and 1 line of context stashing.
		 */
		cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: %d\n", ret);
		return -1;
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

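/* Function to setup TX flow information. It maps the requested Tx queue
 * to a traffic class/flow pair and programs it in the DPNI; the tx
 * confirmation mode is configured once, when queue 0 is set up.
 */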
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	if (priv->num_tc == 1) {
		tc_id = 0;
		flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
	} else {
		tc_id = tx_queue_id;
		flow_id = 0;
	}

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow=%d ErrorCode = %x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_rx)
		return ptypes;
	return NULL;
}

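/* Enable the DPNI, resolve the frame queue IDs of the configured Rx
 * queues, turn on the Rx/Tx L3/L4 checksum offloads and let frames with
 * checksum errors continue on the normal path with the error recorded
 * in the frame annotation.
 */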
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error getting qdid: ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error to get flow "
				     "information Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
		return ret;
	}

	/* On checksum errors, send the frames to the normal path and note
	 * the error in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
			     "code = %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	dpaa2_dev_atomic_write_link_status(dev, &link);
}

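/* Reset the DPNI object so that it returns to its initial state. */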
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret)
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
}

static void
dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
}

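/* Update the maximum Rx frame length accepted by the DPNI for the
 * requested MTU; the programmed frame length adds the VLAN header
 * allowance on top of the MTU.
 */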
static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return -EINVAL;
	}

	/* Check that the MTU is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	/* Set the max Rx frame length as 'mtu' + maximum Ethernet
	 * header length.
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU %d configured for the device\n", mtu);
	return 0;
}

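/* Fill rte_eth_stats from the DPNI statistics pages: page 0 holds the
 * ingress counters, page 1 the egress counters and page 2 the discard
 * counters.
 */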
static void
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	if (!stats) {
		RTE_LOG(ERR, PMD, "stats is NULL");
		return;
	}

	/* Get counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, &value);
	if (retcode)
		goto err;

	stats->ierrors = value.page_2.ingress_discarded_frames;
	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return;

err:
	RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
		retcode);
}

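/* Clear all DPNI counters. */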
static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
			retcode);
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "error: dpni is NULL");
		return 0;
	}
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
		return -1;
	}

	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
	return 0;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.stats_reset = dpaa2_dev_stats_reset,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
};

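/* First-time initialization of the ethdev: open and reset the DPNI
 * object, read its attributes to size the Rx/Tx queue arrays, program
 * the Rx/Tx/Tx-conf buffer layouts and hook up the ops and burst
 * functions.
 */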
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id, i;
	uint16_t tot_size;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute,"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	priv->num_tc = attr.num_tcs;
	for (i = 0; i < attr.num_tcs; i++)
		priv->num_dist_per_tc[i] = attr.num_queues;

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		return -ENOMEM;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
			     " Error Code = %d\n", ret);
		return -1;
	}

	/* ... rx buffer layout ... */
	tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size,
				  DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_frame_status = 1;
	layout.data_head_room = tot_size
		- DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
		     tot_size, layout.data_head_room, layout.private_data_size);
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
		return -1;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
			     " layout", ret);
		return -1;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
			     " layout", ret);
		return -1;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

	eth_dev->rx_pkt_burst = dpaa2_dev_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	return 0;
}

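/* Undo dpaa2_dev_init(): close the DPNI, release the queue storage, the
 * MAC address memory and the ops/burst function hooks.
 */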
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* Clean up per-queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* Free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* Free the memory allocated for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at the underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret)
		PMD_INIT_LOG(ERR, "Failure closing dpni device with"
			     " error code %d\n", ret);

	/* Free the memory allocated for the dpni device */
	priv->hw = NULL;
	free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

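/* Bus probe callback: allocate an ethdev (and, in the primary process,
 * its private data) for the dpni object and run dpaa2_dev_init() on it.
 */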
static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int diag;

	sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa2_dev->device;
	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

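/* Bus remove callback: uninitialize and release the ethdev created at
 * probe time.
 */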
static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);