/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"

static struct rte_dpaa2_driver rte_dpaa2_pmd;
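
/* Each network interface is backed by a DPNI (Data Path Network
 * Interface) object managed by the MC firmware. priv->token is the
 * handle returned by dpni_open() and accompanies every MC command
 * issued below.
 */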

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;
}
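
/* Allocate one contiguous array of dpaa2_queue structures and carve it
 * up: the first nb_rx_queues entries become the Rx virtual queues and
 * the remaining nb_tx_queues entries the Tx virtual queues.
 */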
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
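		/* Per-queue storage for QBMAN dequeue results: one DQRR
		 * ring's worth of qbman_result entries, used by the Rx
		 * burst routine when pulling frames from this queue.
		 */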
		dpaa2_q->q_storage->dq_storage[0] = rte_malloc(NULL,
			DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
			RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage->dq_storage[0])
			goto fail;
	}
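
	/* Tx queues occupy the tail of the same allocation; flow_id is
	 * preset to DPNI_NEW_FLOW_ID so dpaa2_dev_tx_queue_setup() can
	 * tell an unconfigured queue from a configured one.
	 */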
	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = DPNI_NEW_FLOW_ID;
		priv->tx_vq[i] = mc_q++;
	}
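
	/* Pair each distribution (flow) index of the default traffic
	 * class with one of the Rx virtual queues allocated above.
	 */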
	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
	     dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		rte_free(dpaa2_q->q_storage->dq_storage[0]);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Check for correct configuration */
	if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
	    data->nb_rx_queues > 1) {
		PMD_INIT_LOG(ERR, "Distribution is not enabled, "
			     "but more than one Rx queue is configured\n");
		return -1;
	}
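
	/* On DPAA2, RSS is implemented as DPNI flow distribution: the
	 * rss_hf mask is translated into a hash-extract configuration
	 * for the traffic class.
	 */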
	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Return in case number of Rx queues is 1 */
		if (data->nb_rx_queues == 1)
			return 0;
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			PMD_INIT_LOG(ERR, "Unable to set flow distribution."
				     " Please check queue config\n");
			return ret;
		}
	}

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
		     dev, rx_queue_id, mb_pool, rx_conf);
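
	/* A DPNI draws its Rx buffers from a DPBP (buffer pool) object.
	 * Attach the pool backing this mempool the first time a queue is
	 * set up with it (or whenever the pool changes).
	 */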
	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the tc id and flow id from given VQ id */
	flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
	memset(&cfg, 0, sizeof(struct dpni_queue));
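
	/* Store the queue handle in the FQ user context so the Rx burst
	 * path can recover the dpaa2_queue from each dequeue result.
	 */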
	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (uint64_t)(dpaa2_q);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
		return -1;
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
		return 0;

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
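
	/* With a single traffic class, Tx queues spread across the flows
	 * of TC 0; with multiple classes, each Tx queue gets its own
	 * traffic class with a single flow.
	 */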
	if (priv->num_tc == 1) {
		tc_id = 0;
		flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
	} else {
		tc_id = tx_queue_id;
		flow_id = 0;
	}

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
			     "tc_id=%d, flow=%d, ErrorCode=%x\n",
			     tc_id, flow_id, -ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
				     " ErrorCode = %x", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
			     ret, priv->hw_id);
		return ret;
	}

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error getting qdid: ErrorCode = %d\n", ret);
		return ret;
	}
	priv->qdid = qdid;
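
	/* Cache the frame queue ID (FQID) backing each Rx queue; the Rx
	 * burst path dequeues directly from these frame queues.
	 */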
	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			PMD_INIT_LOG(ERR, "Error getting flow "
				     "information: Error code = %d\n", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}
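
	/* Enable L3/L4 checksum validation on Rx and generation on Tx. */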
	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting RX L3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting RX L4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting TX L3 csum: Error = %d\n",
			     ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error setting TX L4 csum: Error = %d\n",
			     ret);
		return ret;
	}

	/* If there are checksum errors, send such frames to the normal
	 * path and set the error bit in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior:"
			     " code = %d\n", ret);
		return ret;
	}

	return 0;
}

/* This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
			     ret, priv->hw_id);
		return;
	}
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
			     " error code %d\n", ret);
		return;
	}
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "dpni is NULL");
		return -EINVAL;
	}

	/* Check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					mtu + ETH_VLAN_HLEN);
	if (ret) {
		PMD_DRV_LOG(ERR, "Setting the max frame length failed");
		return -1;
	}
	PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
	return 0;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.dev_infos_get = dpaa2_dev_info_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int i, ret, hw_id;
	int tot_size;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute,"
			     " error code %d\n", hw_id, ret);
		return -1;
	}

	priv->num_tc = attr.num_tcs;
	/* Only the default traffic class is used for distribution */
	for (i = 0; i < attr.num_tcs; i++) {
		priv->num_dist_per_tc[i] = attr.num_queues;
		break;
	}

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
		return -ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		return -ENOMEM;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
			     " Error Code = %d\n", ret);
		return -ret;
	}

	/* ... rx buffer layout ... */
	tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size,
				  DPAA2_PACKET_LAYOUT_ALIGN);
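
	/* The hardware writes frame status and parse results ahead of the
	 * frame data, so the data headroom programmed into the DPNI is the
	 * aligned total minus the software pass-through area and hardware
	 * annotation sizes.
	 */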
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_frame_status = 1;
	layout.data_head_room = tot_size
		- DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
		     tot_size, layout.data_head_room,
		     layout.private_data_size);
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
		return -1;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
			     " layout", ret);
		return -1;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
			     " layout", ret);
		return -1;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

	eth_dev->rx_pkt_burst = dpaa2_dev_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	return 0;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* Cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* Free all the queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* Free the memory allocated for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpni device with"
			     " error code %d\n", ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int diag;

	sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
				     " private port data\n");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa2_dev->device;
	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);