/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

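/*
 * Illustrative sketch (not part of the driver): the capability sets above
 * surface to applications through rte_eth_dev_info_get() (see
 * dpaa2_dev_info_get() below), where supported and non-disableable offloads
 * are reported together. A minimal application-side check, assuming a valid
 * port_id and port_conf:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_FILTER)
 *		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 */
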
/* enable timestamp in mbuf */
enum pmd_dpaa2_ts dpaa2_enable_ts;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

__rte_experimental void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
	dpaa2_enable_ts = enable;
}

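/*
 * Usage sketch (illustrative): an application can enable per-packet Rx
 * timestamps before starting the port, assuming PMD_DPAA2_ENABLE_TS is the
 * enable value of enum pmd_dpaa2_ts declared in rte_pmd_dpaa2.h:
 *
 *	#include <rte_pmd_dpaa2.h>
 *
 *	rte_pmd_dpaa2_set_timestamp(PMD_DPAA2_ENABLE_TS);
 */
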
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);
	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			return ret;
		/* Remove the first configured TPID and retry the add */
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			return ret;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}

	return ret;
}

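/*
 * Usage sketch (illustrative): a custom outer TPID reaches
 * dpaa2_vlan_tpid_set() above through the generic ethdev API; standard
 * TPIDs (0x8100, 0x88A8) return early without any MC command:
 *
 *	ret = rte_eth_dev_set_vlan_ether_type(port_id,
 *					      ETH_VLAN_TYPE_OUTER, 0x9100);
 */
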
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution."
				      " Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting RX L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting RX L4 csum: err = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting TX L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting TX L4 csum: err = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bits represent annotation, context
		 * stashing, data stashing; setting 01 01 00 (0x14)
		 * (in following order -> DS AS CS)
		 * enables 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

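/*
 * Usage sketch (illustrative): the setup above is reached through the
 * standard ethdev call; nb_rx_desc and socket_id are unused by this PMD,
 * and mb_pool must be a dpaa2 hardware-backed mempool so that
 * mempool_to_bpid() can resolve a buffer pool id:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     NULL, mb_pool);
 */
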
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

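/*
 * Usage sketch (illustrative): applications can poll the hardware backlog
 * of an Rx queue through the ethdev wrapper, which lands in the routine
 * above. The count reflects frames still held in the QBMAN frame queue,
 * not mbufs already retrieved by rte_eth_rx_burst():
 *
 *	uint32_t pending = rte_eth_rx_queue_count(port_id, 0);
 */
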
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * DPAA2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors: send them to normal path
	 * and record them in the frame annotation
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

	return 0;
}

/**
 * This routine disables all traffic on the adapter by disabling the DPNI
 * and clearing the recorded link status.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on while the port is in promiscuous mode */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

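/*
 * Usage sketch (illustrative): an MTU whose resulting frame size exceeds
 * ETHER_MAX_LEN toggles the jumbo offload flag as handled above, assuming
 * the frame still fits within DPAA2_MAX_RX_PKT_LEN:
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 */
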
static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i] = 0;
		stats->q_obytes[i] = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return retcode;
}

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

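/*
 * Usage sketch (illustrative): retrieving the extended stats exposed by
 * dpaa2_xstats_strings[], assuming a valid port_id:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 */
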
static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	/* Reset the per queue stats in dpaa2_queue structure */
	for (i = 0; i < priv->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		if (dpaa2_q)
			dpaa2_q->rx_pkts = 0;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_q)
			dpaa2_q->tx_pkts = 0;
	}

	return;

error:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo - we may have to manually clean up queues. */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * improper.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable TX side flow control only (send Pause frames);
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable RX side flow control only (handle Pause frames);
		 * OPT_PAUSE set;
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control;
		 * OPT_PAUSE not set,
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

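/*
 * Usage sketch (illustrative): requesting symmetric pause maps to
 * OPT_PAUSE set / ASYM_PAUSE cleared in the switch above:
 *
 *	struct rte_eth_fc_conf fc_conf = { .mode = RTE_FC_FULL };
 *
 *	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */
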
static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

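/*
 * Usage sketch (illustrative): RSS can be retuned at runtime; rss_key is
 * ignored because the hash key is not programmable on this hardware:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *	};
 *
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */
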
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
			!eth_priv->en_ordered) {
		struct opr_cfg ocfg;

		/* Restoration window size = 256 frames */
		ocfg.oprrws = 3;
		/* Restoration window size = 512 frames for LX2 */
		if (dpaa2_svr_family == SVR_LX2160A)
			ocfg.oprrws = 4;
		/* Auto advance NESN window enabled */
		ocfg.oa = 1;
		/* Late arrival window size disabled */
		ocfg.olws = 0;
		/* ORL resource exhaustion advance NESN disabled */
		ocfg.oeane = 0;
		/* Loose ordering enabled */
		ocfg.oloe = 1;
		eth_priv->en_loose_ordered = 1;
		/* Strict ordering enabled if explicitly set */
		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
			ocfg.oloe = 0;
			eth_priv->en_loose_ordered = 0;
		}

		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
				   dpaa2_ethq->tc_index, flow_id,
				   OPR_OPT_CREATE, &ocfg);
		if (ret) {
			DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
			return ret;
		}

		eth_priv->en_ordered = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

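/*
 * Usage sketch (illustrative): an event Rx adapter attaches a queue with a
 * configuration like the one consumed above; ev.sched_type selects the
 * parallel/atomic/ordered processing callback:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf conf = {
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.priority = 0,
 *	};
 *
 *	ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
 *						 0, &conf);
 */
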
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.vlan_tpid_set = dpaa2_vlan_tpid_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.rx_queue_count = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

/* Populate the MAC address, preferring the physically available one
 * (u-boot/firmware) over one set by higher layers like MC (restool).
 * On success, mac_entry holds the selected (prime) MAC address.
 */
static int
populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
		  struct ether_addr *mac_entry)
{
	int ret;
	struct ether_addr phy_mac, prime_mac;

	memset(&phy_mac, 0, sizeof(struct ether_addr));
	memset(&prime_mac, 0, sizeof(struct ether_addr));

	/* Get the physical device MAC address */
	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
				     phy_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
		return -1;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
					prime_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
		return -1;
	}

	/* Now that both MACs have been obtained, do:
	 * if not_empty_mac(phy) && phy != prime, overwrite prime with phy.
	 * If empty_mac(phy), return prime.
	 * If both are empty, create a random MAC, set it as prime and return.
	 */
	if (!is_zero_ether_addr(&phy_mac)) {
		/* If the addresses are not same, overwrite prime */
		if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
							priv->token,
							phy_mac.addr_bytes);
			if (ret) {
				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
					      ret);
				return -1;
			}
			memcpy(&prime_mac, &phy_mac,
				sizeof(struct ether_addr));
		}
	} else if (is_zero_ether_addr(&prime_mac)) {
		/* In case phy and prime are both zero, create a random MAC */
		eth_random_addr(prime_mac.addr_bytes);
		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
						priv->token,
						prime_mac.addr_bytes);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
			return -1;
		}
	}

	/* prime_mac is the final MAC address */
	memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
	return 0;
}

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* In case of secondary, only burst and ops API need to be
		 * plugged.
		 */
		eth_dev->dev_ops = &dpaa2_ethdev_ops;
		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
		return 0;
	}

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in opening dpni@%d with err code %d",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in get dpni@%d attribute, err code %d",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Set "num_rx_queues" to the number of queues in the first TC,
	 * as only one TC is currently supported on the Rx side. Once
	 * multiple TCs are in use for Rx processing, this will be changed
	 * or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
	if (ret) {
		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	dpaa2_free_rx_tx_queues(eth_dev);

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure closing dpni device with err code %d",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}