/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
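/*
 * Note on usage (illustrative, not part of the original code): the "sup" and
 * "nodis" sets are advertised together as (sup | nodis) by dev_infos_get(),
 * and dev_configure() only warns - it does not fail - when an application
 * leaves a "nodis" offload un-requested, since those offloads stay enabled
 * in hardware regardless. A hypothetical application-side check:
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
 *		conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
 */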
struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}
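/*
 * Application-side usage of the op above (illustrative): enable
 * DEV_RX_OFFLOAD_VLAN_FILTER at configure time, then add IDs:
 *
 *	rte_eth_dev_vlan_filter(port_id, 42, 1);  // reaches this op with on=1
 */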
static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}
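/*
 * Illustrative application-side query of the limits reported above:
 *
 *	struct rte_eth_dev_info dev_info;
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	uint16_t nb_rxq = RTE_MIN(wanted_rxq, dev_info.max_rx_queues);
 */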
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}
static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* Free memory for all queues (Rx + Tx) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non-configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non-configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set MTU. Check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution. "
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Rx L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Rx L4 csum: err = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Tx L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Tx L4 csum: err = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}
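/*
 * A minimal configuration that exercises the paths above (sketch; the
 * field values are illustrative only):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
 *				    DEV_RX_OFFLOAD_UDP_CKSUM |
 *				    DEV_RX_OFFLOAD_TCP_CKSUM,
 *		},
 *		.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP,
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */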
/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* If LS2088 or a rev2 device, enable stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bits represent annotation, context
		 * stashing, data stashing; setting 01 01 00 (0x14)
		 * (in following order -> DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		memset(&taildrop, 0, sizeof(struct dpni_taildrop));
		/* Enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}
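/*
 * Application-side counterpart (illustrative): each Rx queue must be backed
 * by a dpaa2-backed mempool so that mempool_to_bpid() can resolve a buffer
 * pool id for it:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 */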
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err = %d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}
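/*
 * Application-side counterpart (illustrative). nb_tx_desc and tx_conf are
 * ignored by this PMD; queue sizing is handled by the QBMAN hardware queues
 * rather than software descriptor rings:
 *
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 */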
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/*
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}
static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* Checksum errors: send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}
/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}
static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}
static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}
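/*
 * Worked example of the frame-size arithmetic above, assuming the classic
 * sizes ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE = 4:
 * an MTU of 1492 gives frame_size = 1492 + 14 + 4 + 4 = 1514 <= 1518
 * (ETHER_MAX_LEN), so the jumbo flag is cleared; an MTU of 9000 gives
 * 9022 and sets DEV_RX_OFFLOAD_JUMBO_FRAME. Application side
 * (illustrative):
 *
 *	rte_eth_dev_set_mtu(port_id, 9000);
 */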
static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}
static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i] = 0;
		stats->q_obytes[i] = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return retcode;
}
static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}
static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}
static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int retcode;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	/* Reset the per queue stats in dpaa2_queue structure */
	for (i = 0; i < priv->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		if (dpaa2_q)
			dpaa2_q->rx_pkts = 0;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_q)
			dpaa2_q->tx_pkts = 0;
	}

	return;

error:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return;
}
/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}
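/*
 * Applications normally reach this op through the ethdev API
 * (illustrative):
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get_nowait(port_id, &link);   // wait_to_complete == 0
 */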
/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}
/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo- we may have to manually cleanup queues. */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}
static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}
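/*
 * Summary of the mapping implemented above (DPNI link options -> mode):
 *
 *	PAUSE	ASYM_PAUSE	fc_conf->mode
 *	set	not set		RTE_FC_FULL
 *	set	set		RTE_FC_RX_PAUSE
 *	not set	set		RTE_FC_TX_PAUSE
 *	not set	not set		RTE_FC_NONE
 */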
static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}
static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}
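/*
 * Illustrative application usage of the two RSS ops above (note that
 * rss_key is not supported, only rss_hf is honoured):
 *
 *	struct rte_eth_rss_conf rss = { .rss_key = NULL, .rss_hf = ETH_RSS_IP };
 *	rte_eth_dev_rss_hash_update(port_id, &rss);
 *	rte_eth_dev_rss_hash_conf_get(port_id, &rss);  // rss.rss_key_len == 0
 */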
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}
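/*
 * Sketch of the queue_conf an event Rx adapter might pass in (field values
 * and the dpcon_id parameter are illustrative assumptions). Atomic
 * scheduling sets DPNI_QUEUE_OPT_HOLD_ACTIVE above, keeping a flow pinned
 * to one core until its event is released:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.priority = 0,
 *	};
 *	dpaa2_eth_eventq_attach(eth_dev, 0, dpcon_id, &qconf);
 */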
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}
static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.rx_queue_count = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};
/* Populate the mac address from physically available (u-boot/firmware) and/or
 * one set by higher layers like MC (restool) etc.
 * Returns the table of MAC entries (multiple entries)
 */
static int
populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
		  struct ether_addr *mac_entry)
{
	int ret;
	struct ether_addr phy_mac, prime_mac;

	memset(&phy_mac, 0, sizeof(struct ether_addr));
	memset(&prime_mac, 0, sizeof(struct ether_addr));

	/* Get the physical device MAC address */
	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
				     phy_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
		goto cleanup;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
					prime_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
		goto cleanup;
	}

	/* Now that both MACs have been obtained, do:
	 * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
	 * If empty_mac(phy), return prime.
	 * if both are empty, create random MAC, set as prime and return
	 */
	if (!is_zero_ether_addr(&phy_mac)) {
		/* If the addresses are not same, overwrite prime */
		if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
							priv->token,
							phy_mac.addr_bytes);
			if (ret) {
				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
					      ret);
				goto cleanup;
			}
			memcpy(&prime_mac, &phy_mac,
				sizeof(struct ether_addr));
		}
	} else if (is_zero_ether_addr(&prime_mac)) {
		/* In case phys and prime, both are zero, create random MAC */
		eth_random_addr(prime_mac.addr_bytes);
		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
						priv->token,
						prime_mac.addr_bytes);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
			goto cleanup;
		}
	}

	/* prime_mac is the final MAC address */
	memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
	return 0;

cleanup:
	return -1;
}
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* In case of secondary, only burst and ops API need to be
		 * plugged.
		 */
		eth_dev->dev_ops = &dpaa2_ethdev_ops;
		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
		return 0;
	}

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in opening dpni@%d with err code %d",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in get dpni@%d attribute, err code %d",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
	if (ret) {
		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	dpaa2_free_rx_tx_queues(eth_dev);

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure closing dpni device with err code %d",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}
static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}