/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
22 #include "dpaa2_pmd_logs.h"
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <mc/fsl_dpmng.h>
28 #include "dpaa2_ethdev.h"
29 #include "dpaa2_sparser.h"
30 #include <fsl_qbman_debug.h>
32 #define DRIVER_LOOPBACK_MODE "drv_loopback"
33 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
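/* These devargs select the Rx burst function at probe time (see
 * dpaa2_dev_init() below). Illustrative usage only - the DPNI object name
 * is an example and depends on the actual fslmc bus layout:
 *
 *   ... -w fslmc:dpni.1,drv_loopback=1 ...      selects loopback Rx
 *   ... -w fslmc:dpni.1,drv_no_prefetch=1 ...   selects non-prefetch Rx
 */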
35 /* Supported Rx offloads */
36 static uint64_t dev_rx_offloads_sup =
37 DEV_RX_OFFLOAD_CHECKSUM |
38 DEV_RX_OFFLOAD_SCTP_CKSUM |
39 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
40 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
41 DEV_RX_OFFLOAD_VLAN_STRIP |
42 DEV_RX_OFFLOAD_VLAN_FILTER |
43 DEV_RX_OFFLOAD_JUMBO_FRAME |
44 DEV_RX_OFFLOAD_TIMESTAMP;
46 /* Rx offloads which cannot be disabled */
47 static uint64_t dev_rx_offloads_nodis =
48 DEV_RX_OFFLOAD_RSS_HASH |
49 DEV_RX_OFFLOAD_SCATTER;
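/* Note: offloads in the *_nodis sets (this one and dev_tx_offloads_nodis
 * below) are always enabled by the hardware. They are reported as
 * capabilities in dpaa2_dev_info_get(), and dpaa2_eth_dev_configure() only
 * logs a warning if an application did not request them; they cannot be
 * switched off.
 */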
51 /* Supported Tx offloads */
52 static uint64_t dev_tx_offloads_sup =
53 DEV_TX_OFFLOAD_VLAN_INSERT |
54 DEV_TX_OFFLOAD_IPV4_CKSUM |
55 DEV_TX_OFFLOAD_UDP_CKSUM |
56 DEV_TX_OFFLOAD_TCP_CKSUM |
57 DEV_TX_OFFLOAD_SCTP_CKSUM |
58 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
59 DEV_TX_OFFLOAD_MT_LOCKFREE |
60 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
62 /* Tx offloads which cannot be disabled */
63 static uint64_t dev_tx_offloads_nodis =
64 DEV_TX_OFFLOAD_MULTI_SEGS;
66 /* enable timestamp in mbuf */
67 enum pmd_dpaa2_ts dpaa2_enable_ts;
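/* Toggled via the rte_pmd_dpaa2_set_timestamp() API below; it is also
 * forced on in dpaa2_eth_dev_configure() when DEV_RX_OFFLOAD_TIMESTAMP is
 * requested.
 */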
69 struct rte_dpaa2_xstats_name_off {
70 char name[RTE_ETH_XSTATS_NAME_SIZE];
71 uint8_t page_id; /* dpni statistics page id */
72 uint8_t stats_id; /* stats id in the given page */
75 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
76 {"ingress_multicast_frames", 0, 2},
77 {"ingress_multicast_bytes", 0, 3},
78 {"ingress_broadcast_frames", 0, 4},
79 {"ingress_broadcast_bytes", 0, 5},
80 {"egress_multicast_frames", 1, 2},
81 {"egress_multicast_bytes", 1, 3},
82 {"egress_broadcast_frames", 1, 4},
83 {"egress_broadcast_bytes", 1, 5},
84 {"ingress_filtered_frames", 2, 0},
85 {"ingress_discarded_frames", 2, 1},
86 {"ingress_nobuffer_discards", 2, 2},
87 {"egress_discarded_frames", 2, 3},
88 {"egress_confirmed_frames", 2, 4},
89 {"cgr_reject_frames", 4, 0},
90 {"cgr_reject_bytes", 4, 1},
93 static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
95 RTE_ETH_FILTER_DELETE,
96 RTE_ETH_FILTER_UPDATE,
101 static struct rte_dpaa2_driver rte_dpaa2_pmd;
102 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
103 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
104 int wait_to_complete);
105 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
106 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
107 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
109 int dpaa2_logtype_pmd;
112 rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
114 dpaa2_enable_ts = enable;
118 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
121 struct dpaa2_dev_priv *priv = dev->data->dev_private;
122 struct fsl_mc_io *dpni = dev->process_private;
124 PMD_INIT_FUNC_TRACE();
127 DPAA2_PMD_ERR("dpni is NULL");
132 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
135 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
136 priv->token, vlan_id);
139 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
140 ret, vlan_id, priv->hw_id);
146 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
148 struct dpaa2_dev_priv *priv = dev->data->dev_private;
149 struct fsl_mc_io *dpni = dev->process_private;
152 PMD_INIT_FUNC_TRACE();
154 if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
156 if (!priv->max_vlan_filters) {
157 DPAA2_PMD_INFO("VLAN filter not available");
161 if (dev->data->dev_conf.rxmode.offloads &
162 DEV_RX_OFFLOAD_VLAN_FILTER)
163 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
166 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
169 DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
172 if (mask & ETH_VLAN_EXTEND_MASK) {
173 if (dev->data->dev_conf.rxmode.offloads &
174 DEV_RX_OFFLOAD_VLAN_EXTEND)
175 DPAA2_PMD_INFO("VLAN extend offload not supported");
182 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
183 enum rte_vlan_type vlan_type __rte_unused,
186 struct dpaa2_dev_priv *priv = dev->data->dev_private;
187 struct fsl_mc_io *dpni = dev->process_private;
190 PMD_INIT_FUNC_TRACE();
192 /* nothing to be done for standard vlan tpids */
193 if (tpid == 0x8100 || tpid == 0x88A8)
196 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
199 DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
200 /* if already configured tpids, remove them first */
202 struct dpni_custom_tpid_cfg tpid_list = {0};
204 ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
205 priv->token, &tpid_list);
208 ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
209 priv->token, tpid_list.tpid1);
212 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
220 dpaa2_fw_version_get(struct rte_eth_dev *dev,
225 struct fsl_mc_io *dpni = dev->process_private;
226 struct mc_soc_version mc_plat_info = {0};
227 struct mc_version mc_ver_info = {0};
229 PMD_INIT_FUNC_TRACE();
231 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
232 DPAA2_PMD_WARN("\tmc_get_soc_version failed");
234 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
235 DPAA2_PMD_WARN("\tmc_get_version failed");
237 ret = snprintf(fw_version, fw_size,
242 mc_ver_info.revision);
244 ret += 1; /* add the size of '\0' */
245 if (fw_size < (uint32_t)ret)
252 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
254 struct dpaa2_dev_priv *priv = dev->data->dev_private;
256 PMD_INIT_FUNC_TRACE();
258 dev_info->if_index = priv->hw_id;
260 dev_info->max_mac_addrs = priv->max_mac_filters;
261 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
262 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
263 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
264 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
265 dev_info->rx_offload_capa = dev_rx_offloads_sup |
266 dev_rx_offloads_nodis;
267 dev_info->tx_offload_capa = dev_tx_offloads_sup |
268 dev_tx_offloads_nodis;
269 dev_info->speed_capa = ETH_LINK_SPEED_1G |
270 ETH_LINK_SPEED_2_5G |
273 dev_info->max_hash_mac_addrs = 0;
274 dev_info->max_vfs = 0;
275 dev_info->max_vmdq_pools = ETH_16_POOLS;
276 dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
282 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
284 struct dpaa2_dev_priv *priv = dev->data->dev_private;
287 uint8_t num_rxqueue_per_tc;
288 struct dpaa2_queue *mc_q, *mcq;
291 struct dpaa2_queue *dpaa2_q;
293 PMD_INIT_FUNC_TRACE();
295 num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
296 if (priv->tx_conf_en)
297 tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
299 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
300 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
301 RTE_CACHE_LINE_SIZE);
303 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
307 for (i = 0; i < priv->nb_rx_queues; i++) {
308 mc_q->eth_data = dev->data;
309 priv->rx_vq[i] = mc_q++;
310 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
311 dpaa2_q->q_storage = rte_malloc("dq_storage",
312 sizeof(struct queue_storage_info_t),
313 RTE_CACHE_LINE_SIZE);
314 if (!dpaa2_q->q_storage)
317 memset(dpaa2_q->q_storage, 0,
318 sizeof(struct queue_storage_info_t));
319 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
323 for (i = 0; i < priv->nb_tx_queues; i++) {
324 mc_q->eth_data = dev->data;
325 mc_q->flow_id = 0xffff;
326 priv->tx_vq[i] = mc_q++;
327 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
328 dpaa2_q->cscn = rte_malloc(NULL,
329 sizeof(struct qbman_result), 16);
334 if (priv->tx_conf_en) {
335 /*Setup tx confirmation queues*/
336 for (i = 0; i < priv->nb_tx_queues; i++) {
337 mc_q->eth_data = dev->data;
340 priv->tx_conf_vq[i] = mc_q++;
341 dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
343 rte_malloc("dq_storage",
344 sizeof(struct queue_storage_info_t),
345 RTE_CACHE_LINE_SIZE);
346 if (!dpaa2_q->q_storage)
349 memset(dpaa2_q->q_storage, 0,
350 sizeof(struct queue_storage_info_t));
351 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
357 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
358 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
359 mcq->tc_index = dist_idx / num_rxqueue_per_tc;
360 mcq->flow_id = dist_idx % num_rxqueue_per_tc;
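		/* e.g. with num_rxqueue_per_tc = 4, rx queue 5 maps to
		 * tc_index = 1, flow_id = 1.
		 */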
368 dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
369 rte_free(dpaa2_q->q_storage);
370 priv->tx_conf_vq[i--] = NULL;
372 i = priv->nb_tx_queues;
376 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
377 rte_free(dpaa2_q->cscn);
378 priv->tx_vq[i--] = NULL;
380 i = priv->nb_rx_queues;
383 mc_q = priv->rx_vq[0];
385 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
386 dpaa2_free_dq_storage(dpaa2_q->q_storage);
387 rte_free(dpaa2_q->q_storage);
388 priv->rx_vq[i--] = NULL;
395 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
397 struct dpaa2_dev_priv *priv = dev->data->dev_private;
398 struct dpaa2_queue *dpaa2_q;
401 PMD_INIT_FUNC_TRACE();
403 /* Queue allocation base */
404 if (priv->rx_vq[0]) {
405 /* cleaning up queue storage */
406 for (i = 0; i < priv->nb_rx_queues; i++) {
407 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
408 if (dpaa2_q->q_storage)
409 rte_free(dpaa2_q->q_storage);
411 /* cleanup tx queue cscn */
412 for (i = 0; i < priv->nb_tx_queues; i++) {
413 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
414 rte_free(dpaa2_q->cscn);
416 if (priv->tx_conf_en) {
417 /* cleanup tx conf queue storage */
418 for (i = 0; i < priv->nb_tx_queues; i++) {
419 dpaa2_q = (struct dpaa2_queue *)
421 rte_free(dpaa2_q->q_storage);
424 /*free memory for all queues (RX+TX) */
425 rte_free(priv->rx_vq[0]);
426 priv->rx_vq[0] = NULL;
431 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
433 struct dpaa2_dev_priv *priv = dev->data->dev_private;
434 struct fsl_mc_io *dpni = dev->process_private;
435 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
436 uint64_t rx_offloads = eth_conf->rxmode.offloads;
437 uint64_t tx_offloads = eth_conf->txmode.offloads;
438 int rx_l3_csum_offload = false;
439 int rx_l4_csum_offload = false;
440 int tx_l3_csum_offload = false;
441 int tx_l4_csum_offload = false;
444 PMD_INIT_FUNC_TRACE();
446 /* Rx offloads which are enabled by default */
447 if (dev_rx_offloads_nodis & ~rx_offloads) {
449 "Some of rx offloads enabled by default - requested 0x%" PRIx64
450 " fixed are 0x%" PRIx64,
451 rx_offloads, dev_rx_offloads_nodis);
454 /* Tx offloads which are enabled by default */
455 if (dev_tx_offloads_nodis & ~tx_offloads) {
457 "Some of tx offloads enabled by default - requested 0x%" PRIx64
458 " fixed are 0x%" PRIx64,
459 tx_offloads, dev_tx_offloads_nodis);
462 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
463 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
464 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
465 priv->token, eth_conf->rxmode.max_rx_pkt_len
466 - RTE_ETHER_CRC_LEN);
469 "Unable to set mtu. check config");
473 dev->data->dev_conf.rxmode.max_rx_pkt_len -
474 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
481 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
482 ret = dpaa2_setup_flow_dist(dev,
483 eth_conf->rx_adv_conf.rss_conf.rss_hf);
485 DPAA2_PMD_ERR("Unable to set flow distribution."
486 "Check queue config");
491 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
492 rx_l3_csum_offload = true;
494 if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
495 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
496 (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
497 rx_l4_csum_offload = true;
499 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
500 DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
502 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
506 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
507 DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
509 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
513 if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
514 dpaa2_enable_ts = true;
516 if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
517 tx_l3_csum_offload = true;
519 if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
520 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
521 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
522 tx_l4_csum_offload = true;
524 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
525 DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
527 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
531 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
532 DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
534 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
538 /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
539 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
540 * to 0 for LS2 in the hardware thus disabling data/annotation
541 * stashing. For LX2 this is fixed in hardware and thus hash result and
 * parse results can be received in FD using this option.
 */
544 if (dpaa2_svr_family == SVR_LX2160A) {
545 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
546 DPNI_FLCTYPE_HASH, true);
548 DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
553 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
554 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
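/* Illustrative application-side configuration exercising the RSS and
 * checksum paths above (a sketch, not part of the driver):
 *
 *   struct rte_eth_conf conf = {
 *       .rxmode = {
 *           .mq_mode  = ETH_MQ_RX_RSS,
 *           .offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *       },
 *       .rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *   };
 *   ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */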
/* Function to set up Rx flow information. It contains the traffic class ID,
 * flow ID, destination configuration, etc.
 */
563 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
564 uint16_t rx_queue_id,
566 unsigned int socket_id __rte_unused,
567 const struct rte_eth_rxconf *rx_conf __rte_unused,
568 struct rte_mempool *mb_pool)
570 struct dpaa2_dev_priv *priv = dev->data->dev_private;
571 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
572 struct dpaa2_queue *dpaa2_q;
573 struct dpni_queue cfg;
579 PMD_INIT_FUNC_TRACE();
581 DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
582 dev, rx_queue_id, mb_pool, rx_conf);
584 if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
585 bpid = mempool_to_bpid(mb_pool);
586 ret = dpaa2_attach_bp_list(priv,
587 rte_dpaa2_bpid_info[bpid].bp_list);
591 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
592 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
593 dpaa2_q->bp_array = rte_dpaa2_bpid_info;
595 /*Get the flow id from given VQ id*/
596 flow_id = dpaa2_q->flow_id;
597 memset(&cfg, 0, sizeof(struct dpni_queue));
599 options = options | DPNI_QUEUE_OPT_USER_CTX;
600 cfg.user_context = (size_t)(dpaa2_q);
	/* check if a private CGR is available */
603 for (i = 0; i < priv->max_cgs; i++) {
604 if (!priv->cgid_in_use[i]) {
605 priv->cgid_in_use[i] = 1;
610 if (i < priv->max_cgs) {
611 options |= DPNI_QUEUE_OPT_SET_CGID;
613 dpaa2_q->cgid = cfg.cgid;
615 dpaa2_q->cgid = 0xff;
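		/* 0xff is the "no private CG" sentinel: the tail drop setup
		 * below then falls back to byte-based tail drop on the queue
		 * instead of a congestion group.
		 */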
	/* If LS2088 or a rev2 device, enable stashing */
620 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
621 options |= DPNI_QUEUE_OPT_FLC;
622 cfg.flc.stash_control = true;
623 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - the last 6 bits represent annotation, context and
		 * data stashing (in the order DS, AS, CS). Setting 01 01 00
		 * (0x14) enables 1 line of data and 1 line of annotation
		 * stashing. For LX2, this setting should be 01 00 00 (0x10).
		 */
630 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
631 cfg.flc.value |= 0x10;
633 cfg.flc.value |= 0x14;
635 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
636 dpaa2_q->tc_index, flow_id, options, &cfg);
638 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
642 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
643 struct dpni_taildrop taildrop;
		/* A private CGR uses nb_rx_desc as the tail drop length; for
		 * the remaining cases we use standard byte-based tail drop.
		 * There is no HW restriction, but the number of CGRs is
		 * limited, hence this restriction is placed.
		 */
652 if (dpaa2_q->cgid != 0xff) {
653 /*enabling per rx queue congestion control */
654 taildrop.threshold = nb_rx_desc;
655 taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
657 DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
659 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
660 DPNI_CP_CONGESTION_GROUP,
			/* enabling byte-based tail drop per Rx queue */
666 taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
667 taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
668 taildrop.oal = CONG_RX_OAL;
669 DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
671 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
672 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
673 dpaa2_q->tc_index, flow_id,
677 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
681 } else { /* Disable tail Drop */
682 struct dpni_taildrop taildrop = {0};
683 DPAA2_PMD_INFO("Tail drop is disabled on queue");
686 if (dpaa2_q->cgid != 0xff) {
687 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
688 DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
692 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
693 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
694 dpaa2_q->tc_index, flow_id, &taildrop);
697 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
703 dev->data->rx_queues[rx_queue_id] = dpaa2_q;
708 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
709 uint16_t tx_queue_id,
710 uint16_t nb_tx_desc __rte_unused,
711 unsigned int socket_id __rte_unused,
712 const struct rte_eth_txconf *tx_conf __rte_unused)
714 struct dpaa2_dev_priv *priv = dev->data->dev_private;
715 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
716 priv->tx_vq[tx_queue_id];
717 struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
718 priv->tx_conf_vq[tx_queue_id];
719 struct fsl_mc_io *dpni = dev->process_private;
720 struct dpni_queue tx_conf_cfg;
721 struct dpni_queue tx_flow_cfg;
722 uint8_t options = 0, flow_id;
723 struct dpni_queue_id qid;
727 PMD_INIT_FUNC_TRACE();
729 /* Return if queue already configured */
730 if (dpaa2_q->flow_id != 0xffff) {
731 dev->data->tx_queues[tx_queue_id] = dpaa2_q;
735 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
736 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
741 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
742 tc_id, flow_id, options, &tx_flow_cfg);
744 DPAA2_PMD_ERR("Error in setting the tx flow: "
745 "tc_id=%d, flow=%d err=%d",
746 tc_id, flow_id, ret);
750 dpaa2_q->flow_id = flow_id;
752 if (tx_queue_id == 0) {
753 /*Set tx-conf and error configuration*/
754 if (priv->tx_conf_en)
755 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
759 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
763 DPAA2_PMD_ERR("Error in set tx conf mode settings: "
768 dpaa2_q->tc_index = tc_id;
770 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
771 DPNI_QUEUE_TX, dpaa2_q->tc_index,
772 dpaa2_q->flow_id, &tx_flow_cfg, &qid);
774 DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
777 dpaa2_q->fqid = qid.fqid;
779 if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
780 struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
782 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
783 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue drops below this threshold.
		 */
787 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
788 cong_notif_cfg.message_ctx = 0;
789 cong_notif_cfg.message_iova =
790 (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
791 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
792 cong_notif_cfg.notification_mode =
793 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
794 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
795 DPNI_CONG_OPT_COHERENT_WRITE;
796 cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
798 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
805 "Error in setting tx congestion notification: "
810 dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
811 dev->data->tx_queues[tx_queue_id] = dpaa2_q;
813 if (priv->tx_conf_en) {
814 dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
815 options = options | DPNI_QUEUE_OPT_USER_CTX;
816 tx_conf_cfg.user_context = (size_t)(dpaa2_q);
817 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
818 DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
819 dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
821 DPAA2_PMD_ERR("Error in setting the tx conf flow: "
822 "tc_index=%d, flow=%d err=%d",
823 dpaa2_tx_conf_q->tc_index,
824 dpaa2_tx_conf_q->flow_id, ret);
828 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
829 DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
830 dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
832 DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
835 dpaa2_tx_conf_q->fqid = qid.fqid;
dpaa2_dev_rx_queue_release(void *q)
843 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
844 struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
845 struct fsl_mc_io *dpni =
846 (struct fsl_mc_io *)priv->eth_dev->process_private;
849 struct dpni_queue cfg;
851 memset(&cfg, 0, sizeof(struct dpni_queue));
852 PMD_INIT_FUNC_TRACE();
853 if (dpaa2_q->cgid != 0xff) {
854 options = DPNI_QUEUE_OPT_CLEAR_CGID;
855 cfg.cgid = dpaa2_q->cgid;
857 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
859 dpaa2_q->tc_index, dpaa2_q->flow_id,
862 DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
864 priv->cgid_in_use[dpaa2_q->cgid] = 0;
865 dpaa2_q->cgid = 0xff;
870 dpaa2_dev_tx_queue_release(void *q __rte_unused)
872 PMD_INIT_FUNC_TRACE();
876 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
879 struct dpaa2_dev_priv *priv = dev->data->dev_private;
880 struct dpaa2_queue *dpaa2_q;
881 struct qbman_swp *swp;
882 struct qbman_fq_query_np_rslt state;
883 uint32_t frame_cnt = 0;
885 PMD_INIT_FUNC_TRACE();
887 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
888 ret = dpaa2_affine_qbman_swp();
890 DPAA2_PMD_ERR("Failure in affining portal");
894 swp = DPAA2_PER_LCORE_PORTAL;
896 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
898 if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
899 frame_cnt = qbman_fq_state_frame_count(&state);
900 DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
901 rx_queue_id, frame_cnt);
906 static const uint32_t *
907 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
909 static const uint32_t ptypes[] = {
		/* TODO: add more types */
913 RTE_PTYPE_L3_IPV4_EXT,
915 RTE_PTYPE_L3_IPV6_EXT,
923 if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
924 dev->rx_pkt_burst == dpaa2_dev_rx ||
925 dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
/* DPAA2 link interrupt handler
 *
 * @param param
 *   The address of the (struct rte_eth_dev *) registered for the interrupt.
 */
940 dpaa2_interrupt_handler(void *param)
942 struct rte_eth_dev *dev = param;
943 struct dpaa2_dev_priv *priv = dev->data->dev_private;
944 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
946 int irq_index = DPNI_IRQ_INDEX;
947 unsigned int status = 0, clear = 0;
949 PMD_INIT_FUNC_TRACE();
952 DPAA2_PMD_ERR("dpni is NULL");
956 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
959 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
964 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
965 clear = DPNI_IRQ_EVENT_LINK_CHANGED;
966 dpaa2_dev_link_update(dev, 0);
967 /* calling all the apps registered for link status event */
968 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
972 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
975 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
979 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
982 struct dpaa2_dev_priv *priv = dev->data->dev_private;
983 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
984 int irq_index = DPNI_IRQ_INDEX;
985 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
987 PMD_INIT_FUNC_TRACE();
989 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
992 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
997 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1000 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1007 dpaa2_dev_start(struct rte_eth_dev *dev)
1009 struct rte_device *rdev = dev->device;
1010 struct rte_dpaa2_device *dpaa2_dev;
1011 struct rte_eth_dev_data *data = dev->data;
1012 struct dpaa2_dev_priv *priv = data->dev_private;
1013 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1014 struct dpni_queue cfg;
1015 struct dpni_error_cfg err_cfg;
1017 struct dpni_queue_id qid;
1018 struct dpaa2_queue *dpaa2_q;
1020 struct rte_intr_handle *intr_handle;
1022 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1023 intr_handle = &dpaa2_dev->intr_handle;
1025 PMD_INIT_FUNC_TRACE();
1027 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1029 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1034 /* Power up the phy. Needed to make the link go UP */
1035 dpaa2_dev_set_link_up(dev);
1037 ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
1038 DPNI_QUEUE_TX, &qdid);
1040 DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
1045 for (i = 0; i < data->nb_rx_queues; i++) {
1046 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
1047 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1048 DPNI_QUEUE_RX, dpaa2_q->tc_index,
1049 dpaa2_q->flow_id, &cfg, &qid);
1051 DPAA2_PMD_ERR("Error in getting flow information: "
1055 dpaa2_q->fqid = qid.fqid;
	/* On checksum errors, send the frames on the normal path and flag the
	 * error in the frame annotation.
	 */
1059 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1060 err_cfg.errors |= DPNI_ERROR_PHE;
1062 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1063 err_cfg.set_frame_annotation = true;
1065 ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1066 priv->token, &err_cfg);
1068 DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
	/* if the interrupts were configured on this device */
1074 if (intr_handle && (intr_handle->fd) &&
1075 (dev->data->dev_conf.intr_conf.lsc != 0)) {
1076 /* Registering LSC interrupt handler */
1077 rte_intr_callback_register(intr_handle,
1078 dpaa2_interrupt_handler,
		/* Enable vfio intr/eventfd mapping. Interrupt index 0 is
		 * required, so we cannot use rte_intr_enable() here.
		 */
1085 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1087 /* enable dpni_irqs */
1088 dpaa2_eth_setup_irqs(dev, 1);
1091 /* Change the tx burst function if ordered queues are used */
1092 if (priv->en_ordered)
1093 dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
/* This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
1103 dpaa2_dev_stop(struct rte_eth_dev *dev)
1105 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1106 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1108 struct rte_eth_link link;
1109 struct rte_intr_handle *intr_handle = dev->intr_handle;
1111 PMD_INIT_FUNC_TRACE();
1113 /* reset interrupt callback */
1114 if (intr_handle && (intr_handle->fd) &&
1115 (dev->data->dev_conf.intr_conf.lsc != 0)) {
1116 /*disable dpni irqs */
1117 dpaa2_eth_setup_irqs(dev, 0);
1119 /* disable vfio intr before callback unregister */
1120 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1122 /* Unregistering LSC interrupt handler */
1123 rte_intr_callback_unregister(intr_handle,
1124 dpaa2_interrupt_handler,
1128 dpaa2_dev_set_link_down(dev);
1130 ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1132 DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1137 /* clear the recorded link status */
1138 memset(&link, 0, sizeof(link));
1139 rte_eth_linkstatus_set(dev, &link);
1143 dpaa2_dev_close(struct rte_eth_dev *dev)
1145 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1146 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1148 struct rte_eth_link link;
1150 PMD_INIT_FUNC_TRACE();
1152 dpaa2_flow_clean(dev);
1154 /* Clean the device first */
1155 ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1157 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1161 memset(&link, 0, sizeof(link));
1162 rte_eth_linkstatus_set(dev, &link);
1166 dpaa2_dev_promiscuous_enable(
1167 struct rte_eth_dev *dev)
1170 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1171 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1173 PMD_INIT_FUNC_TRACE();
1176 DPAA2_PMD_ERR("dpni is NULL");
1180 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1182 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1184 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1186 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1192 dpaa2_dev_promiscuous_disable(
1193 struct rte_eth_dev *dev)
1196 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1197 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1199 PMD_INIT_FUNC_TRACE();
1202 DPAA2_PMD_ERR("dpni is NULL");
1206 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1208 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1210 if (dev->data->all_multicast == 0) {
1211 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1212 priv->token, false);
1214 DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1222 dpaa2_dev_allmulticast_enable(
1223 struct rte_eth_dev *dev)
1226 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1227 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1229 PMD_INIT_FUNC_TRACE();
1232 DPAA2_PMD_ERR("dpni is NULL");
1236 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1238 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1244 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1247 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1248 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1250 PMD_INIT_FUNC_TRACE();
1253 DPAA2_PMD_ERR("dpni is NULL");
	/* multicast promisc must remain on while promiscuous mode is enabled */
1258 if (dev->data->promiscuous == 1)
1261 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1263 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1269 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1272 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1273 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1274 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1277 PMD_INIT_FUNC_TRACE();
1280 DPAA2_PMD_ERR("dpni is NULL");
1284 /* check that mtu is within the allowed range */
1285 if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1288 if (frame_size > RTE_ETHER_MAX_LEN)
1289 dev->data->dev_conf.rxmode.offloads |=
1290 DEV_RX_OFFLOAD_JUMBO_FRAME;
1292 dev->data->dev_conf.rxmode.offloads &=
1293 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1295 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
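	/* e.g. mtu = 1500 gives frame_size = 1500 + 14 (Ethernet header) +
	 * 4 (CRC) = 1518 bytes (plus VLAN tag size if it is accounted for
	 * above); anything above RTE_ETHER_MAX_LEN enables the jumbo frame
	 * offload flag.
	 */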
	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length.
	 */
1300 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1301 frame_size - RTE_ETHER_CRC_LEN);
1303 DPAA2_PMD_ERR("Setting the max frame length failed");
1306 DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1311 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1312 struct rte_ether_addr *addr,
1313 __rte_unused uint32_t index,
1314 __rte_unused uint32_t pool)
1317 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1318 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1320 PMD_INIT_FUNC_TRACE();
1323 DPAA2_PMD_ERR("dpni is NULL");
1327 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1328 addr->addr_bytes, 0, 0, 0);
1331 "error: Adding the MAC ADDR failed: err = %d", ret);
1336 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1340 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1341 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1342 struct rte_eth_dev_data *data = dev->data;
1343 struct rte_ether_addr *macaddr;
1345 PMD_INIT_FUNC_TRACE();
1347 macaddr = &data->mac_addrs[index];
1350 DPAA2_PMD_ERR("dpni is NULL");
1354 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1355 priv->token, macaddr->addr_bytes);
1358 "error: Removing the MAC ADDR failed: err = %d", ret);
1362 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1363 struct rte_ether_addr *addr)
1366 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1367 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1369 PMD_INIT_FUNC_TRACE();
1372 DPAA2_PMD_ERR("dpni is NULL");
1376 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1377 priv->token, addr->addr_bytes);
1381 "error: Setting the MAC ADDR failed %d", ret);
1387 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1388 struct rte_eth_stats *stats)
1390 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1391 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1393 uint8_t page0 = 0, page1 = 1, page2 = 2;
1394 union dpni_statistics value;
1396 struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1398 memset(&value, 0, sizeof(union dpni_statistics));
1400 PMD_INIT_FUNC_TRACE();
1403 DPAA2_PMD_ERR("dpni is NULL");
1408 DPAA2_PMD_ERR("stats is NULL");
1412 /*Get Counters from page_0*/
1413 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1418 stats->ipackets = value.page_0.ingress_all_frames;
1419 stats->ibytes = value.page_0.ingress_all_bytes;
1421 /*Get Counters from page_1*/
1422 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1427 stats->opackets = value.page_1.egress_all_frames;
1428 stats->obytes = value.page_1.egress_all_bytes;
1430 /*Get Counters from page_2*/
1431 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1436 /* Ingress drop frame count due to configured rules */
1437 stats->ierrors = value.page_2.ingress_filtered_frames;
1438 /* Ingress drop frame count due to error */
1439 stats->ierrors += value.page_2.ingress_discarded_frames;
1441 stats->oerrors = value.page_2.egress_discarded_frames;
1442 stats->imissed = value.page_2.ingress_nobuffer_discards;
1444 /* Fill in per queue stats */
1445 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1446 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1447 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1448 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1450 stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1452 stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1454 /* Byte counting is not implemented */
1455 stats->q_ibytes[i] = 0;
1456 stats->q_obytes[i] = 0;
1462 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1467 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1470 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1471 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1473 union dpni_statistics value[5] = {};
1474 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1482 /* Get Counters from page_0*/
1483 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1488 /* Get Counters from page_1*/
1489 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1494 /* Get Counters from page_2*/
1495 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1500 for (i = 0; i < priv->max_cgs; i++) {
1501 if (!priv->cgid_in_use[i]) {
1502 /* Get Counters from page_4*/
1503 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1512 for (i = 0; i < num; i++) {
1514 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1515 raw.counter[dpaa2_xstats_strings[i].stats_id];
1519 DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1524 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1525 struct rte_eth_xstat_name *xstats_names,
1528 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1530 if (limit < stat_cnt)
1533 if (xstats_names != NULL)
1534 for (i = 0; i < stat_cnt; i++)
1535 strlcpy(xstats_names[i].name,
1536 dpaa2_xstats_strings[i].name,
1537 sizeof(xstats_names[i].name));
1543 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1544 uint64_t *values, unsigned int n)
1546 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1547 uint64_t values_copy[stat_cnt];
1550 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1551 struct fsl_mc_io *dpni =
1552 (struct fsl_mc_io *)dev->process_private;
1554 union dpni_statistics value[5] = {};
1562 /* Get Counters from page_0*/
1563 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1568 /* Get Counters from page_1*/
1569 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1574 /* Get Counters from page_2*/
1575 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1580 /* Get Counters from page_4*/
1581 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1586 for (i = 0; i < stat_cnt; i++) {
1587 values[i] = value[dpaa2_xstats_strings[i].page_id].
1588 raw.counter[dpaa2_xstats_strings[i].stats_id];
1593 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1595 for (i = 0; i < n; i++) {
1596 if (ids[i] >= stat_cnt) {
1597 DPAA2_PMD_ERR("xstats id value isn't valid");
1600 values[i] = values_copy[ids[i]];
1606 dpaa2_xstats_get_names_by_id(
1607 struct rte_eth_dev *dev,
1608 struct rte_eth_xstat_name *xstats_names,
1609 const uint64_t *ids,
1612 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1613 struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1616 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1618 dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1620 for (i = 0; i < limit; i++) {
1621 if (ids[i] >= stat_cnt) {
1622 DPAA2_PMD_ERR("xstats id value isn't valid");
1625 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1631 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1633 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1634 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1637 struct dpaa2_queue *dpaa2_q;
1639 PMD_INIT_FUNC_TRACE();
1642 DPAA2_PMD_ERR("dpni is NULL");
1646 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1650 /* Reset the per queue stats in dpaa2_queue structure */
1651 for (i = 0; i < priv->nb_rx_queues; i++) {
1652 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1654 dpaa2_q->rx_pkts = 0;
1657 for (i = 0; i < priv->nb_tx_queues; i++) {
1658 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1660 dpaa2_q->tx_pkts = 0;
1666 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1670 /* return 0 means link status changed, -1 means not changed */
1672 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1673 int wait_to_complete __rte_unused)
1676 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1677 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1678 struct rte_eth_link link;
1679 struct dpni_link_state state = {0};
1682 DPAA2_PMD_ERR("dpni is NULL");
1686 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1688 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1692 memset(&link, 0, sizeof(struct rte_eth_link));
1693 link.link_status = state.up;
1694 link.link_speed = state.rate;
1696 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1697 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1699 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1701 ret = rte_eth_linkstatus_set(dev, &link);
1703 DPAA2_PMD_DEBUG("No change in status");
1705 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1706 link.link_status ? "Up" : "Down");
/* Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of a logical toggle.
 */
1716 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1719 struct dpaa2_dev_priv *priv;
1720 struct fsl_mc_io *dpni;
1722 struct dpni_link_state state = {0};
1724 priv = dev->data->dev_private;
1725 dpni = (struct fsl_mc_io *)dev->process_private;
1728 DPAA2_PMD_ERR("dpni is NULL");
1732 /* Check if DPNI is currently enabled */
1733 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1735 /* Unable to obtain dpni status; Not continuing */
1736 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1740 /* Enable link if not already enabled */
1742 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1744 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1748 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1750 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1754 /* changing tx burst function to start enqueues */
1755 dev->tx_pkt_burst = dpaa2_dev_tx;
1756 dev->data->dev_link.link_status = state.up;
1757 dev->data->dev_link.link_speed = state.rate;
1760 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1762 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
/* Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of a logical toggle.
 */
1771 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1774 struct dpaa2_dev_priv *priv;
1775 struct fsl_mc_io *dpni;
1776 int dpni_enabled = 0;
1779 PMD_INIT_FUNC_TRACE();
1781 priv = dev->data->dev_private;
1782 dpni = (struct fsl_mc_io *)dev->process_private;
1785 DPAA2_PMD_ERR("Device has not yet been configured");
1789 /*changing tx burst function to avoid any more enqueues */
1790 dev->tx_pkt_burst = dummy_dev_tx;
	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
1796 ret = dpni_disable(dpni, 0, priv->token);
1798 DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1801 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1803 DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1807 /* Allow the MC some slack */
1808 rte_delay_us(100 * 1000);
1809 } while (dpni_enabled && --retries);
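	/* Each pass above sleeps 100 ms, so the total drain window is
	 * retries * 100 ms before we give up and warn below.
	 */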
1812 DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* TODO: we may have to manually clean up queues. */
1816 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1817 dev->data->port_id);
1820 dev->data->dev_link.link_status = 0;
1826 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1829 struct dpaa2_dev_priv *priv;
1830 struct fsl_mc_io *dpni;
1831 struct dpni_link_state state = {0};
1833 PMD_INIT_FUNC_TRACE();
1835 priv = dev->data->dev_private;
1836 dpni = (struct fsl_mc_io *)dev->process_private;
1838 if (dpni == NULL || fc_conf == NULL) {
1839 DPAA2_PMD_ERR("device not configured");
1843 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1845 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1849 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1850 if (state.options & DPNI_LINK_OPT_PAUSE) {
1851 /* DPNI_LINK_OPT_PAUSE set
1852 * if ASYM_PAUSE not set,
1853 * RX Side flow control (handle received Pause frame)
1854 * TX side flow control (send Pause frame)
1855 * if ASYM_PAUSE set,
1856 * RX Side flow control (handle received Pause frame)
		 * No TX side flow control (send Pause frame disabled)
		 */
1859 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1860 fc_conf->mode = RTE_FC_FULL;
1862 fc_conf->mode = RTE_FC_RX_PAUSE;
1864 /* DPNI_LINK_OPT_PAUSE not set
1865 * if ASYM_PAUSE set,
1866 * TX side flow control (send Pause frame)
1867 * No RX side flow control (No action on pause frame rx)
1868 * if ASYM_PAUSE not set,
		 * Flow control disabled
		 */
1871 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1872 fc_conf->mode = RTE_FC_TX_PAUSE;
1874 fc_conf->mode = RTE_FC_NONE;
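	/* Summary of the PAUSE/ASYM_PAUSE -> fc_conf->mode mapping used above
	 * (the inverse is applied in dpaa2_flow_ctrl_set()):
	 *
	 *   PAUSE  ASYM_PAUSE   mode
	 *     1        0        RTE_FC_FULL
	 *     1        1        RTE_FC_RX_PAUSE
	 *     0        1        RTE_FC_TX_PAUSE
	 *     0        0        RTE_FC_NONE
	 */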
1881 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1884 struct dpaa2_dev_priv *priv;
1885 struct fsl_mc_io *dpni;
1886 struct dpni_link_state state = {0};
1887 struct dpni_link_cfg cfg = {0};
1889 PMD_INIT_FUNC_TRACE();
1891 priv = dev->data->dev_private;
1892 dpni = (struct fsl_mc_io *)dev->process_private;
1895 DPAA2_PMD_ERR("dpni is NULL");
	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return an error if the rate, autoneg or duplex values
	 * are different.
	 */
1903 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1905 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
1909 /* Disable link before setting configuration */
1910 dpaa2_dev_set_link_down(dev);
1912 /* Based on fc_conf, update cfg */
1913 cfg.rate = state.rate;
1914 cfg.options = state.options;
1916 /* update cfg with fc_conf */
1917 switch (fc_conf->mode) {
		/* Full flow control:
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
1922 cfg.options |= DPNI_LINK_OPT_PAUSE;
1923 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1925 case RTE_FC_TX_PAUSE:
		/* Enable TX flow control (send Pause frames):
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set
		 */
1930 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1931 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1933 case RTE_FC_RX_PAUSE:
		/* Enable RX flow control (handle received Pause frames):
		 * OPT_PAUSE set;
		 * ASYM_PAUSE set
		 */
1938 cfg.options |= DPNI_LINK_OPT_PAUSE;
1939 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		/* Disable flow control:
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE not set
		 */
1946 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1947 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1950 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
1955 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1957 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
1961 dpaa2_dev_set_link_up(dev);
1967 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1968 struct rte_eth_rss_conf *rss_conf)
1970 struct rte_eth_dev_data *data = dev->data;
1971 struct rte_eth_conf *eth_conf = &data->dev_conf;
1974 PMD_INIT_FUNC_TRACE();
1976 if (rss_conf->rss_hf) {
1977 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1979 DPAA2_PMD_ERR("Unable to set flow dist");
1983 ret = dpaa2_remove_flow_dist(dev, 0);
1985 DPAA2_PMD_ERR("Unable to remove flow dist");
1989 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1994 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1995 struct rte_eth_rss_conf *rss_conf)
1997 struct rte_eth_dev_data *data = dev->data;
1998 struct rte_eth_conf *eth_conf = &data->dev_conf;
	/* dpaa2 does not support rss_key, so length should be 0 */
2001 rss_conf->rss_key_len = 0;
2002 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2006 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2007 int eth_rx_queue_id,
2008 struct dpaa2_dpcon_dev *dpcon,
2009 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2011 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2012 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2013 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2014 uint8_t flow_id = dpaa2_ethq->flow_id;
2015 struct dpni_queue cfg;
2016 uint8_t options, priority;
2019 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2020 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2021 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2022 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2023 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2024 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2028 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2029 (dpcon->num_priorities - 1);
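	/* e.g. ev.priority = RTE_EVENT_DEV_PRIORITY_LOWEST (255) on a DPCON
	 * with 8 priorities yields (255 / 255) * (8 - 1) = 7, the lowest
	 * hardware priority.
	 */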
2031 memset(&cfg, 0, sizeof(struct dpni_queue));
2032 options = DPNI_QUEUE_OPT_DEST;
2033 cfg.destination.type = DPNI_DEST_DPCON;
2034 cfg.destination.id = dpcon->dpcon_id;
2035 cfg.destination.priority = priority;
2037 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2038 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2039 cfg.destination.hold_active = 1;
2042 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2043 !eth_priv->en_ordered) {
2044 struct opr_cfg ocfg;
2046 /* Restoration window size = 256 frames */
2048 /* Restoration window size = 512 frames for LX2 */
2049 if (dpaa2_svr_family == SVR_LX2160A)
2051 /* Auto advance NESN window enabled */
2053 /* Late arrival window size disabled */
		/* ORL resource exhaustion advance NESN disabled */
2057 /* Loose ordering enabled */
2059 eth_priv->en_loose_ordered = 1;
2060 /* Strict ordering enabled if explicitly set */
2061 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2063 eth_priv->en_loose_ordered = 0;
2066 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2067 dpaa2_ethq->tc_index, flow_id,
2068 OPR_OPT_CREATE, &ocfg);
2070 DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
2074 eth_priv->en_ordered = 1;
2077 options |= DPNI_QUEUE_OPT_USER_CTX;
2078 cfg.user_context = (size_t)(dpaa2_ethq);
2080 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2081 dpaa2_ethq->tc_index, flow_id, options, &cfg);
2083 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2087 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2092 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2093 int eth_rx_queue_id)
2095 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2096 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2097 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2098 uint8_t flow_id = dpaa2_ethq->flow_id;
2099 struct dpni_queue cfg;
2103 memset(&cfg, 0, sizeof(struct dpni_queue));
2104 options = DPNI_QUEUE_OPT_DEST;
2105 cfg.destination.type = DPNI_DEST_NONE;
2107 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2108 dpaa2_ethq->tc_index, flow_id, options, &cfg);
2110 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2116 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2120 for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2121 if (dpaa2_supported_filter_ops[i] == filter_op)
2128 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2129 enum rte_filter_type filter_type,
2130 enum rte_filter_op filter_op,
2138 switch (filter_type) {
2139 case RTE_ETH_FILTER_GENERIC:
2140 if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2144 *(const void **)arg = &dpaa2_flow_ops;
2145 dpaa2_filter_type |= filter_type;
2148 RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
2156 static struct eth_dev_ops dpaa2_ethdev_ops = {
2157 .dev_configure = dpaa2_eth_dev_configure,
2158 .dev_start = dpaa2_dev_start,
2159 .dev_stop = dpaa2_dev_stop,
2160 .dev_close = dpaa2_dev_close,
2161 .promiscuous_enable = dpaa2_dev_promiscuous_enable,
2162 .promiscuous_disable = dpaa2_dev_promiscuous_disable,
2163 .allmulticast_enable = dpaa2_dev_allmulticast_enable,
2164 .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2165 .dev_set_link_up = dpaa2_dev_set_link_up,
2166 .dev_set_link_down = dpaa2_dev_set_link_down,
2167 .link_update = dpaa2_dev_link_update,
2168 .stats_get = dpaa2_dev_stats_get,
2169 .xstats_get = dpaa2_dev_xstats_get,
2170 .xstats_get_by_id = dpaa2_xstats_get_by_id,
2171 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2172 .xstats_get_names = dpaa2_xstats_get_names,
2173 .stats_reset = dpaa2_dev_stats_reset,
2174 .xstats_reset = dpaa2_dev_stats_reset,
2175 .fw_version_get = dpaa2_fw_version_get,
2176 .dev_infos_get = dpaa2_dev_info_get,
2177 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2178 .mtu_set = dpaa2_dev_mtu_set,
2179 .vlan_filter_set = dpaa2_vlan_filter_set,
2180 .vlan_offload_set = dpaa2_vlan_offload_set,
2181 .vlan_tpid_set = dpaa2_vlan_tpid_set,
2182 .rx_queue_setup = dpaa2_dev_rx_queue_setup,
2183 .rx_queue_release = dpaa2_dev_rx_queue_release,
2184 .tx_queue_setup = dpaa2_dev_tx_queue_setup,
2185 .tx_queue_release = dpaa2_dev_tx_queue_release,
2186 .rx_queue_count = dpaa2_dev_rx_queue_count,
2187 .flow_ctrl_get = dpaa2_flow_ctrl_get,
2188 .flow_ctrl_set = dpaa2_flow_ctrl_set,
2189 .mac_addr_add = dpaa2_dev_add_mac_addr,
2190 .mac_addr_remove = dpaa2_dev_remove_mac_addr,
2191 .mac_addr_set = dpaa2_dev_set_mac_addr,
2192 .rss_hash_update = dpaa2_dev_rss_hash_update,
2193 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
2194 .filter_ctrl = dpaa2_dev_flow_ctrl,
2195 #if defined(RTE_LIBRTE_IEEE1588)
2196 .timesync_enable = dpaa2_timesync_enable,
2197 .timesync_disable = dpaa2_timesync_disable,
2198 .timesync_read_time = dpaa2_timesync_read_time,
2199 .timesync_write_time = dpaa2_timesync_write_time,
2200 .timesync_adjust_time = dpaa2_timesync_adjust_time,
2201 .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2202 .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
/* Populate the MAC address from the physically available (u-boot/firmware)
 * address and/or one set by higher layers like MC (restool) etc.
 * Returns the table of MAC entries (multiple entries).
 */
2211 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2212 struct rte_ether_addr *mac_entry)
2215 struct rte_ether_addr phy_mac, prime_mac;
2217 memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2218 memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2220 /* Get the physical device MAC address */
2221 ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2222 phy_mac.addr_bytes);
2224 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2228 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2229 prime_mac.addr_bytes);
2231 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
	/* Now that both MACs have been obtained, do:
	 * if not_empty_mac(phy) && phy != prime, overwrite prime with phy;
	 * if empty_mac(phy), return prime;
	 * if both are empty, create a random MAC, set it as prime and return.
	 */
2241 if (!rte_is_zero_ether_addr(&phy_mac)) {
2242 /* If the addresses are not same, overwrite prime */
2243 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2244 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2246 phy_mac.addr_bytes);
2248 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2252 memcpy(&prime_mac, &phy_mac,
2253 sizeof(struct rte_ether_addr));
2255 } else if (rte_is_zero_ether_addr(&prime_mac)) {
2256 /* In case phys and prime, both are zero, create random MAC */
2257 rte_eth_random_addr(prime_mac.addr_bytes);
2258 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2260 prime_mac.addr_bytes);
2262 DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
	/* prime_mac is the final MAC address */
2268 memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2276 check_devargs_handler(__rte_unused const char *key, const char *value,
2277 __rte_unused void *opaque)
2279 if (strcmp(value, "1"))
2286 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2288 struct rte_kvargs *kvlist;
2293 kvlist = rte_kvargs_parse(devargs->args, NULL);
2297 if (!rte_kvargs_count(kvlist, key)) {
2298 rte_kvargs_free(kvlist);
2302 if (rte_kvargs_process(kvlist, key,
2303 check_devargs_handler, NULL) < 0) {
2304 rte_kvargs_free(kvlist);
2307 rte_kvargs_free(kvlist);
2313 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2315 struct rte_device *dev = eth_dev->device;
2316 struct rte_dpaa2_device *dpaa2_dev;
2317 struct fsl_mc_io *dpni_dev;
2318 struct dpni_attr attr;
2319 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2320 struct dpni_buffer_layout layout;
2323 PMD_INIT_FUNC_TRACE();
2325 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2327 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2330 dpni_dev->regs = rte_mcp_ptr_list[0];
2331 eth_dev->process_private = (void *)dpni_dev;
2333 /* For secondary processes, the primary has done all the work */
2334 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* In case of secondary, only the burst and ops APIs need to
		 * be plugged.
		 */
2338 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2339 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2340 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2341 else if (dpaa2_get_devargs(dev->devargs,
2342 DRIVER_NO_PREFETCH_MODE))
2343 eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2345 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2346 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2350 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2352 hw_id = dpaa2_dev->object_id;
2353 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2356 "Failure in opening dpni@%d with err code %d",
2362 /* Clean the device first */
2363 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2365 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2370 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2373 "Failure in get dpni@%d attribute, err code %d",
2378 priv->num_rx_tc = attr.num_rx_tcs;
2379 /* only if the custom CG is enabled */
2380 if (attr.options & DPNI_OPT_CUSTOM_CG)
2381 priv->max_cgs = attr.num_cgs;
2385 for (i = 0; i < priv->max_cgs; i++)
2386 priv->cgid_in_use[i] = 0;
2388 for (i = 0; i < attr.num_rx_tcs; i++)
2389 priv->nb_rx_queues += attr.num_queues;
2391 /* Using number of TX queues as number of TX TCs */
2392 priv->nb_tx_queues = attr.num_tx_tcs;
2394 DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2395 priv->num_rx_tc, priv->nb_rx_queues,
2396 priv->nb_tx_queues, priv->max_cgs);
2398 priv->hw = dpni_dev;
2399 priv->hw_id = hw_id;
2400 priv->options = attr.options;
2401 priv->max_mac_filters = attr.mac_filter_entries;
2402 priv->max_vlan_filters = attr.vlan_filter_entries;
2404 #if defined(RTE_LIBRTE_IEEE1588)
2405 priv->tx_conf_en = 1;
2407 priv->tx_conf_en = 0;
2410 /* Allocate memory for hardware structure for queues */
2411 ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2413 DPAA2_PMD_ERR("Queue allocation Failed");
	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
2421 eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2422 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2423 if (eth_dev->data->mac_addrs == NULL) {
2425 "Failed to allocate %d bytes needed to store MAC addresses",
2426 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2433 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2434 rte_free(eth_dev->data->mac_addrs);
2435 eth_dev->data->mac_addrs = NULL;
2439 /* ... tx buffer layout ... */
2440 memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2441 if (priv->tx_conf_en) {
2442 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2443 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2444 layout.pass_timestamp = true;
2446 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2448 layout.pass_frame_status = 1;
2449 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2450 DPNI_QUEUE_TX, &layout);
2452 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2456 /* ... tx-conf and error buffer layout ... */
2457 memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2458 if (priv->tx_conf_en) {
2459 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2460 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2461 layout.pass_timestamp = true;
2463 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2465 layout.pass_frame_status = 1;
2466 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2467 DPNI_QUEUE_TX_CONFIRM, &layout);
2469 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2474 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2476 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2477 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2478 DPAA2_PMD_INFO("Loopback mode");
2479 } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2480 eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2481 DPAA2_PMD_INFO("No Prefetch mode");
2483 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2485 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	/* Init fields w.r.t. classification */
2488 memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
2489 priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2490 if (!priv->extract.qos_extract_param) {
2491 DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
2492 " classificaiton ", ret);
2495 for (i = 0; i < MAX_TCS; i++) {
2496 memset(&priv->extract.fs_key_cfg[i], 0,
2497 sizeof(struct dpkg_profile_cfg));
2498 priv->extract.fs_extract_param[i] =
2499 (size_t)rte_malloc(NULL, 256, 64);
2500 if (!priv->extract.fs_extract_param[i]) {
2501 DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
2507 ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2508 RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2511 DPAA2_PMD_ERR("Unable to set mtu. check config");
	/* TODO: To enable soft parser support, the DPAA2 driver needs to
	 * integrate with an external entity to receive the byte code for the
	 * software sequence, which is then offloaded to the H/W using the MC
	 * interface. Currently it is assumed that the DPAA2 driver has the
	 * byte code by some means, and the same is offloaded to the H/W.
	 */
2521 if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2522 WRIOP_SS_INITIALIZER(priv);
2523 ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2525 DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
2530 ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2533 DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
2538 RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2541 dpaa2_dev_uninit(eth_dev);
2546 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2548 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2549 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private;
2552 PMD_INIT_FUNC_TRACE();
2554 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2558 DPAA2_PMD_WARN("Already closed or not started");
2562 dpaa2_dev_close(eth_dev);
2564 dpaa2_free_rx_tx_queues(eth_dev);
2566 /* Close the device at underlying layer*/
2567 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2570 "Failure closing dpni device with err code %d",
2574 /* Free the allocated memory for ethernet private data and dpni*/
2576 eth_dev->process_private = NULL;
2579 for (i = 0; i < MAX_TCS; i++) {
2580 if (priv->extract.fs_extract_param[i])
2581 rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
2584 if (priv->extract.qos_extract_param)
2585 rte_free((void *)(size_t)priv->extract.qos_extract_param);
2587 eth_dev->dev_ops = NULL;
2588 eth_dev->rx_pkt_burst = NULL;
2589 eth_dev->tx_pkt_burst = NULL;
2591 DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2596 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2597 struct rte_dpaa2_device *dpaa2_dev)
2599 struct rte_eth_dev *eth_dev;
2600 struct dpaa2_dev_priv *dev_priv;
2603 if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2604 RTE_PKTMBUF_HEADROOM) {
2606 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2607 RTE_PKTMBUF_HEADROOM,
2608 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2613 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2614 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2617 dev_priv = rte_zmalloc("ethdev private structure",
2618 sizeof(struct dpaa2_dev_priv),
2619 RTE_CACHE_LINE_SIZE);
2620 if (dev_priv == NULL) {
2622 "Unable to allocate memory for private data");
2623 rte_eth_dev_release_port(eth_dev);
2626 eth_dev->data->dev_private = (void *)dev_priv;
2627 /* Store a pointer to eth_dev in dev_private */
2628 dev_priv->eth_dev = eth_dev;
2629 dev_priv->tx_conf_en = 0;
2631 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2633 DPAA2_PMD_DEBUG("returning enodev");
2638 eth_dev->device = &dpaa2_dev->device;
2640 dpaa2_dev->eth_dev = eth_dev;
2641 eth_dev->data->rx_mbuf_alloc_failed = 0;
2643 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2644 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2646 /* Invoke PMD device initialization function */
2647 diag = dpaa2_dev_init(eth_dev);
2649 rte_eth_dev_probing_finish(eth_dev);
2653 rte_eth_dev_release_port(eth_dev);
2658 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2660 struct rte_eth_dev *eth_dev;
2662 eth_dev = dpaa2_dev->eth_dev;
2663 dpaa2_dev_uninit(eth_dev);
2665 rte_eth_dev_release_port(eth_dev);
2670 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2671 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2672 .drv_type = DPAA2_ETH,
2673 .probe = rte_dpaa2_probe,
2674 .remove = rte_dpaa2_remove,
2677 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2678 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2679 DRIVER_LOOPBACK_MODE "=<int> "
2680 DRIVER_NO_PREFETCH_MODE "=<int>");
2681 RTE_INIT(dpaa2_pmd_init_log)
2683 dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
2684 if (dpaa2_logtype_pmd >= 0)
2685 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);