/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2020 NXP
 */

#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <dpaa_flow.h>
#include <rte_pmd_dpaa.h>

#include <fmlib/fm_ext.h>
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
        DEV_RX_OFFLOAD_JUMBO_FRAME |
        DEV_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
        DEV_RX_OFFLOAD_IPV4_CKSUM |
        DEV_RX_OFFLOAD_UDP_CKSUM |
        DEV_RX_OFFLOAD_TCP_CKSUM |
        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
        DEV_TX_OFFLOAD_MT_LOCKFREE |
        DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
        DEV_TX_OFFLOAD_IPV4_CKSUM |
        DEV_TX_OFFLOAD_UDP_CKSUM |
        DEV_TX_OFFLOAD_TCP_CKSUM |
        DEV_TX_OFFLOAD_SCTP_CKSUM |
        DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_TX_OFFLOAD_MULTI_SEGS;
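
/* The "nodis" offloads above are performed unconditionally by the FMan
 * hardware: they are advertised as capabilities but cannot be switched
 * off per port. dpaa_eth_dev_configure() therefore only logs an INFO
 * message when an application leaves them unrequested.
 */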
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int fmc_q = 1;	/* Indicates the use of static fmc for distribution */
static int default_q;	/* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues by default - as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Queue indexes which are in push mode */
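
/* In push mode an FQ is bound to a dedicated channel/portal and frames are
 * delivered through DQRR callbacks; queues beyond the push-mode budget stay
 * in the default pull (poll) mode serviced by rte_eth_rx_burst().
 */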
/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;
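
/* Both thresholds are expressed in frames, not bytes: they are programmed
 * into per-queue congestion group records (CGRs), so QMan tail-drops new
 * enqueues once the instantaneous frame count in an FQ crosses the limit.
 * Tx taildrop stays off unless DPAA_TX_TAILDROP_THRESHOLD is set in the
 * environment (see dpaa_dev_init() below).
 */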
struct rte_dpaa_xstats_name_off {
    char name[RTE_ETH_XSTATS_NAME_SIZE];
    uint32_t offset;
};

/* xstats names are derived from the FMan counter register mnemonics */
static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
    {"rx_align_err",
        offsetof(struct dpaa_if_stats, raln)},
    {"rx_valid_pause",
        offsetof(struct dpaa_if_stats, rxpf)},
    {"rx_fcs_err",
        offsetof(struct dpaa_if_stats, rfcs)},
    {"rx_vlan_frame",
        offsetof(struct dpaa_if_stats, rvlan)},
    {"rx_frame_err",
        offsetof(struct dpaa_if_stats, rerr)},
    {"rx_drop_err",
        offsetof(struct dpaa_if_stats, rdrp)},
    {"rx_undersized",
        offsetof(struct dpaa_if_stats, rund)},
    {"rx_oversize_err",
        offsetof(struct dpaa_if_stats, rovr)},
    {"rx_fragment_pkt",
        offsetof(struct dpaa_if_stats, rfrg)},
    {"tx_valid_pause",
        offsetof(struct dpaa_if_stats, txpf)},
    {"tx_fcs_err",
        offsetof(struct dpaa_if_stats, terr)},
    {"tx_vlan_frame",
        offsetof(struct dpaa_if_stats, tvlan)},
    {"tx_undersized",
        offsetof(struct dpaa_if_stats, tund)},
};
static struct rte_dpaa_driver rte_dpaa_pmd;

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);
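
/* Default pull-mode Rx FQ configuration: keep the FQ descriptor
 * preferentially in cache, avoid blocking the portal (AVOIDBLOCK) and,
 * except on LS1046A where stashing becomes a multi-core bottleneck, stash
 * the annotation, data and FQ-context cache lines into the core's cache.
 */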
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
    memset(opts, 0, sizeof(struct qm_mcc_initfq));
    opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
    opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
                QM_FQCTRL_PREFERINCACHE;
    opts->fqd.context_a.stashing.exclusive = 0;
    if (dpaa_svr_family != SVR_LS1046A_FAMILY)
        opts->fqd.context_a.stashing.annotation_cl =
            DPAA_IF_RX_ANNOTATION_STASH;
    opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
    opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
    uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
                + VLAN_TAG_SIZE;
    uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

    PMD_INIT_FUNC_TRACE();

    if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
        return -EINVAL;
    /*
     * Refuse mtu that requires the support of scattered packets
     * when this feature has not been enabled before.
     */
    if (dev->data->min_rx_buf_size &&
        !dev->data->scattered_rx && frame_size > buffsz) {
        DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
        return -EINVAL;
    }

    /* check <seg size> * <max_seg> >= max_frame */
    if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
        (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
        DPAA_PMD_ERR("Too big to fit for Max SG list %d",
                 buffsz * DPAA_SGT_MAX_ENTRIES);
        return -EINVAL;
    }

    if (frame_size > RTE_ETHER_MAX_LEN)
        dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
    else
        dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

    dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

    fman_if_set_maxfrm(dev->process_private, frame_size);

    return 0;
}
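
/* Worked example (assuming VLAN_TAG_SIZE == 4): a standard MTU of 1500
 * gives frame_size = 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN)
 * = 1522 bytes, which must fit either in one buffer or, with scatter
 * enabled, in at most DPAA_SGT_MAX_ENTRIES buffers.
 */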
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
    struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
    uint64_t rx_offloads = eth_conf->rxmode.offloads;
    uint64_t tx_offloads = eth_conf->txmode.offloads;
    struct rte_device *rdev = dev->device;
    struct rte_dpaa_device *dpaa_dev;
    struct fman_if *fif = dev->process_private;
    struct __fman_if *__fif;
    struct rte_intr_handle *intr_handle;
    int ret;

    PMD_INIT_FUNC_TRACE();

    dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
    intr_handle = &dpaa_dev->intr_handle;
    __fif = container_of(fif, struct __fman_if, __if);

    /* Rx offloads which are enabled by default */
    if (dev_rx_offloads_nodis & ~rx_offloads) {
        DPAA_PMD_INFO(
        "Some of rx offloads enabled by default - requested 0x%" PRIx64
        " fixed are 0x%" PRIx64,
        rx_offloads, dev_rx_offloads_nodis);
    }

    /* Tx offloads which are enabled by default */
    if (dev_tx_offloads_nodis & ~tx_offloads) {
        DPAA_PMD_INFO(
        "Some of tx offloads enabled by default - requested 0x%" PRIx64
        " fixed are 0x%" PRIx64,
        tx_offloads, dev_tx_offloads_nodis);
    }

    if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
        uint32_t max_len;

        DPAA_PMD_DEBUG("enabling jumbo");

        if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
            DPAA_MAX_RX_PKT_LEN)
            max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        else {
            DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
                "supported is %d",
                dev->data->dev_conf.rxmode.max_rx_pkt_len,
                DPAA_MAX_RX_PKT_LEN);
            max_len = DPAA_MAX_RX_PKT_LEN;
        }

        fman_if_set_maxfrm(dev->process_private, max_len);
        dev->data->mtu = max_len
            - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
    }

    if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
        DPAA_PMD_DEBUG("enabling scatter mode");
        fman_if_set_sg(dev->process_private, 1);
        dev->data->scattered_rx = 1;
    }

    if (!(default_q || fmc_q)) {
        if (dpaa_fm_config(dev,
            eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
            dpaa_write_fm_config_to_file();
            DPAA_PMD_ERR("FM port configuration: Failed\n");
            return -1;
        }
        dpaa_write_fm_config_to_file();
    }

    /* if the interrupts were configured on this device */
    if (intr_handle && intr_handle->fd) {
        if (dev->data->dev_conf.intr_conf.lsc != 0)
            rte_intr_callback_register(intr_handle,
                    dpaa_interrupt_handler,
                    (void *)dev);

        ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
        if (ret) {
            if (dev->data->dev_conf.intr_conf.lsc != 0) {
                rte_intr_callback_unregister(intr_handle,
                    dpaa_interrupt_handler,
                    (void *)dev);
                if (ret == EINVAL)
                    printf("Failed to enable interrupt: Not Supported\n");
                else
                    printf("Failed to enable interrupt\n");
            }
            dev->data->dev_conf.intr_conf.lsc = 0;
            dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        }
    }
    return 0;
}
static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
    static const uint32_t ptypes[] = {
        RTE_PTYPE_L2_ETHER,
        RTE_PTYPE_L2_ETHER_VLAN,
        RTE_PTYPE_L2_ETHER_ARP,
        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
        RTE_PTYPE_L4_ICMP,
        RTE_PTYPE_L4_TCP,
        RTE_PTYPE_L4_UDP,
        RTE_PTYPE_L4_FRAG,
        RTE_PTYPE_L4_SCTP
    };

    PMD_INIT_FUNC_TRACE();

    if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
        return ptypes;
    return NULL;
}
static void dpaa_interrupt_handler(void *param)
{
    struct rte_eth_dev *dev = param;
    struct rte_device *rdev = dev->device;
    struct rte_dpaa_device *dpaa_dev;
    struct rte_intr_handle *intr_handle;
    uint64_t buf;
    int bytes_read;

    dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
    intr_handle = &dpaa_dev->intr_handle;

    bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
    if (bytes_read < 0)
        DPAA_PMD_ERR("Error reading eventfd\n");
    dpaa_eth_link_update(dev, 0);
    rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    if (!(default_q || fmc_q))
        dpaa_write_fm_config_to_file();

    /* Change tx callback to the real one */
    if (dpaa_intf->cgr_tx)
        dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
    else
        dev->tx_pkt_burst = dpaa_eth_queue_tx;

    fman_if_enable_rx(dev->process_private);

    return 0;
}
static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
    struct fman_if *fif = dev->process_private;

    PMD_INIT_FUNC_TRACE();

    if (!fif->is_shared_mac)
        fman_if_disable_rx(fif);
    dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}
static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
    struct fman_if *fif = dev->process_private;
    struct __fman_if *__fif;
    struct rte_device *rdev = dev->device;
    struct rte_dpaa_device *dpaa_dev;
    struct rte_intr_handle *intr_handle;
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    int loop;

    PMD_INIT_FUNC_TRACE();

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    if (!dpaa_intf) {
        DPAA_PMD_WARN("Already closed or not started");
        return -1;
    }

    /* DPAA FM deconfig */
    if (!(default_q || fmc_q)) {
        if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
            DPAA_PMD_WARN("DPAA FM deconfig failed\n");
    }

    dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
    intr_handle = &dpaa_dev->intr_handle;
    __fif = container_of(fif, struct __fman_if, __if);

    dpaa_eth_dev_stop(dev);

    if (intr_handle && intr_handle->fd &&
        dev->data->dev_conf.intr_conf.lsc != 0) {
        dpaa_intr_disable(__fif->node_name);
        rte_intr_callback_unregister(intr_handle,
                         dpaa_interrupt_handler,
                         (void *)dev);
    }

    /* release configuration memory */
    if (dpaa_intf->fc_conf)
        rte_free(dpaa_intf->fc_conf);

    /* Release RX congestion Groups */
    if (dpaa_intf->cgr_rx) {
        for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
            qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

        /* release the range starting from the base cgrid; indexing
         * with the post-loop counter would read past the array end
         */
        qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
                     dpaa_intf->nb_rx_queues);
    }

    rte_free(dpaa_intf->cgr_rx);
    dpaa_intf->cgr_rx = NULL;
    /* Release TX congestion Groups */
    if (dpaa_intf->cgr_tx) {
        for (loop = 0; loop < MAX_DPAA_CORES; loop++)
            qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);

        qman_release_cgrid_range(dpaa_intf->cgr_tx[0].cgrid,
                     MAX_DPAA_CORES);
        rte_free(dpaa_intf->cgr_tx);
        dpaa_intf->cgr_tx = NULL;
    }

    rte_free(dpaa_intf->rx_queues);
    dpaa_intf->rx_queues = NULL;

    rte_free(dpaa_intf->tx_queues);
    dpaa_intf->tx_queues = NULL;

    dev->rx_pkt_burst = NULL;
    dev->tx_pkt_burst = NULL;

    return 0;
}
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
            char *fw_version, size_t fw_size)
{
    int ret;
    FILE *svr_file = NULL;
    unsigned int svr_ver = 0;

    PMD_INIT_FUNC_TRACE();

    svr_file = fopen(DPAA_SOC_ID_FILE, "r");
    if (!svr_file) {
        DPAA_PMD_ERR("Unable to open SoC device");
        return -ENOTSUP; /* Not supported on this infra */
    }
    if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
        dpaa_svr_family = svr_ver & SVR_MASK;
    else
        DPAA_PMD_ERR("Unable to read SoC device");

    fclose(svr_file);

    ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
               svr_ver, fman_ip_rev);
    ret += 1; /* add the size of '\0' */

    if (fw_size < (uint32_t)ret)
        return ret;
    else
        return 0;
}
static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
                 struct rte_eth_dev_info *dev_info)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct fman_if *fif = dev->process_private;

    DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

    dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
    dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
    dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
    dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
    dev_info->max_hash_mac_addrs = 0;
    dev_info->max_vfs = 0;
    dev_info->max_vmdq_pools = ETH_16_POOLS;
    dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

    if (fif->mac_type == fman_mac_1g) {
        dev_info->speed_capa = ETH_LINK_SPEED_1G;
    } else if (fif->mac_type == fman_mac_2_5g) {
        dev_info->speed_capa = ETH_LINK_SPEED_1G
                    | ETH_LINK_SPEED_2_5G;
    } else if (fif->mac_type == fman_mac_10g) {
        dev_info->speed_capa = ETH_LINK_SPEED_1G
                    | ETH_LINK_SPEED_2_5G
                    | ETH_LINK_SPEED_10G;
    } else {
        DPAA_PMD_ERR("invalid link_speed: %s, %d",
                 dpaa_intf->name, fif->mac_type);
        return -EINVAL;
    }

    dev_info->rx_offload_capa = dev_rx_offloads_sup |
                    dev_rx_offloads_nodis;
    dev_info->tx_offload_capa = dev_tx_offloads_sup |
                    dev_tx_offloads_nodis;
    dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
    dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
    dev_info->default_rxportconf.nb_queues = 1;
    dev_info->default_txportconf.nb_queues = 1;
    dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
    dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

    return 0;
}
static int
dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
            __rte_unused uint16_t queue_id,
            struct rte_eth_burst_mode *mode)
{
    struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
    int ret = -EINVAL;
    unsigned int i;
    const struct burst_info {
        uint64_t flags;
        const char *output;
    } rx_offload_map[] = {
            {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
            {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
            {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
            {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
            {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
            {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
            {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
    };

    /* Update Rx offload info */
    for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
        if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
            snprintf(mode->info, sizeof(mode->info), "%s",
                 rx_offload_map[i].output);
            ret = 0;
            break;
        }
    }
    return ret;
}
static int
dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
            __rte_unused uint16_t queue_id,
            struct rte_eth_burst_mode *mode)
{
    struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
    int ret = -EINVAL;
    unsigned int i;
    const struct burst_info {
        uint64_t flags;
        const char *output;
    } tx_offload_map[] = {
            {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
            {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
            {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
            {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
            {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
            {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
            {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
            {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
    };

    /* Update Tx offload info */
    for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
        if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
            snprintf(mode->info, sizeof(mode->info), "%s",
                 tx_offload_map[i].output);
            ret = 0;
            break;
        }
    }
    return ret;
}
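
/* Note: because of the early break, both burst-mode helpers above report
 * only the first offload flag that matches the current configuration in
 * mode->info, not the full list of enabled offloads.
 */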
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                int wait_to_complete __rte_unused)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct rte_eth_link *link = &dev->data->dev_link;
    struct fman_if *fif = dev->process_private;
    struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
    int ret;

    PMD_INIT_FUNC_TRACE();

    if (fif->mac_type == fman_mac_1g)
        link->link_speed = ETH_SPEED_NUM_1G;
    else if (fif->mac_type == fman_mac_2_5g)
        link->link_speed = ETH_SPEED_NUM_2_5G;
    else if (fif->mac_type == fman_mac_10g)
        link->link_speed = ETH_SPEED_NUM_10G;
    else
        DPAA_PMD_ERR("invalid link_speed: %s, %d",
                 dpaa_intf->name, fif->mac_type);

    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
        ret = dpaa_get_link_status(__fif->node_name);
        if (ret < 0)
            return ret;
        link->link_status = ret;
    } else {
        link->link_status = dpaa_intf->valid;
    }

    link->link_duplex = ETH_LINK_FULL_DUPLEX;
    link->link_autoneg = ETH_LINK_AUTONEG;

    DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
              link->link_status ? "Up" : "Down");
    return 0;
}
static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
                  struct rte_eth_stats *stats)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_stats_get(dev->process_private, stats);
    return 0;
}

static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_stats_reset(dev->process_private);

    return 0;
}
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
            unsigned int n)
{
    unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
    uint64_t values[sizeof(struct dpaa_if_stats) / 8];

    if (n < num)
        return num;

    if (xstats == NULL)
        return 0;

    fman_if_stats_get_all(dev->process_private, values,
                  sizeof(struct dpaa_if_stats) / 8);

    for (i = 0; i < num; i++) {
        xstats[i].id = i;
        xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
    }
    return i;
}
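
/* struct dpaa_if_stats is effectively an array of 64-bit hardware counters,
 * so dividing a field's byte offset by 8 yields its index into the u64
 * snapshot returned by fman_if_stats_get_all(); the same trick is used in
 * dpaa_xstats_get_by_id() below.
 */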
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
              struct rte_eth_xstat_name *xstats_names,
              unsigned int limit)
{
    unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

    if (limit < stat_cnt)
        return stat_cnt;

    if (xstats_names != NULL)
        for (i = 0; i < stat_cnt; i++)
            strlcpy(xstats_names[i].name,
                dpaa_xstats_strings[i].name,
                sizeof(xstats_names[i].name));

    return stat_cnt;
}
static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
              uint64_t *values, unsigned int n)
{
    unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
    uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

    if (!ids) {
        if (n < stat_cnt)
            return stat_cnt;

        if (!values)
            return 0;

        fman_if_stats_get_all(dev->process_private, values_copy,
                      sizeof(struct dpaa_if_stats) / 8);

        for (i = 0; i < stat_cnt; i++)
            values[i] =
                values_copy[dpaa_xstats_strings[i].offset / 8];

        return stat_cnt;
    }

    dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

    for (i = 0; i < n; i++) {
        if (ids[i] >= stat_cnt) {
            DPAA_PMD_ERR("id value isn't valid");
            return -1;
        }
        values[i] = values_copy[ids[i]];
    }
    return n;
}
static int
dpaa_xstats_get_names_by_id(
    struct rte_eth_dev *dev,
    struct rte_eth_xstat_name *xstats_names,
    const uint64_t *ids,
    unsigned int limit)
{
    unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
    struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

    if (!ids)
        return dpaa_xstats_get_names(dev, xstats_names, limit);

    dpaa_xstats_get_names(dev, xstats_names_copy, limit);

    for (i = 0; i < limit; i++) {
        if (ids[i] >= stat_cnt) {
            DPAA_PMD_ERR("id value isn't valid");
            return -1;
        }
        strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
    }
    return limit;
}
static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_promiscuous_enable(dev->process_private);

    return 0;
}

static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_promiscuous_disable(dev->process_private);

    return 0;
}

static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_set_mcast_filter_table(dev->process_private);

    return 0;
}

static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_reset_mcast_filter_table(dev->process_private);

    return 0;
}
static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct fman_if_ic_params icp;
    uint32_t fd_offset;
    uint32_t bp_size;

    memset(&icp, 0, sizeof(icp));
    /* set ICEOF to the default value, which is 0 */
    icp.iciof = DEFAULT_ICIOF;
    icp.iceof = DEFAULT_RX_ICEOF;
    icp.icsz = DEFAULT_ICSZ;
    fman_if_set_ic_params(dev->process_private, &icp);

    fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
    fman_if_set_fdoff(dev->process_private, fd_offset);

    /* Buffer pool size should be equal to Dataroom Size */
    bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);

    fman_if_set_bp(dev->process_private,
               dpaa_intf->bp_info->mp->size,
               dpaa_intf->bp_info->bpid, bp_size);
}
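
/* Roughly: the frame-data offset programmed above reserves the mbuf
 * headroom plus the hardware annotation area in front of each received
 * frame, so FMan writes packet data exactly where the mbuf expects it;
 * the internal-context (IC) parameters control how much annotation FMan
 * copies per frame.
 */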
static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
                         int8_t vsp_id, uint32_t bpid)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct fman_if *fif = dev->process_private;

    if (fif->num_profiles) {
        if (vsp_id < 0)
            vsp_id = fif->base_profile_id;
    } else {
        if (vsp_id < 0)
            vsp_id = 0;
    }

    if (dpaa_intf->vsp_bpid[vsp_id] &&
        bpid != dpaa_intf->vsp_bpid[vsp_id]) {
        DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
        return -1;
    }

    return 0;
}
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                uint16_t nb_desc,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mp)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct fman_if *fif = dev->process_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
    struct qm_mcc_initfq opts = {0};
    u32 flags = 0;
    int ret;
    u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

    PMD_INIT_FUNC_TRACE();

    if (queue_idx >= dev->data->nb_rx_queues) {
        rte_errno = EOVERFLOW;
        DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
                 (void *)dev, queue_idx, dev->data->nb_rx_queues);
        return -rte_errno;
    }

    /* Rx deferred start is not supported */
    if (rx_conf->rx_deferred_start) {
        DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
        return -EINVAL;
    }
    rxq->nb_desc = UINT16_MAX;
    rxq->offloads = rx_conf->offloads;

    DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
              queue_idx, rxq->fqid);

    if (!fif->num_profiles) {
        if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
            dpaa_intf->bp_info->mp != mp) {
            DPAA_PMD_WARN("Multiple pools on same interface not"
                      " supported");
            return -EINVAL;
        }
    } else {
        if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
            DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
            return -EINVAL;
        }
    }

    /* Max packet can fit in single buffer */
    if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
        ;
    } else if (dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCATTER) {
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
            buffsz * DPAA_SGT_MAX_ENTRIES) {
            DPAA_PMD_ERR("max RxPkt size %d too big to fit "
                "MaxSGlist %d",
                dev->data->dev_conf.rxmode.max_rx_pkt_len,
                buffsz * DPAA_SGT_MAX_ENTRIES);
            rte_errno = EOVERFLOW;
            return -rte_errno;
        }
    } else {
        DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
            " larger than a single mbuf (%u) and scattered"
            " mode has not been requested",
            dev->data->dev_conf.rxmode.max_rx_pkt_len,
            buffsz - RTE_PKTMBUF_HEADROOM);
    }

    dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

    /* For shared interface, it's done in kernel, skip. */
    if (!fif->is_shared_mac)
        dpaa_fman_if_pool_setup(dev);
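
    /* VSP (virtual storage profile) handling: on FMan ports with multiple
     * profiles each Rx queue can be tied to its own buffer pool; the base
     * profile is reserved for traffic that stays with the kernel on
     * shared-MAC interfaces, which is why binding it under DPDK below is
     * treated as fatal.
     */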
    if (fif->num_profiles) {
        int8_t vsp_id = rxq->vsp_id;

        if (vsp_id >= 0) {
            ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
                    DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
                    fif);
            if (ret) {
                DPAA_PMD_ERR("dpaa_port_vsp_update failed");
                return ret;
            }
        } else {
            DPAA_PMD_INFO("Base profile is associated to"
                " RXQ fqid:%d\r\n", rxq->fqid);
            if (fif->is_shared_mac) {
                DPAA_PMD_ERR("Fatal: Base profile is associated"
                         " to shared interface on DPDK.");
                return -EINVAL;
            }
            dpaa_intf->vsp_bpid[fif->base_profile_id] =
                DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
        }
    } else {
        dpaa_intf->vsp_bpid[0] =
            DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
    }

    dpaa_intf->valid = 1;
    DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
               fman_if_get_sg_enable(fif),
               dev->data->dev_conf.rxmode.max_rx_pkt_len);
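
    /* Push mode: while free slots remain (dpaa_push_mode_max_queue), the
     * FQ is bound to a dedicated pool channel and scheduled portal, and
     * frames arrive through DQRR callbacks instead of being polled; the
     * portal's file descriptor (q_fd) later backs the per-queue Rx
     * interrupt machinery.
     */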
    /* checking if push mode only, no error check for now */
    if (!rxq->is_static &&
        dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
        struct qman_portal *qp;
        int q_fd;

        dpaa_push_queue_idx++;
        opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
        opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
                   QM_FQCTRL_CTXASTASHING |
                   QM_FQCTRL_PREFERINCACHE;
        opts.fqd.context_a.stashing.exclusive = 0;
        /* In multicore scenario stashing becomes a bottleneck on LS1046.
         * So do not enable stashing in this case
         */
        if (dpaa_svr_family != SVR_LS1046A_FAMILY)
            opts.fqd.context_a.stashing.annotation_cl =
                    DPAA_IF_RX_ANNOTATION_STASH;
        opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
        opts.fqd.context_a.stashing.context_cl =
                    DPAA_IF_RX_CONTEXT_STASH;

        /* Create a channel and associate given queue with the channel */
        qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
        opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
        opts.fqd.dest.channel = rxq->ch_id;
        opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
        flags = QMAN_INITFQ_FLAG_SCHED;

        /* Configure tail drop */
        if (dpaa_intf->cgr_rx) {
            opts.we_mask |= QM_INITFQ_WE_CGID;
            opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
            opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        }
        ret = qman_init_fq(rxq, flags, &opts);
        if (ret) {
            DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
                     "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
            return ret;
        }
        if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
            rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
        } else {
            rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
            rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
        }

        rxq->is_static = true;

        /* Allocate qman specific portals */
        qp = fsl_qman_fq_portal_create(&q_fd);
        if (!qp) {
            DPAA_PMD_ERR("Unable to alloc fq portal");
            return -1;
        }
        rxq->qp = qp;

        /* Set up the device interrupt handler */
        if (!dev->intr_handle) {
            struct rte_dpaa_device *dpaa_dev;
            struct rte_device *rdev = dev->device;

            dpaa_dev = container_of(rdev, struct rte_dpaa_device,
                        device);
            dev->intr_handle = &dpaa_dev->intr_handle;
            dev->intr_handle->intr_vec = rte_zmalloc(NULL,
                    dpaa_push_mode_max_queue, 0);
            if (!dev->intr_handle->intr_vec) {
                DPAA_PMD_ERR("intr_vec alloc failed");
                return -ENOMEM;
            }
            dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
            dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
        }

        dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
        dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
        dev->intr_handle->efds[queue_idx] = q_fd;
        rxq->q_fd = q_fd;
    }
    rxq->bp_array = rte_dpaa_bpid_info;
    dev->data->rx_queues[queue_idx] = rxq;

    /* configure the CGR size as per the desc size */
    if (dpaa_intf->cgr_rx) {
        struct qm_mcc_initcgr cgr_opts = {0};

        rxq->nb_desc = nb_desc;
        /* Enable tail drop with cgr on this queue */
        qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
        ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
        if (ret) {
            DPAA_PMD_WARN(
                "rx taildrop modify fail on fqid %d (ret=%d)",
                rxq->fqid, ret);
        }
    }
    /* Enable main queue to receive error packets also by default */
    fman_if_set_err_fqid(fif, rxq->fqid);
    return 0;
}
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
        int eth_rx_queue_id,
        u16 ch_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
    int ret;
    u32 flags = 0;
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
    struct qm_mcc_initfq opts = {0};

    if (dpaa_push_mode_max_queue)
        DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
                  "PUSH mode already enabled for first %d queues.\n"
                  "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
                  dpaa_push_mode_max_queue);

    dpaa_poll_queue_default_config(&opts);

    switch (queue_conf->ev.sched_type) {
    case RTE_SCHED_TYPE_ATOMIC:
        opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
        /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
         * configuration with HOLD_ACTIVE setting
         */
        opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
        rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
        break;
    case RTE_SCHED_TYPE_ORDERED:
        DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
        return -1;
    default:
        opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
        rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
        break;
    }

    opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
    opts.fqd.dest.channel = ch_id;
    opts.fqd.dest.wq = queue_conf->ev.priority;

    if (dpaa_intf->cgr_rx) {
        opts.we_mask |= QM_INITFQ_WE_CGID;
        opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
        opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
    }

    flags = QMAN_INITFQ_FLAG_SCHED;

    ret = qman_init_fq(rxq, flags, &opts);
    if (ret) {
        DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
        return ret;
    }

    /* copy configuration which needs to be filled during dequeue */
    memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
    dev->data->rx_queues[eth_rx_queue_id] = rxq;

    return ret;
}
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
               int eth_rx_queue_id)
{
    struct qm_mcc_initfq opts;
    int ret;
    u32 flags = 0;
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

    dpaa_poll_queue_default_config(&opts);

    if (dpaa_intf->cgr_rx) {
        opts.we_mask |= QM_INITFQ_WE_CGID;
        opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
        opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
    }

    ret = qman_init_fq(rxq, flags, &opts);
    if (ret) {
        DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
                 rxq->fqid, ret);
    }

    rxq->cb.dqrr_dpdk_cb = NULL;
    dev->data->rx_queues[eth_rx_queue_id] = NULL;

    return 0;
}

void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
    PMD_INIT_FUNC_TRACE();
}
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                uint16_t nb_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];

    PMD_INIT_FUNC_TRACE();

    /* Tx deferred start is not supported */
    if (tx_conf->tx_deferred_start) {
        DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
        return -EINVAL;
    }
    txq->nb_desc = UINT16_MAX;
    txq->offloads = tx_conf->offloads;

    if (queue_idx >= dev->data->nb_tx_queues) {
        rte_errno = EOVERFLOW;
        DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
                 (void *)dev, queue_idx, dev->data->nb_tx_queues);
        return -rte_errno;
    }

    DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
              queue_idx, txq->fqid);
    dev->data->tx_queues[queue_idx] = txq;

    return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
    PMD_INIT_FUNC_TRACE();
}
static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
    u32 frm_cnt = 0;

    PMD_INIT_FUNC_TRACE();

    if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
        DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
                   rx_queue_id, frm_cnt);
    }
    return frm_cnt;
}
static int dpaa_link_down(struct rte_eth_dev *dev)
{
    struct fman_if *fif = dev->process_private;
    struct __fman_if *__fif;

    PMD_INIT_FUNC_TRACE();

    __fif = container_of(fif, struct __fman_if, __if);

    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
        dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
    else
        dpaa_eth_dev_stop(dev);
    return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
    struct fman_if *fif = dev->process_private;
    struct __fman_if *__fif;

    PMD_INIT_FUNC_TRACE();

    __fif = container_of(fif, struct __fman_if, __if);

    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
        dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
    else
        dpaa_eth_dev_start(dev);
    return 0;
}
static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
           struct rte_eth_fc_conf *fc_conf)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct rte_eth_fc_conf *net_fc;

    PMD_INIT_FUNC_TRACE();

    if (!(dpaa_intf->fc_conf)) {
        dpaa_intf->fc_conf = rte_zmalloc(NULL,
            sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
        if (!dpaa_intf->fc_conf) {
            DPAA_PMD_ERR("unable to save flow control info");
            return -ENOMEM;
        }
    }
    net_fc = dpaa_intf->fc_conf;

    if (fc_conf->high_water < fc_conf->low_water) {
        DPAA_PMD_ERR("Incorrect Flow Control Configuration");
        return -EINVAL;
    }

    if (fc_conf->mode == RTE_FC_NONE) {
        return 0;
    } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
           fc_conf->mode == RTE_FC_FULL) {
        fman_if_set_fc_threshold(dev->process_private,
                     fc_conf->high_water,
                     fc_conf->low_water,
                     dpaa_intf->bp_info->bpid);
        if (fc_conf->pause_time)
            fman_if_set_fc_quanta(dev->process_private,
                          fc_conf->pause_time);
    }

    /* Save the information in dpaa device */
    net_fc->pause_time = fc_conf->pause_time;
    net_fc->high_water = fc_conf->high_water;
    net_fc->low_water = fc_conf->low_water;
    net_fc->send_xon = fc_conf->send_xon;
    net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
    net_fc->mode = fc_conf->mode;
    net_fc->autoneg = fc_conf->autoneg;

    return 0;
}
static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
           struct rte_eth_fc_conf *fc_conf)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
    int ret;

    PMD_INIT_FUNC_TRACE();

    if (net_fc) {
        fc_conf->pause_time = net_fc->pause_time;
        fc_conf->high_water = net_fc->high_water;
        fc_conf->low_water = net_fc->low_water;
        fc_conf->send_xon = net_fc->send_xon;
        fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
        fc_conf->mode = net_fc->mode;
        fc_conf->autoneg = net_fc->autoneg;
        return 0;
    }
    ret = fman_if_get_fc_threshold(dev->process_private);
    if (ret) {
        fc_conf->mode = RTE_FC_TX_PAUSE;
        fc_conf->pause_time =
            fman_if_get_fc_quanta(dev->process_private);
    } else {
        fc_conf->mode = RTE_FC_NONE;
    }

    return 0;
}
static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
              struct rte_ether_addr *addr,
              uint32_t index,
              __rte_unused uint32_t pool)
{
    int ret;

    PMD_INIT_FUNC_TRACE();

    ret = fman_if_add_mac_addr(dev->process_private,
                   addr->addr_bytes, index);

    if (ret)
        DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
    return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
             uint32_t index)
{
    PMD_INIT_FUNC_TRACE();

    fman_if_clear_mac_addr(dev->process_private, index);
}

static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
              struct rte_ether_addr *addr)
{
    int ret;

    PMD_INIT_FUNC_TRACE();

    ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
    if (ret)
        DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);

    return ret;
}
static int
dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
             struct rte_eth_rss_conf *rss_conf)
{
    struct rte_eth_dev_data *data = dev->data;
    struct rte_eth_conf *eth_conf = &data->dev_conf;

    PMD_INIT_FUNC_TRACE();

    if (!(default_q || fmc_q)) {
        if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
            DPAA_PMD_ERR("FM port configuration: Failed\n");
            return -1;
        }
        eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
    } else {
        DPAA_PMD_ERR("Function not supported\n");
        return -ENOTSUP;
    }
    return 0;
}

static int
dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
               struct rte_eth_rss_conf *rss_conf)
{
    struct rte_eth_dev_data *data = dev->data;
    struct rte_eth_conf *eth_conf = &data->dev_conf;

    /* dpaa does not support rss_key, so length should be 0 */
    rss_conf->rss_key_len = 0;
    rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
    return 0;
}
static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
                      uint16_t queue_id)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];

    if (!rxq->is_static)
        return -EINVAL;

    return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
}

static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
                       uint16_t queue_id)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
    uint32_t temp;
    ssize_t temp1;

    if (!rxq->is_static)
        return -EINVAL;

    qman_fq_portal_irqsource_remove(rxq->qp, ~0);

    temp1 = read(rxq->q_fd, &temp, sizeof(temp));
    if (temp1 != sizeof(temp))
        DPAA_PMD_ERR("irq read error");

    qman_fq_portal_thread_irq(rxq->qp);

    return 0;
}
static void
dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
          struct rte_eth_rxq_info *qinfo)
{
    struct dpaa_if *dpaa_intf = dev->data->dev_private;
    struct qman_fq *rxq;

    rxq = dev->data->rx_queues[queue_id];

    qinfo->mp = dpaa_intf->bp_info->mp;
    qinfo->scattered_rx = dev->data->scattered_rx;
    qinfo->nb_desc = rxq->nb_desc;
    qinfo->conf.rx_free_thresh = 1;
    qinfo->conf.rx_drop_en = 1;
    qinfo->conf.rx_deferred_start = 0;
    qinfo->conf.offloads = rxq->offloads;
}

static void
dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
          struct rte_eth_txq_info *qinfo)
{
    struct qman_fq *txq;

    txq = dev->data->tx_queues[queue_id];

    qinfo->nb_desc = txq->nb_desc;
    qinfo->conf.tx_thresh.pthresh = 0;
    qinfo->conf.tx_thresh.hthresh = 0;
    qinfo->conf.tx_thresh.wthresh = 0;

    qinfo->conf.tx_free_thresh = 0;
    qinfo->conf.tx_rs_thresh = 0;
    qinfo->conf.offloads = txq->offloads;
    qinfo->conf.tx_deferred_start = 0;
}
static struct eth_dev_ops dpaa_devops = {
    .dev_configure = dpaa_eth_dev_configure,
    .dev_start = dpaa_eth_dev_start,
    .dev_stop = dpaa_eth_dev_stop,
    .dev_close = dpaa_eth_dev_close,
    .dev_infos_get = dpaa_eth_dev_info,
    .dev_supported_ptypes_get = dpaa_supported_ptypes_get,

    .rx_queue_setup = dpaa_eth_rx_queue_setup,
    .tx_queue_setup = dpaa_eth_tx_queue_setup,
    .rx_queue_release = dpaa_eth_rx_queue_release,
    .tx_queue_release = dpaa_eth_tx_queue_release,
    .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
    .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
    .rxq_info_get = dpaa_rxq_info_get,
    .txq_info_get = dpaa_txq_info_get,

    .flow_ctrl_get = dpaa_flow_ctrl_get,
    .flow_ctrl_set = dpaa_flow_ctrl_set,

    .link_update = dpaa_eth_link_update,
    .stats_get = dpaa_eth_stats_get,
    .xstats_get = dpaa_dev_xstats_get,
    .xstats_get_by_id = dpaa_xstats_get_by_id,
    .xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
    .xstats_get_names = dpaa_xstats_get_names,
    .xstats_reset = dpaa_eth_stats_reset,
    .stats_reset = dpaa_eth_stats_reset,
    .promiscuous_enable = dpaa_eth_promiscuous_enable,
    .promiscuous_disable = dpaa_eth_promiscuous_disable,
    .allmulticast_enable = dpaa_eth_multicast_enable,
    .allmulticast_disable = dpaa_eth_multicast_disable,
    .mtu_set = dpaa_mtu_set,
    .dev_set_link_down = dpaa_link_down,
    .dev_set_link_up = dpaa_link_up,
    .mac_addr_add = dpaa_dev_add_mac_addr,
    .mac_addr_remove = dpaa_dev_remove_mac_addr,
    .mac_addr_set = dpaa_dev_set_mac_addr,

    .fw_version_get = dpaa_fw_version_get,

    .rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
    .rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
    .rss_hash_update = dpaa_dev_rss_hash_update,
    .rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
};
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
{
    if (strcmp(dev->device->driver->name,
           drv->driver.name))
        return false;

    return true;
}

static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
    return is_device_supported(dev, &rte_dpaa_pmd);
}

int
rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
{
    struct rte_eth_dev *dev;

    RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

    dev = &rte_eth_devices[port];

    if (!is_dpaa_supported(dev))
        return -ENOTSUP;

    if (on)
        fman_if_loopback_enable(dev->process_private);
    else
        fman_if_loopback_disable(dev->process_private);

    return 0;
}
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
                   struct fman_if *fman_intf)
{
    struct rte_eth_fc_conf *fc_conf;
    int ret;

    PMD_INIT_FUNC_TRACE();

    if (!(dpaa_intf->fc_conf)) {
        dpaa_intf->fc_conf = rte_zmalloc(NULL,
            sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
        if (!dpaa_intf->fc_conf) {
            DPAA_PMD_ERR("unable to save flow control info");
            return -ENOMEM;
        }
    }
    fc_conf = dpaa_intf->fc_conf;
    ret = fman_if_get_fc_threshold(fman_intf);
    if (ret) {
        fc_conf->mode = RTE_FC_TX_PAUSE;
        fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
    } else {
        fc_conf->mode = RTE_FC_NONE;
    }

    return 0;
}
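
/* Each Rx FQ can optionally be backed by a congestion group record (CGR):
 * the CGR watches the instantaneous frame count in the queue and tail-drops
 * new enqueues once td_threshold frames are pending, bounding per-queue
 * buffering instead of letting one queue drain the whole buffer pool.
 */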
/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
                  uint32_t fqid)
{
    struct qm_mcc_initfq opts = {0};
    int ret;
    u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
    struct qm_mcc_initcgr cgr_opts = {
        .we_mask = QM_CGR_WE_CS_THRES |
                QM_CGR_WE_CSTD_EN |
                QM_CGR_WE_MODE,
        .cgr = {
            .cstd_en = QM_CGR_EN,
            .mode = QMAN_CGR_MODE_FRAME
        }
    };

    if (fmc_q || default_q) {
        ret = qman_reserve_fqid(fqid);
        if (ret) {
            DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
                     fqid, ret);
            return -EINVAL;
        }
    }

    DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
    ret = qman_create_fq(fqid, flags, fq);
    if (ret) {
        DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
                 fqid, ret);
        return ret;
    }
    fq->is_static = false;

    dpaa_poll_queue_default_config(&opts);

    if (cgr_rx) {
        /* Enable tail drop with cgr on this queue */
        qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
        cgr_rx->cb = NULL;
        ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
                      &cgr_opts);
        if (ret) {
            DPAA_PMD_WARN(
                "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
                fqid, ret);
            goto without_cgr;
        }
        opts.we_mask |= QM_INITFQ_WE_CGID;
        opts.fqd.cgid = cgr_rx->cgrid;
        opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
    }
without_cgr:
    ret = qman_init_fq(fq, 0, &opts);
    if (ret)
        DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
    return ret;
}
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
                  struct fman_if *fman_intf,
                  struct qman_cgr *cgr_tx)
{
    struct qm_mcc_initfq opts = {0};
    struct qm_mcc_initcgr cgr_opts = {
        .we_mask = QM_CGR_WE_CS_THRES |
                QM_CGR_WE_CSTD_EN |
                QM_CGR_WE_MODE,
        .cgr = {
            .cstd_en = QM_CGR_EN,
            .mode = QMAN_CGR_MODE_FRAME
        }
    };
    int ret;

    ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
                 QMAN_FQ_FLAG_TO_DCPORTAL, fq);
    if (ret) {
        DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
        return ret;
    }
    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
    opts.fqd.dest.channel = fman_intf->tx_channel_id;
    opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
    opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
    opts.fqd.context_b = 0;
    /* no tx-confirmation */
    opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
    opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
    DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);

    if (cgr_tx) {
        /* Enable tail drop with cgr on this queue */
        qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
                      td_tx_threshold, 0);
        cgr_tx->cb = NULL;
        ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
                      &cgr_opts);
        if (ret) {
            DPAA_PMD_WARN(
                "tx taildrop init fail on tx fqid 0x%x(ret=%d)",
                fq->fqid, ret);
            goto without_cgr;
        }
        opts.we_mask |= QM_INITFQ_WE_CGID;
        opts.fqd.cgid = cgr_tx->cgrid;
        opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
                   td_tx_threshold);
    }
without_cgr:
    ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
    if (ret)
        DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
    return ret;
}
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
    struct qm_mcc_initfq opts = {0};
    int ret;

    PMD_INIT_FUNC_TRACE();

    ret = qman_reserve_fqid(fqid);
    if (ret) {
        DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
                 fqid, ret);
        return -EINVAL;
    }
    /* "map" this Rx FQ to one of the interfaces Tx FQID */
    DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
    ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
    if (ret) {
        DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
                 fqid, ret);
        return ret;
    }
    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
    opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
    ret = qman_init_fq(fq, 0, &opts);
    if (ret)
        DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
                 fqid, ret);
    return ret;
}
#endif
/* Initialise a network interface */
static int
dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
{
    struct rte_dpaa_device *dpaa_device;
    struct fm_eth_port_cfg *cfg;
    struct dpaa_if *dpaa_intf;
    struct fman_if *fman_intf;
    int dev_id;

    PMD_INIT_FUNC_TRACE();

    dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
    dev_id = dpaa_device->id.dev_id;
    cfg = dpaa_get_eth_port_cfg(dev_id);
    fman_intf = cfg->fman_if;
    eth_dev->process_private = fman_intf;

    /* Plugging of UCODE burst API not supported in Secondary */
    dpaa_intf = eth_dev->data->dev_private;
    eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
    if (dpaa_intf->cgr_tx)
        eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
    else
        eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
    qman_set_fq_lookup_table(
        dpaa_intf->rx_queues->qman_fq_lookup_table);
#endif

    return 0;
}
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
    int num_rx_fqs, fqid;
    int loop, ret = 0;
    int dev_id;
    struct rte_dpaa_device *dpaa_device;
    struct dpaa_if *dpaa_intf;
    struct fm_eth_port_cfg *cfg;
    struct fman_if *fman_intf;
    struct fman_if_bpool *bp, *tmp_bp;
    uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
    uint32_t cgrid_tx[MAX_DPAA_CORES];
    uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
    int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
    int8_t vsp_id = -1;

    PMD_INIT_FUNC_TRACE();

    dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
    dev_id = dpaa_device->id.dev_id;
    dpaa_intf = eth_dev->data->dev_private;
    cfg = dpaa_get_eth_port_cfg(dev_id);
    fman_intf = cfg->fman_if;

    dpaa_intf->name = dpaa_device->name;

    /* save fman_if & cfg in the interface structure */
    eth_dev->process_private = fman_intf;
    dpaa_intf->ifid = dev_id;
    dpaa_intf->cfg = cfg;

    memset((char *)dev_rx_fqids, 0,
        sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);

    memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);

    /* Initialize Rx FQ's */
    if (default_q) {
        num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
    } else if (fmc_q) {
        num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
                        dev_vspids,
                        DPAA_MAX_NUM_PCD_QUEUES);
        if (num_rx_fqs < 0) {
            DPAA_PMD_ERR("%s FMC initializes failed!",
                     dpaa_intf->name);
            goto free_rx;
        }
        if (!num_rx_fqs) {
            DPAA_PMD_WARN("%s is not configured by FMC.",
                      dpaa_intf->name);
        }
    } else {
        /* FMCLESS mode, load balance to multiple cores.*/
        num_rx_fqs = rte_lcore_count();
    }

    /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
     * queues.
     */
    if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
        DPAA_PMD_ERR("Invalid number of RX queues\n");
        return -EINVAL;
    }

    if (num_rx_fqs > 0) {
        dpaa_intf->rx_queues = rte_zmalloc(NULL,
            sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
        if (!dpaa_intf->rx_queues) {
            DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
            return -ENOMEM;
        }
    } else {
        dpaa_intf->rx_queues = NULL;
    }

    memset(cgrid, 0, sizeof(cgrid));
    memset(cgrid_tx, 0, sizeof(cgrid_tx));

    /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
     * Tx tail drop is disabled.
     */
    if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
        td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
        DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
                   td_tx_threshold);
        /* if a very large value is being configured */
        if (td_tx_threshold > UINT16_MAX)
            td_tx_threshold = CGR_RX_PERFQ_THRESH;
    }

    /* If congestion control is enabled globally*/
    if (num_rx_fqs > 0 && td_threshold) {
        dpaa_intf->cgr_rx = rte_zmalloc(NULL,
            sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
        if (!dpaa_intf->cgr_rx) {
            DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
            ret = -ENOMEM;
            goto free_rx;
        }

        ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
        if (ret != num_rx_fqs) {
            DPAA_PMD_WARN("insufficient CGRIDs available");
            ret = -EINVAL;
            goto free_rx;
        }
    } else {
        dpaa_intf->cgr_rx = NULL;
    }

    if (!fmc_q && !default_q) {
        ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
                        1, 0);
        if (ret < 0) {
            DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
            goto free_rx;
        }
    }

    for (loop = 0; loop < num_rx_fqs; loop++) {
        if (default_q)
            fqid = cfg->rx_def;
        else
            fqid = dev_rx_fqids[loop];

        vsp_id = dev_vspids[loop];

        if (dpaa_intf->cgr_rx)
            dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

        ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
            dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
            fqid);
        if (ret)
            goto free_rx;
        dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
        dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
    }
    dpaa_intf->nb_rx_queues = num_rx_fqs;

    /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
    dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
        MAX_DPAA_CORES, MAX_CACHELINE);
    if (!dpaa_intf->tx_queues) {
        DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
        ret = -ENOMEM;
        goto free_rx;
    }

    /* If congestion control is enabled globally*/
    if (td_tx_threshold) {
        dpaa_intf->cgr_tx = rte_zmalloc(NULL,
            sizeof(struct qman_cgr) * MAX_DPAA_CORES,
            MAX_CACHELINE);
        if (!dpaa_intf->cgr_tx) {
            DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
            ret = -ENOMEM;
            goto free_rx;
        }

        ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
                         1, 0);
        if (ret != MAX_DPAA_CORES) {
            DPAA_PMD_WARN("insufficient CGRIDs available");
            ret = -EINVAL;
            goto free_rx;
        }
    } else {
        dpaa_intf->cgr_tx = NULL;
    }

    for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
        if (dpaa_intf->cgr_tx)
            dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];

        ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
            fman_intf,
            dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
        if (ret)
            goto free_tx;
        dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
    }
    dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
    ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
        [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
    if (ret) {
        DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
        goto free_tx;
    }
    dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
    ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
        [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
    if (ret) {
        DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
        goto free_tx;
    }
    dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

    DPAA_PMD_DEBUG("All frame queues created");

    /* Get the initial configuration for flow control */
    dpaa_fc_set_default(dpaa_intf, fman_intf);

    /* reset bpool list, initialize bpool dynamically */
    list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
        list_del(&bp->node);
        rte_free(bp);
    }

    /* Populate ethdev structure */
    eth_dev->dev_ops = &dpaa_devops;
    eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
    eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
    eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

    /* Allocate memory for storing MAC addresses */
    eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
        RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
    if (eth_dev->data->mac_addrs == NULL) {
        DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
                 "store MAC addresses",
                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
        ret = -ENOMEM;
        goto free_tx;
    }

    /* copy the primary mac address */
    rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

    RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
        dpaa_device->name,
        fman_intf->mac_addr.addr_bytes[0],
        fman_intf->mac_addr.addr_bytes[1],
        fman_intf->mac_addr.addr_bytes[2],
        fman_intf->mac_addr.addr_bytes[3],
        fman_intf->mac_addr.addr_bytes[4],
        fman_intf->mac_addr.addr_bytes[5]);

    if (!fman_intf->is_shared_mac) {
        /* Configure error packet handling */
        fman_if_receive_rx_errors(fman_intf,
                      FM_FD_RX_STATUS_ERR_MASK);
        /* Disable RX mode */
        fman_if_disable_rx(fman_intf);
        /* Disable promiscuous mode */
        fman_if_promiscuous_disable(fman_intf);
        /* Disable multicast */
        fman_if_reset_mcast_filter_table(fman_intf);
        /* Reset interface statistics */
        fman_if_stats_reset(fman_intf);
        /* Disable SG by default */
        fman_if_set_sg(fman_intf, 0);
        fman_if_set_maxfrm(fman_intf,
                   RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
    }

    return 0;

free_tx:
    rte_free(dpaa_intf->tx_queues);
    dpaa_intf->tx_queues = NULL;
    dpaa_intf->nb_tx_queues = 0;

free_rx:
    rte_free(dpaa_intf->cgr_rx);
    rte_free(dpaa_intf->cgr_tx);
    rte_free(dpaa_intf->rx_queues);
    dpaa_intf->rx_queues = NULL;
    dpaa_intf->nb_rx_queues = 0;
    return ret;
}
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
           struct rte_dpaa_device *dpaa_dev)
{
    int ret, diag;
    struct rte_eth_dev *eth_dev;

    PMD_INIT_FUNC_TRACE();

    if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
        RTE_PKTMBUF_HEADROOM) {
        DPAA_PMD_ERR(
        "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
        RTE_PKTMBUF_HEADROOM,
        DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

        return -1;
    }

    /* In case of secondary process, the device is already configured
     * and no further action is required, except portal initialization
     * and verifying secondary attachment to port name.
     */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
        eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
        if (!eth_dev)
            return -ENOMEM;
        eth_dev->device = &dpaa_dev->device;
        eth_dev->dev_ops = &dpaa_devops;

        ret = dpaa_dev_init_secondary(eth_dev);
        if (ret != 0) {
            RTE_LOG(ERR, PMD, "secondary dev init failed\n");
            return ret;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
    }

    if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
        if (access("/tmp/fmc.bin", F_OK) == -1) {
            DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
            default_q = 1;
        }

        if (!(default_q || fmc_q)) {
            if (dpaa_fm_init()) {
                DPAA_PMD_ERR("FM init failed\n");
                return -1;
            }
        }

        /* disabling the default push mode for LS1043 */
        if (dpaa_svr_family == SVR_LS1043A_FAMILY)
            dpaa_push_mode_max_queue = 0;

        /* if push mode queues to be enabled. Currently we are allowing
         * only one queue per thread.
         */
        if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
            dpaa_push_mode_max_queue =
                atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
            if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
                dpaa_push_mode_max_queue =
                    DPAA_MAX_PUSH_MODE_QUEUE;
        }

        is_global_init = 1;
    }

    if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
        ret = rte_dpaa_portal_init((void *)1);
        if (ret) {
            DPAA_PMD_ERR("Unable to initialize portal");
            return ret;
        }
    }

    eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
    if (!eth_dev)
        return -ENOMEM;

    eth_dev->data->dev_private =
        rte_zmalloc("ethdev private structure",
                sizeof(struct dpaa_if),
                RTE_CACHE_LINE_SIZE);
    if (!eth_dev->data->dev_private) {
        DPAA_PMD_ERR("Cannot allocate memzone for port data");
        rte_eth_dev_release_port(eth_dev);
        return -ENOMEM;
    }

    eth_dev->device = &dpaa_dev->device;
    dpaa_dev->eth_dev = eth_dev;

    qman_ern_register_cb(dpaa_free_mbuf);

    if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
        eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

    /* Invoke PMD device initialization function */
    diag = dpaa_dev_init(eth_dev);
    if (diag == 0) {
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
    }

    rte_eth_dev_release_port(eth_dev);
    return diag;
}
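
/* Example (hypothetical application and core mask): enable four push-mode
 * queues and a Tx taildrop of 512 frames before launching a DPDK app:
 *
 *   DPAA_PUSH_QUEUES_NUMBER=4 DPAA_TX_TAILDROP_THRESHOLD=512 ./app -c 0xf
 *
 * Values above DPAA_MAX_PUSH_MODE_QUEUE (8) are clamped, and a threshold
 * above UINT16_MAX falls back to the default CGR_RX_PERFQ_THRESH.
 */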
static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
    struct rte_eth_dev *eth_dev;
    int ret;

    PMD_INIT_FUNC_TRACE();

    eth_dev = dpaa_dev->eth_dev;
    dpaa_eth_dev_close(eth_dev);
    ret = rte_eth_dev_release_port(eth_dev);

    return ret;
}
static void __attribute__((destructor(102))) dpaa_finish(void)
{
    /* For secondary, primary will do all the cleanup */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;

    if (!(default_q || fmc_q)) {
        unsigned int i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
            if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
                struct rte_eth_dev *dev = &rte_eth_devices[i];
                struct dpaa_if *dpaa_intf =
                    dev->data->dev_private;
                struct fman_if *fif =
                    dev->process_private;
                if (dpaa_intf->port_handle)
                    if (dpaa_fm_deconfig(dpaa_intf, fif))
                        DPAA_PMD_WARN("DPAA FM "
                            "deconfig failed\n");
                if (fif->num_profiles) {
                    if (dpaa_port_vsp_cleanup(dpaa_intf,
                                  fif))
                        DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
                }
            }
        }
        if (is_global_init)
            if (dpaa_fm_term())
                DPAA_PMD_WARN("DPAA FM term failed\n");

        is_global_init = 0;

        DPAA_PMD_INFO("DPAA fman cleaned up");
    }
}
static struct rte_dpaa_driver rte_dpaa_pmd = {
    .drv_flags = RTE_DPAA_DRV_INTR_LSC,
    .drv_type = FSL_DPAA_ETH,
    .probe = rte_dpaa_probe,
    .remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);