1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2020 NXP
15 #include <sys/types.h>
16 #include <sys/syscall.h>
18 #include <rte_string_fns.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_interrupts.h>
23 #include <rte_debug.h>
25 #include <rte_atomic.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_memory.h>
28 #include <rte_tailq.h>
30 #include <rte_alarm.h>
31 #include <rte_ether.h>
32 #include <ethdev_driver.h>
33 #include <rte_malloc.h>
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38 #include <dpaa_mempool.h>
40 #include <dpaa_ethdev.h>
41 #include <dpaa_rxtx.h>
42 #include <dpaa_flow.h>
43 #include <rte_pmd_dpaa.h>
50 #include <fmlib/fm_ext.h>
52 #define CHECK_INTERVAL 100 /* 100ms */
53 #define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */
55 /* Supported Rx offloads */
56 static uint64_t dev_rx_offloads_sup =
57 DEV_RX_OFFLOAD_JUMBO_FRAME |
58 DEV_RX_OFFLOAD_SCATTER;
60 /* Rx offloads which cannot be disabled */
61 static uint64_t dev_rx_offloads_nodis =
62 DEV_RX_OFFLOAD_IPV4_CKSUM |
63 DEV_RX_OFFLOAD_UDP_CKSUM |
64 DEV_RX_OFFLOAD_TCP_CKSUM |
65 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
66 DEV_RX_OFFLOAD_RSS_HASH;
68 /* Supported Tx offloads */
69 static uint64_t dev_tx_offloads_sup =
70 DEV_TX_OFFLOAD_MT_LOCKFREE |
71 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
73 /* Tx offloads which cannot be disabled */
74 static uint64_t dev_tx_offloads_nodis =
75 DEV_TX_OFFLOAD_IPV4_CKSUM |
76 DEV_TX_OFFLOAD_UDP_CKSUM |
77 DEV_TX_OFFLOAD_TCP_CKSUM |
78 DEV_TX_OFFLOAD_SCTP_CKSUM |
79 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
80 DEV_TX_OFFLOAD_MULTI_SEGS;
82 /* Keep track of whether QMAN and BMAN have been globally initialized */
83 static int is_global_init;
84 static int fmc_q = 1; /* Indicates the use of static fmc for distribution */
85 static int default_q; /* use default queue - FMC is not executed*/
86 /* At present we only allow up to 4 push mode queues by default, as each of
87 * these queues needs a dedicated portal and we are short of portals.
89 #define DPAA_MAX_PUSH_MODE_QUEUE 8
90 #define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
92 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
93 static int dpaa_push_queue_idx; /* Count of Rx queues placed in push mode */
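/* The push-mode queue count can be overridden at run time; a minimal usage
 * sketch (illustrative command line, any DPDK application works the same way):
 *   DPAA_PUSH_QUEUES_NUMBER=2 ./dpdk-testpmd -- -i
 * The value is parsed in rte_dpaa_probe() and capped at
 * DPAA_MAX_PUSH_MODE_QUEUE; on LS1043A the default is forced to 0.
 */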
96 /* Per RX FQ Taildrop in frame count */
97 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
99 /* Per TX FQ Taildrop in frame count, disabled by default */
100 static unsigned int td_tx_threshold;
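/* The Tx threshold is taken from the DPAA_TX_TAILDROP_THRESHOLD environment
 * variable (parsed in dpaa_dev_init()); 0 keeps Tx tail drop disabled and a
 * value above UINT16_MAX falls back to CGR_RX_PERFQ_THRESH.
 */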
102 struct rte_dpaa_xstats_name_off {
103 char name[RTE_ETH_XSTATS_NAME_SIZE];
107 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
109 offsetof(struct dpaa_if_stats, raln)},
111 offsetof(struct dpaa_if_stats, rxpf)},
113 offsetof(struct dpaa_if_stats, rfcs)},
115 offsetof(struct dpaa_if_stats, rvlan)},
117 offsetof(struct dpaa_if_stats, rerr)},
119 offsetof(struct dpaa_if_stats, rdrp)},
121 offsetof(struct dpaa_if_stats, rund)},
123 offsetof(struct dpaa_if_stats, rovr)},
125 offsetof(struct dpaa_if_stats, rfrg)},
127 offsetof(struct dpaa_if_stats, txpf)},
129 offsetof(struct dpaa_if_stats, terr)},
131 offsetof(struct dpaa_if_stats, tvlan)},
133 offsetof(struct dpaa_if_stats, tund)},
136 static struct rte_dpaa_driver rte_dpaa_pmd;
139 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
141 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
142 int wait_to_complete __rte_unused);
144 static void dpaa_interrupt_handler(void *param);
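/* Fill an initfq descriptor with the default Rx poll-queue settings:
 * avoid-blocking, prefer-in-cache and context-A stashing (annotation
 * stashing is skipped on LS1046A, where it hurts multi-core performance).
 */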
147 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
149 memset(opts, 0, sizeof(struct qm_mcc_initfq));
150 opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
151 opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
152 QM_FQCTRL_PREFERINCACHE;
153 opts->fqd.context_a.stashing.exclusive = 0;
154 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
155 opts->fqd.context_a.stashing.annotation_cl =
156 DPAA_IF_RX_ANNOTATION_STASH;
157 opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
158 opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
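/* MTU update: validate the resulting frame size against DPAA_MAX_RX_PKT_LEN
 * and the Rx buffer/scatter-gather limits, toggle the jumbo-frame offload
 * flag accordingly and program the new maximum frame size into the FMan port.
 */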
162 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
164 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
166 uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
168 PMD_INIT_FUNC_TRACE();
170 if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
173 * Refuse mtu that requires the support of scattered packets
174 * when this feature has not been enabled before.
176 if (dev->data->min_rx_buf_size &&
177 !dev->data->scattered_rx && frame_size > buffsz) {
178 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
182 /* check <seg size> * <max_seg> >= max_frame */
183 if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
184 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
185 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
186 buffsz * DPAA_SGT_MAX_ENTRIES);
190 if (mtu > RTE_ETHER_MTU)
191 dev->data->dev_conf.rxmode.offloads |=
192 DEV_RX_OFFLOAD_JUMBO_FRAME;
194 dev->data->dev_conf.rxmode.offloads &=
195 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
197 fman_if_set_maxfrm(dev->process_private, frame_size);
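/* dev_configure: note which always-on Rx/Tx offloads were not requested,
 * program the maximum frame length, enable scatter mode if requested,
 * apply the FM port configuration in FMC-less mode, register the link
 * status interrupt handler and apply any fixed link speed/duplex setting.
 */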
203 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
205 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
206 uint64_t rx_offloads = eth_conf->rxmode.offloads;
207 uint64_t tx_offloads = eth_conf->txmode.offloads;
208 struct rte_device *rdev = dev->device;
209 struct rte_eth_link *link = &dev->data->dev_link;
210 struct rte_dpaa_device *dpaa_dev;
211 struct fman_if *fif = dev->process_private;
212 struct __fman_if *__fif;
213 struct rte_intr_handle *intr_handle;
214 uint32_t max_rx_pktlen;
218 PMD_INIT_FUNC_TRACE();
220 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
221 intr_handle = &dpaa_dev->intr_handle;
222 __fif = container_of(fif, struct __fman_if, __if);
224 /* Rx offloads which are enabled by default */
225 if (dev_rx_offloads_nodis & ~rx_offloads) {
227 "Some of rx offloads enabled by default - requested 0x%" PRIx64
228 " fixed are 0x%" PRIx64,
229 rx_offloads, dev_rx_offloads_nodis);
232 /* Tx offloads which are enabled by default */
233 if (dev_tx_offloads_nodis & ~tx_offloads) {
235 "Some of tx offloads enabled by default - requested 0x%" PRIx64
236 " fixed are 0x%" PRIx64,
237 tx_offloads, dev_tx_offloads_nodis);
240 max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
241 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
242 if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
243 DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
245 max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
246 max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
249 fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
251 if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
252 DPAA_PMD_DEBUG("enabling scatter mode");
253 fman_if_set_sg(dev->process_private, 1);
254 dev->data->scattered_rx = 1;
257 if (!(default_q || fmc_q)) {
258 if (dpaa_fm_config(dev,
259 eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
260 dpaa_write_fm_config_to_file();
261 DPAA_PMD_ERR("FM port configuration: Failed\n");
264 dpaa_write_fm_config_to_file();
267 /* if the interrupts were configured on this device */
268 if (intr_handle && intr_handle->fd) {
269 if (dev->data->dev_conf.intr_conf.lsc != 0)
270 rte_intr_callback_register(intr_handle,
271 dpaa_interrupt_handler,
274 ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
276 if (dev->data->dev_conf.intr_conf.lsc != 0) {
277 rte_intr_callback_unregister(intr_handle,
278 dpaa_interrupt_handler,
281 printf("Failed to enable interrupt: Not Supported\n");
283 printf("Failed to enable interrupt\n");
285 dev->data->dev_conf.intr_conf.lsc = 0;
286 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
290 /* Wait for link status to get updated */
291 if (!link->link_status)
294 /* Configure link only if link is UP*/
295 if (link->link_status) {
296 if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
297 /* Start autoneg only if link is not in autoneg mode */
298 if (!link->link_autoneg)
299 dpaa_restart_link_autoneg(__fif->node_name);
300 } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
301 switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
302 case ETH_LINK_SPEED_10M_HD:
303 speed = ETH_SPEED_NUM_10M;
304 duplex = ETH_LINK_HALF_DUPLEX;
306 case ETH_LINK_SPEED_10M:
307 speed = ETH_SPEED_NUM_10M;
308 duplex = ETH_LINK_FULL_DUPLEX;
310 case ETH_LINK_SPEED_100M_HD:
311 speed = ETH_SPEED_NUM_100M;
312 duplex = ETH_LINK_HALF_DUPLEX;
314 case ETH_LINK_SPEED_100M:
315 speed = ETH_SPEED_NUM_100M;
316 duplex = ETH_LINK_FULL_DUPLEX;
318 case ETH_LINK_SPEED_1G:
319 speed = ETH_SPEED_NUM_1G;
320 duplex = ETH_LINK_FULL_DUPLEX;
322 case ETH_LINK_SPEED_2_5G:
323 speed = ETH_SPEED_NUM_2_5G;
324 duplex = ETH_LINK_FULL_DUPLEX;
326 case ETH_LINK_SPEED_10G:
327 speed = ETH_SPEED_NUM_10G;
328 duplex = ETH_LINK_FULL_DUPLEX;
331 speed = ETH_SPEED_NUM_NONE;
332 duplex = ETH_LINK_FULL_DUPLEX;
336 dpaa_update_link_speed(__fif->node_name, speed, duplex);
338 /* Manual autoneg - custom advertisement speed. */
339 printf("Custom Advertisement speeds not supported\n");
346 static const uint32_t *
347 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
349 static const uint32_t ptypes[] = {
351 RTE_PTYPE_L2_ETHER_VLAN,
352 RTE_PTYPE_L2_ETHER_ARP,
353 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
354 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
364 PMD_INIT_FUNC_TRACE();
366 if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
371 static void dpaa_interrupt_handler(void *param)
373 struct rte_eth_dev *dev = param;
374 struct rte_device *rdev = dev->device;
375 struct rte_dpaa_device *dpaa_dev;
376 struct rte_intr_handle *intr_handle;
380 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
381 intr_handle = &dpaa_dev->intr_handle;
383 bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
385 DPAA_PMD_ERR("Error reading eventfd\n");
386 dpaa_eth_link_update(dev, 0);
387 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
390 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
392 struct dpaa_if *dpaa_intf = dev->data->dev_private;
394 PMD_INIT_FUNC_TRACE();
396 if (!(default_q || fmc_q))
397 dpaa_write_fm_config_to_file();
399 /* Change tx callback to the real one */
400 if (dpaa_intf->cgr_tx)
401 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
403 dev->tx_pkt_burst = dpaa_eth_queue_tx;
405 fman_if_enable_rx(dev->process_private);
410 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
412 struct fman_if *fif = dev->process_private;
414 PMD_INIT_FUNC_TRACE();
415 dev->data->dev_started = 0;
417 if (!fif->is_shared_mac)
418 fman_if_disable_rx(fif);
419 dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
424 static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
426 struct fman_if *fif = dev->process_private;
427 struct __fman_if *__fif;
428 struct rte_device *rdev = dev->device;
429 struct rte_dpaa_device *dpaa_dev;
430 struct rte_intr_handle *intr_handle;
431 struct rte_eth_link *link = &dev->data->dev_link;
432 struct dpaa_if *dpaa_intf = dev->data->dev_private;
436 PMD_INIT_FUNC_TRACE();
438 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
442 DPAA_PMD_WARN("Already closed or not started");
446 /* DPAA FM deconfig */
447 if (!(default_q || fmc_q)) {
448 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
449 DPAA_PMD_WARN("DPAA FM deconfig failed\n");
452 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
453 intr_handle = &dpaa_dev->intr_handle;
454 __fif = container_of(fif, struct __fman_if, __if);
456 ret = dpaa_eth_dev_stop(dev);
458 /* Reset link to autoneg */
459 if (link->link_status && !link->link_autoneg)
460 dpaa_restart_link_autoneg(__fif->node_name);
462 if (intr_handle && intr_handle->fd &&
463 dev->data->dev_conf.intr_conf.lsc != 0) {
464 dpaa_intr_disable(__fif->node_name);
465 rte_intr_callback_unregister(intr_handle,
466 dpaa_interrupt_handler,
470 /* release configuration memory */
471 if (dpaa_intf->fc_conf)
472 rte_free(dpaa_intf->fc_conf);
474 /* Release RX congestion Groups */
475 if (dpaa_intf->cgr_rx) {
476 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
477 qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
480 rte_free(dpaa_intf->cgr_rx);
481 dpaa_intf->cgr_rx = NULL;
482 /* Release TX congestion Groups */
483 if (dpaa_intf->cgr_tx) {
484 for (loop = 0; loop < MAX_DPAA_CORES; loop++)
485 qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
486 rte_free(dpaa_intf->cgr_tx);
487 dpaa_intf->cgr_tx = NULL;
490 rte_free(dpaa_intf->rx_queues);
491 dpaa_intf->rx_queues = NULL;
493 rte_free(dpaa_intf->tx_queues);
494 dpaa_intf->tx_queues = NULL;
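/* Report the firmware version as "SVR:<soc-version>-fman-v<ip-rev>", with the
 * SoC version register value read from DPAA_SOC_ID_FILE.
 */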
500 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
505 FILE *svr_file = NULL;
506 unsigned int svr_ver = 0;
508 PMD_INIT_FUNC_TRACE();
510 svr_file = fopen(DPAA_SOC_ID_FILE, "r");
512 DPAA_PMD_ERR("Unable to open SoC device");
513 return -ENOTSUP; /* Not supported on this infra */
515 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
516 dpaa_svr_family = svr_ver & SVR_MASK;
518 DPAA_PMD_ERR("Unable to read SoC device");
522 ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
523 svr_ver, fman_ip_rev);
527 ret += 1; /* add the size of '\0' */
528 if (fw_size < (size_t)ret)
534 static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
535 struct rte_eth_dev_info *dev_info)
537 struct dpaa_if *dpaa_intf = dev->data->dev_private;
538 struct fman_if *fif = dev->process_private;
540 DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
542 dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
543 dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
544 dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
545 dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
546 dev_info->max_hash_mac_addrs = 0;
547 dev_info->max_vfs = 0;
548 dev_info->max_vmdq_pools = ETH_16_POOLS;
549 dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
551 if (fif->mac_type == fman_mac_1g) {
552 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
554 | ETH_LINK_SPEED_100M_HD
555 | ETH_LINK_SPEED_100M
557 } else if (fif->mac_type == fman_mac_2_5g) {
558 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
560 | ETH_LINK_SPEED_100M_HD
561 | ETH_LINK_SPEED_100M
563 | ETH_LINK_SPEED_2_5G;
564 } else if (fif->mac_type == fman_mac_10g) {
565 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
567 | ETH_LINK_SPEED_100M_HD
568 | ETH_LINK_SPEED_100M
570 | ETH_LINK_SPEED_2_5G
571 | ETH_LINK_SPEED_10G;
573 DPAA_PMD_ERR("invalid link_speed: %s, %d",
574 dpaa_intf->name, fif->mac_type);
578 dev_info->rx_offload_capa = dev_rx_offloads_sup |
579 dev_rx_offloads_nodis;
580 dev_info->tx_offload_capa = dev_tx_offloads_sup |
581 dev_tx_offloads_nodis;
582 dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
583 dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
584 dev_info->default_rxportconf.nb_queues = 1;
585 dev_info->default_txportconf.nb_queues = 1;
586 dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
587 dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
593 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
594 __rte_unused uint16_t queue_id,
595 struct rte_eth_burst_mode *mode)
597 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
600 const struct burst_info {
603 } rx_offload_map[] = {
604 {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
605 {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
606 {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
607 {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
608 {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
609 {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
610 {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
613 /* Update Rx offload info */
614 for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
615 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
616 snprintf(mode->info, sizeof(mode->info), "%s",
617 rx_offload_map[i].output);
626 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
627 __rte_unused uint16_t queue_id,
628 struct rte_eth_burst_mode *mode)
630 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
633 const struct burst_info {
636 } tx_offload_map[] = {
637 {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
638 {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
639 {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
640 {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
641 {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
642 {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
643 {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
644 {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
647 /* Update Tx offload info */
648 for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
649 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
650 snprintf(mode->info, sizeof(mode->info), "%s",
651 tx_offload_map[i].output);
659 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
660 int wait_to_complete)
662 struct dpaa_if *dpaa_intf = dev->data->dev_private;
663 struct rte_eth_link *link = &dev->data->dev_link;
664 struct fman_if *fif = dev->process_private;
665 struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
666 int ret, ioctl_version;
669 PMD_INIT_FUNC_TRACE();
671 ioctl_version = dpaa_get_ioctl_version_number();
673 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
674 for (count = 0; count <= MAX_REPEAT_TIME; count++) {
675 ret = dpaa_get_link_status(__fif->node_name, link);
678 if (link->link_status == ETH_LINK_DOWN &&
680 rte_delay_ms(CHECK_INTERVAL);
685 link->link_status = dpaa_intf->valid;
688 if (ioctl_version < 2) {
689 link->link_duplex = ETH_LINK_FULL_DUPLEX;
690 link->link_autoneg = ETH_LINK_AUTONEG;
692 if (fif->mac_type == fman_mac_1g)
693 link->link_speed = ETH_SPEED_NUM_1G;
694 else if (fif->mac_type == fman_mac_2_5g)
695 link->link_speed = ETH_SPEED_NUM_2_5G;
696 else if (fif->mac_type == fman_mac_10g)
697 link->link_speed = ETH_SPEED_NUM_10G;
699 DPAA_PMD_ERR("invalid link_speed: %s, %d",
700 dpaa_intf->name, fif->mac_type);
703 DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
704 link->link_status ? "Up" : "Down");
708 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
709 struct rte_eth_stats *stats)
711 PMD_INIT_FUNC_TRACE();
713 fman_if_stats_get(dev->process_private, stats);
717 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
719 PMD_INIT_FUNC_TRACE();
721 fman_if_stats_reset(dev->process_private);
727 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
730 unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
731 uint64_t values[sizeof(struct dpaa_if_stats) / 8];
739 fman_if_stats_get_all(dev->process_private, values,
740 sizeof(struct dpaa_if_stats) / 8);
742 for (i = 0; i < num; i++) {
744 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
750 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
751 struct rte_eth_xstat_name *xstats_names,
754 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
756 if (limit < stat_cnt)
759 if (xstats_names != NULL)
760 for (i = 0; i < stat_cnt; i++)
761 strlcpy(xstats_names[i].name,
762 dpaa_xstats_strings[i].name,
763 sizeof(xstats_names[i].name));
769 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
770 uint64_t *values, unsigned int n)
772 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
773 uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
782 fman_if_stats_get_all(dev->process_private, values_copy,
783 sizeof(struct dpaa_if_stats) / 8);
785 for (i = 0; i < stat_cnt; i++)
787 values_copy[dpaa_xstats_strings[i].offset / 8];
792 dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
794 for (i = 0; i < n; i++) {
795 if (ids[i] >= stat_cnt) {
796 DPAA_PMD_ERR("id value isn't valid");
799 values[i] = values_copy[ids[i]];
805 dpaa_xstats_get_names_by_id(
806 struct rte_eth_dev *dev,
808 struct rte_eth_xstat_name *xstats_names,
811 unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
812 struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
815 return dpaa_xstats_get_names(dev, xstats_names, limit);
817 dpaa_xstats_get_names(dev, xstats_names_copy, limit);
819 for (i = 0; i < limit; i++) {
820 if (ids[i] >= stat_cnt) {
821 DPAA_PMD_ERR("id value isn't valid");
824 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
829 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
831 PMD_INIT_FUNC_TRACE();
833 fman_if_promiscuous_enable(dev->process_private);
838 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
840 PMD_INIT_FUNC_TRACE();
842 fman_if_promiscuous_disable(dev->process_private);
847 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
849 PMD_INIT_FUNC_TRACE();
851 fman_if_set_mcast_filter_table(dev->process_private);
856 static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
858 PMD_INIT_FUNC_TRACE();
860 fman_if_reset_mcast_filter_table(dev->process_private);
865 static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
867 struct dpaa_if *dpaa_intf = dev->data->dev_private;
868 struct fman_if_ic_params icp;
872 memset(&icp, 0, sizeof(icp));
873 /* set ICEOF to the default value, which is 0 */
874 icp.iciof = DEFAULT_ICIOF;
875 icp.iceof = DEFAULT_RX_ICEOF;
876 icp.icsz = DEFAULT_ICSZ;
877 fman_if_set_ic_params(dev->process_private, &icp);
879 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
880 fman_if_set_fdoff(dev->process_private, fd_offset);
882 /* Buffer pool size should be equal to the dataroom size */
883 bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
885 fman_if_set_bp(dev->process_private,
886 dpaa_intf->bp_info->mp->size,
887 dpaa_intf->bp_info->bpid, bp_size);
890 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
891 int8_t vsp_id, uint32_t bpid)
893 struct dpaa_if *dpaa_intf = dev->data->dev_private;
894 struct fman_if *fif = dev->process_private;
896 if (fif->num_profiles) {
898 vsp_id = fif->base_profile_id;
904 if (dpaa_intf->vsp_bpid[vsp_id] &&
905 bpid != dpaa_intf->vsp_bpid[vsp_id]) {
906 DPAA_PMD_ERR("Multiple mempools assigned to RXQs with the same VSP");
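/* Rx queue setup: validate the queue index and mempool, check that the
 * maximum Rx frame fits in one buffer (or within the scatter-gather limit),
 * attach the buffer pool/VSP and, while dedicated portals are available,
 * switch the frame queue to push (static dequeue) mode with an eventfd for
 * Rx interrupts.
 */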
915 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
917 unsigned int socket_id __rte_unused,
918 const struct rte_eth_rxconf *rx_conf,
919 struct rte_mempool *mp)
921 struct dpaa_if *dpaa_intf = dev->data->dev_private;
922 struct fman_if *fif = dev->process_private;
923 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
924 struct qm_mcc_initfq opts = {0};
927 u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
928 uint32_t max_rx_pktlen;
930 PMD_INIT_FUNC_TRACE();
932 if (queue_idx >= dev->data->nb_rx_queues) {
933 rte_errno = EOVERFLOW;
934 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
935 (void *)dev, queue_idx, dev->data->nb_rx_queues);
939 /* Rx deferred start is not supported */
940 if (rx_conf->rx_deferred_start) {
941 DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
944 rxq->nb_desc = UINT16_MAX;
945 rxq->offloads = rx_conf->offloads;
947 DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
948 queue_idx, rxq->fqid);
950 if (!fif->num_profiles) {
951 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
952 dpaa_intf->bp_info->mp != mp) {
953 DPAA_PMD_WARN("Multiple pools on same interface not"
958 if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
959 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
964 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
965 dpaa_intf->bp_info->mp != mp) {
966 DPAA_PMD_WARN("Multiple pools on same interface not supported");
970 max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
972 /* Max packet can fit in single buffer */
973 if (max_rx_pktlen <= buffsz) {
975 } else if (dev->data->dev_conf.rxmode.offloads &
976 DEV_RX_OFFLOAD_SCATTER) {
977 if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
978 DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
980 max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
981 rte_errno = EOVERFLOW;
985 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
986 " larger than a single mbuf (%u) and scattered"
987 " mode has not been requested",
988 max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
991 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
993 /* For shared interface, it's done in kernel, skip.*/
994 if (!fif->is_shared_mac)
995 dpaa_fman_if_pool_setup(dev);
997 if (fif->num_profiles) {
998 int8_t vsp_id = rxq->vsp_id;
1001 ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
1002 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
1005 DPAA_PMD_ERR("dpaa_port_vsp_update failed");
1009 DPAA_PMD_INFO("Base profile is associated to"
1010 " RXQ fqid:%d\r\n", rxq->fqid);
1011 if (fif->is_shared_mac) {
1012 DPAA_PMD_ERR("Fatal: Base profile is associated"
1013 " to shared interface on DPDK.");
1016 dpaa_intf->vsp_bpid[fif->base_profile_id] =
1017 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1020 dpaa_intf->vsp_bpid[0] =
1021 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1024 dpaa_intf->valid = 1;
1025 DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
1026 fman_if_get_sg_enable(fif), max_rx_pktlen);
1027 /* checking if push mode only, no error check for now */
1028 if (!rxq->is_static &&
1029 dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
1030 struct qman_portal *qp;
1033 dpaa_push_queue_idx++;
1034 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
1035 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
1036 QM_FQCTRL_CTXASTASHING |
1037 QM_FQCTRL_PREFERINCACHE;
1038 opts.fqd.context_a.stashing.exclusive = 0;
1039 /* In a multicore scenario stashing becomes a bottleneck on LS1046.
1040 * So do not enable stashing in this case
1042 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
1043 opts.fqd.context_a.stashing.annotation_cl =
1044 DPAA_IF_RX_ANNOTATION_STASH;
1045 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
1046 opts.fqd.context_a.stashing.context_cl =
1047 DPAA_IF_RX_CONTEXT_STASH;
1049 /* Create a channel and associate given queue with the channel */
1050 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
1051 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1052 opts.fqd.dest.channel = rxq->ch_id;
1053 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
1054 flags = QMAN_INITFQ_FLAG_SCHED;
1056 /* Configure tail drop */
1057 if (dpaa_intf->cgr_rx) {
1058 opts.we_mask |= QM_INITFQ_WE_CGID;
1059 opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
1060 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1062 ret = qman_init_fq(rxq, flags, &opts);
1064 DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
1065 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1068 if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
1069 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
1071 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
1072 rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
1075 rxq->is_static = true;
1077 /* Allocate qman specific portals */
1078 qp = fsl_qman_fq_portal_create(&q_fd);
1080 DPAA_PMD_ERR("Unable to alloc fq portal");
1085 /* Set up the device interrupt handler */
1086 if (!dev->intr_handle) {
1087 struct rte_dpaa_device *dpaa_dev;
1088 struct rte_device *rdev = dev->device;
1090 dpaa_dev = container_of(rdev, struct rte_dpaa_device,
1092 dev->intr_handle = &dpaa_dev->intr_handle;
1093 dev->intr_handle->intr_vec = rte_zmalloc(NULL,
1094 dpaa_push_mode_max_queue, 0);
1095 if (!dev->intr_handle->intr_vec) {
1096 DPAA_PMD_ERR("intr_vec alloc failed");
1099 dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
1100 dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
1103 dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
1104 dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
1105 dev->intr_handle->efds[queue_idx] = q_fd;
1108 rxq->bp_array = rte_dpaa_bpid_info;
1109 dev->data->rx_queues[queue_idx] = rxq;
1111 /* configure the CGR size as per the desc size */
1112 if (dpaa_intf->cgr_rx) {
1113 struct qm_mcc_initcgr cgr_opts = {0};
1115 rxq->nb_desc = nb_desc;
1116 /* Enable tail drop with cgr on this queue */
1117 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
1118 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
1121 "rx taildrop modify fail on fqid %d (ret=%d)",
1125 /* Enable main queue to receive error packets also by default */
1126 fman_if_set_err_fqid(fif, rxq->fqid);
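/* Attach an Rx queue to the event device: reprogram the frame queue towards
 * the event channel at the requested priority, using atomic or parallel
 * scheduling (ordered scheduling is not supported).
 */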
1131 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
1132 int eth_rx_queue_id,
1134 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1138 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1139 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1140 struct qm_mcc_initfq opts = {0};
1142 if (dpaa_push_mode_max_queue)
1143 DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
1144 "PUSH mode already enabled for first %d queues.\n"
1145 "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
1146 dpaa_push_mode_max_queue);
1148 dpaa_poll_queue_default_config(&opts);
1150 switch (queue_conf->ev.sched_type) {
1151 case RTE_SCHED_TYPE_ATOMIC:
1152 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
1153 /* Reset the FQCTRL_AVOIDBLOCK bit as it is an unnecessary
1154 * configuration with the HOLD_ACTIVE setting
1156 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
1157 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
1159 case RTE_SCHED_TYPE_ORDERED:
1160 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
1163 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
1164 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
1168 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1169 opts.fqd.dest.channel = ch_id;
1170 opts.fqd.dest.wq = queue_conf->ev.priority;
1172 if (dpaa_intf->cgr_rx) {
1173 opts.we_mask |= QM_INITFQ_WE_CGID;
1174 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1175 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1178 flags = QMAN_INITFQ_FLAG_SCHED;
1180 ret = qman_init_fq(rxq, flags, &opts);
1182 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
1183 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1187 /* copy configuration which needs to be filled during dequeue */
1188 memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
1189 dev->data->rx_queues[eth_rx_queue_id] = rxq;
1195 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
1196 int eth_rx_queue_id)
1198 struct qm_mcc_initfq opts;
1201 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1202 struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1204 dpaa_poll_queue_default_config(&opts);
1206 if (dpaa_intf->cgr_rx) {
1207 opts.we_mask |= QM_INITFQ_WE_CGID;
1208 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1209 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1212 ret = qman_init_fq(rxq, flags, &opts);
1214 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
1218 rxq->cb.dqrr_dpdk_cb = NULL;
1219 dev->data->rx_queues[eth_rx_queue_id] = NULL;
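/* Tx queue setup: the per-core Tx frame queues are created in dpaa_dev_init(),
 * so this only validates the queue index, records the offloads and exposes
 * the corresponding frame queue to the application.
 */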
1225 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1226 uint16_t nb_desc __rte_unused,
1227 unsigned int socket_id __rte_unused,
1228 const struct rte_eth_txconf *tx_conf)
1230 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1231 struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
1233 PMD_INIT_FUNC_TRACE();
1235 /* Tx deferred start is not supported */
1236 if (tx_conf->tx_deferred_start) {
1237 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1240 txq->nb_desc = UINT16_MAX;
1241 txq->offloads = tx_conf->offloads;
1243 if (queue_idx >= dev->data->nb_tx_queues) {
1244 rte_errno = EOVERFLOW;
1245 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
1246 (void *)dev, queue_idx, dev->data->nb_tx_queues);
1250 DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
1251 queue_idx, txq->fqid);
1252 dev->data->tx_queues[queue_idx] = txq;
1258 dpaa_dev_rx_queue_count(void *rx_queue)
1260 struct qman_fq *rxq = rx_queue;
1263 PMD_INIT_FUNC_TRACE();
1265 if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
1266 DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
1272 static int dpaa_link_down(struct rte_eth_dev *dev)
1274 struct fman_if *fif = dev->process_private;
1275 struct __fman_if *__fif;
1277 PMD_INIT_FUNC_TRACE();
1279 __fif = container_of(fif, struct __fman_if, __if);
1281 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1282 dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
1284 return dpaa_eth_dev_stop(dev);
1288 static int dpaa_link_up(struct rte_eth_dev *dev)
1290 struct fman_if *fif = dev->process_private;
1291 struct __fman_if *__fif;
1293 PMD_INIT_FUNC_TRACE();
1295 __fif = container_of(fif, struct __fman_if, __if);
1297 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1298 dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
1300 dpaa_eth_dev_start(dev);
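/* Flow control set: when Tx pause is requested, program the FMan pause
 * thresholds and pause time, then cache the configuration in the per-device
 * state for later retrieval by dpaa_flow_ctrl_get().
 */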
1305 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
1306 struct rte_eth_fc_conf *fc_conf)
1308 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1309 struct rte_eth_fc_conf *net_fc;
1311 PMD_INIT_FUNC_TRACE();
1313 if (!(dpaa_intf->fc_conf)) {
1314 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1315 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1316 if (!dpaa_intf->fc_conf) {
1317 DPAA_PMD_ERR("unable to save flow control info");
1321 net_fc = dpaa_intf->fc_conf;
1323 if (fc_conf->high_water < fc_conf->low_water) {
1324 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
1328 if (fc_conf->mode == RTE_FC_NONE) {
1330 } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
1331 fc_conf->mode == RTE_FC_FULL) {
1332 fman_if_set_fc_threshold(dev->process_private,
1333 fc_conf->high_water,
1335 dpaa_intf->bp_info->bpid);
1336 if (fc_conf->pause_time)
1337 fman_if_set_fc_quanta(dev->process_private,
1338 fc_conf->pause_time);
1341 /* Save the information in dpaa device */
1342 net_fc->pause_time = fc_conf->pause_time;
1343 net_fc->high_water = fc_conf->high_water;
1344 net_fc->low_water = fc_conf->low_water;
1345 net_fc->send_xon = fc_conf->send_xon;
1346 net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
1347 net_fc->mode = fc_conf->mode;
1348 net_fc->autoneg = fc_conf->autoneg;
1354 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
1355 struct rte_eth_fc_conf *fc_conf)
1357 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1358 struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
1361 PMD_INIT_FUNC_TRACE();
1364 fc_conf->pause_time = net_fc->pause_time;
1365 fc_conf->high_water = net_fc->high_water;
1366 fc_conf->low_water = net_fc->low_water;
1367 fc_conf->send_xon = net_fc->send_xon;
1368 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
1369 fc_conf->mode = net_fc->mode;
1370 fc_conf->autoneg = net_fc->autoneg;
1373 ret = fman_if_get_fc_threshold(dev->process_private);
1375 fc_conf->mode = RTE_FC_TX_PAUSE;
1376 fc_conf->pause_time =
1377 fman_if_get_fc_quanta(dev->process_private);
1379 fc_conf->mode = RTE_FC_NONE;
1386 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
1387 struct rte_ether_addr *addr,
1389 __rte_unused uint32_t pool)
1393 PMD_INIT_FUNC_TRACE();
1395 ret = fman_if_add_mac_addr(dev->process_private,
1396 addr->addr_bytes, index);
1399 DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
1404 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
1407 PMD_INIT_FUNC_TRACE();
1409 fman_if_clear_mac_addr(dev->process_private, index);
1413 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
1414 struct rte_ether_addr *addr)
1418 PMD_INIT_FUNC_TRACE();
1420 ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
1422 DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1428 dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1429 struct rte_eth_rss_conf *rss_conf)
1431 struct rte_eth_dev_data *data = dev->data;
1432 struct rte_eth_conf *eth_conf = &data->dev_conf;
1434 PMD_INIT_FUNC_TRACE();
1436 if (!(default_q || fmc_q)) {
1437 if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
1438 DPAA_PMD_ERR("FM port configuration: Failed\n");
1441 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1443 DPAA_PMD_ERR("Function not supported\n");
1450 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1451 struct rte_eth_rss_conf *rss_conf)
1453 struct rte_eth_dev_data *data = dev->data;
1454 struct rte_eth_conf *eth_conf = &data->dev_conf;
1456 /* dpaa does not support rss_key, so length should be 0*/
1457 rss_conf->rss_key_len = 0;
1458 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1462 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
1465 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1466 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1468 if (!rxq->is_static)
1471 return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
1474 static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
1477 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1478 struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1482 if (!rxq->is_static)
1485 qman_fq_portal_irqsource_remove(rxq->qp, ~0);
1487 temp1 = read(rxq->q_fd, &temp, sizeof(temp));
1488 if (temp1 != sizeof(temp))
1489 DPAA_PMD_ERR("irq read error");
1491 qman_fq_portal_thread_irq(rxq->qp);
1497 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1498 struct rte_eth_rxq_info *qinfo)
1500 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1501 struct qman_fq *rxq;
1504 rxq = dev->data->rx_queues[queue_id];
1506 qinfo->mp = dpaa_intf->bp_info->mp;
1507 qinfo->scattered_rx = dev->data->scattered_rx;
1508 qinfo->nb_desc = rxq->nb_desc;
1510 /* Report the HW Rx buffer length to user */
1511 ret = fman_if_get_maxfrm(dev->process_private);
1513 qinfo->rx_buf_size = ret;
1515 qinfo->conf.rx_free_thresh = 1;
1516 qinfo->conf.rx_drop_en = 1;
1517 qinfo->conf.rx_deferred_start = 0;
1518 qinfo->conf.offloads = rxq->offloads;
1522 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1523 struct rte_eth_txq_info *qinfo)
1525 struct qman_fq *txq;
1527 txq = dev->data->tx_queues[queue_id];
1529 qinfo->nb_desc = txq->nb_desc;
1530 qinfo->conf.tx_thresh.pthresh = 0;
1531 qinfo->conf.tx_thresh.hthresh = 0;
1532 qinfo->conf.tx_thresh.wthresh = 0;
1534 qinfo->conf.tx_free_thresh = 0;
1535 qinfo->conf.tx_rs_thresh = 0;
1536 qinfo->conf.offloads = txq->offloads;
1537 qinfo->conf.tx_deferred_start = 0;
1540 static struct eth_dev_ops dpaa_devops = {
1541 .dev_configure = dpaa_eth_dev_configure,
1542 .dev_start = dpaa_eth_dev_start,
1543 .dev_stop = dpaa_eth_dev_stop,
1544 .dev_close = dpaa_eth_dev_close,
1545 .dev_infos_get = dpaa_eth_dev_info,
1546 .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
1548 .rx_queue_setup = dpaa_eth_rx_queue_setup,
1549 .tx_queue_setup = dpaa_eth_tx_queue_setup,
1550 .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
1551 .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
1552 .rxq_info_get = dpaa_rxq_info_get,
1553 .txq_info_get = dpaa_txq_info_get,
1555 .flow_ctrl_get = dpaa_flow_ctrl_get,
1556 .flow_ctrl_set = dpaa_flow_ctrl_set,
1558 .link_update = dpaa_eth_link_update,
1559 .stats_get = dpaa_eth_stats_get,
1560 .xstats_get = dpaa_dev_xstats_get,
1561 .xstats_get_by_id = dpaa_xstats_get_by_id,
1562 .xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
1563 .xstats_get_names = dpaa_xstats_get_names,
1564 .xstats_reset = dpaa_eth_stats_reset,
1565 .stats_reset = dpaa_eth_stats_reset,
1566 .promiscuous_enable = dpaa_eth_promiscuous_enable,
1567 .promiscuous_disable = dpaa_eth_promiscuous_disable,
1568 .allmulticast_enable = dpaa_eth_multicast_enable,
1569 .allmulticast_disable = dpaa_eth_multicast_disable,
1570 .mtu_set = dpaa_mtu_set,
1571 .dev_set_link_down = dpaa_link_down,
1572 .dev_set_link_up = dpaa_link_up,
1573 .mac_addr_add = dpaa_dev_add_mac_addr,
1574 .mac_addr_remove = dpaa_dev_remove_mac_addr,
1575 .mac_addr_set = dpaa_dev_set_mac_addr,
1577 .fw_version_get = dpaa_fw_version_get,
1579 .rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
1580 .rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
1581 .rss_hash_update = dpaa_dev_rss_hash_update,
1582 .rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
1586 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1588 if (strcmp(dev->device->driver->name,
1596 is_dpaa_supported(struct rte_eth_dev *dev)
1598 return is_device_supported(dev, &rte_dpaa_pmd);
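/* PMD-specific API to enable or disable MAC loopback on a DPAA port.
 * Minimal usage sketch from an application (illustrative; "port_id" is
 * assumed to be a valid DPAA port):
 *
 *   if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) < 0)
 *       printf("enabling loopback on port %u failed\n", port_id);
 */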
1602 rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
1604 struct rte_eth_dev *dev;
1606 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1608 dev = &rte_eth_devices[port];
1610 if (!is_dpaa_supported(dev))
1614 fman_if_loopback_enable(dev->process_private);
1616 fman_if_loopback_disable(dev->process_private);
1621 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
1622 struct fman_if *fman_intf)
1624 struct rte_eth_fc_conf *fc_conf;
1627 PMD_INIT_FUNC_TRACE();
1629 if (!(dpaa_intf->fc_conf)) {
1630 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1631 sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1632 if (!dpaa_intf->fc_conf) {
1633 DPAA_PMD_ERR("unable to save flow control info");
1637 fc_conf = dpaa_intf->fc_conf;
1638 ret = fman_if_get_fc_threshold(fman_intf);
1640 fc_conf->mode = RTE_FC_TX_PAUSE;
1641 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
1643 fc_conf->mode = RTE_FC_NONE;
1649 /* Initialise an Rx FQ */
1650 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1653 struct qm_mcc_initfq opts = {0};
1655 u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
1656 struct qm_mcc_initcgr cgr_opts = {
1657 .we_mask = QM_CGR_WE_CS_THRES |
1661 .cstd_en = QM_CGR_EN,
1662 .mode = QMAN_CGR_MODE_FRAME
1666 if (fmc_q || default_q) {
1667 ret = qman_reserve_fqid(fqid);
1669 DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
1675 DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1676 ret = qman_create_fq(fqid, flags, fq);
1678 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
1682 fq->is_static = false;
1684 dpaa_poll_queue_default_config(&opts);
1687 /* Enable tail drop with cgr on this queue */
1688 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1690 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1694 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1698 opts.we_mask |= QM_INITFQ_WE_CGID;
1699 opts.fqd.cgid = cgr_rx->cgrid;
1700 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1703 ret = qman_init_fq(fq, 0, &opts);
1705 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
1709 /* Initialise a Tx FQ */
1710 static int dpaa_tx_queue_init(struct qman_fq *fq,
1711 struct fman_if *fman_intf,
1712 struct qman_cgr *cgr_tx)
1714 struct qm_mcc_initfq opts = {0};
1715 struct qm_mcc_initcgr cgr_opts = {
1716 .we_mask = QM_CGR_WE_CS_THRES |
1720 .cstd_en = QM_CGR_EN,
1721 .mode = QMAN_CGR_MODE_FRAME
1726 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1727 QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1729 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1732 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1733 QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1734 opts.fqd.dest.channel = fman_intf->tx_channel_id;
1735 opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1736 opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1737 opts.fqd.context_b = 0;
1738 /* no tx-confirmation */
1739 opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1740 opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1741 DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1744 /* Enable tail drop with cgr on this queue */
1745 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
1746 td_tx_threshold, 0);
1748 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
1752 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1756 opts.we_mask |= QM_INITFQ_WE_CGID;
1757 opts.fqd.cgid = cgr_tx->cgrid;
1758 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1759 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
1763 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1765 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
1769 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1770 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1771 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1773 struct qm_mcc_initfq opts = {0};
1776 PMD_INIT_FUNC_TRACE();
1778 ret = qman_reserve_fqid(fqid);
1780 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1784 /* "map" this Rx FQ to one of the interfaces Tx FQID */
1785 DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1786 ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1788 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1792 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1793 opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1794 ret = qman_init_fq(fq, 0, &opts);
1796 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1802 /* Initialise a network interface */
1804 dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
1806 struct rte_dpaa_device *dpaa_device;
1807 struct fm_eth_port_cfg *cfg;
1808 struct dpaa_if *dpaa_intf;
1809 struct fman_if *fman_intf;
1812 PMD_INIT_FUNC_TRACE();
1814 dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1815 dev_id = dpaa_device->id.dev_id;
1816 cfg = dpaa_get_eth_port_cfg(dev_id);
1817 fman_intf = cfg->fman_if;
1818 eth_dev->process_private = fman_intf;
1820 /* Plugging of UCODE burst API not supported in Secondary */
1821 dpaa_intf = eth_dev->data->dev_private;
1822 eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1823 if (dpaa_intf->cgr_tx)
1824 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
1826 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
1827 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1828 qman_set_fq_lookup_table(
1829 dpaa_intf->rx_queues->qman_fq_lookup_table);
1835 /* Initialise a network interface */
1837 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1839 int num_rx_fqs, fqid;
1842 struct rte_dpaa_device *dpaa_device;
1843 struct dpaa_if *dpaa_intf;
1844 struct fm_eth_port_cfg *cfg;
1845 struct fman_if *fman_intf;
1846 struct fman_if_bpool *bp, *tmp_bp;
1847 uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1848 uint32_t cgrid_tx[MAX_DPAA_CORES];
1849 uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
1850 int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
1853 PMD_INIT_FUNC_TRACE();
1855 dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1856 dev_id = dpaa_device->id.dev_id;
1857 dpaa_intf = eth_dev->data->dev_private;
1858 cfg = dpaa_get_eth_port_cfg(dev_id);
1859 fman_intf = cfg->fman_if;
1861 dpaa_intf->name = dpaa_device->name;
1863 /* save fman_if & cfg in the interface structure */
1864 eth_dev->process_private = fman_intf;
1865 dpaa_intf->ifid = dev_id;
1866 dpaa_intf->cfg = cfg;
1868 memset((char *)dev_rx_fqids, 0,
1869 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
1871 memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
1873 /* Initialize Rx FQ's */
1875 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1877 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
1879 DPAA_MAX_NUM_PCD_QUEUES);
1880 if (num_rx_fqs < 0) {
1881 DPAA_PMD_ERR("%s FMC initialization failed!",
1886 DPAA_PMD_WARN("%s is not configured by FMC.",
1890 /* FMCLESS mode, load balance to multiple cores.*/
1891 num_rx_fqs = rte_lcore_count();
1894 /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
1897 if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1898 DPAA_PMD_ERR("Invalid number of RX queues\n");
1902 if (num_rx_fqs > 0) {
1903 dpaa_intf->rx_queues = rte_zmalloc(NULL,
1904 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1905 if (!dpaa_intf->rx_queues) {
1906 DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1910 dpaa_intf->rx_queues = NULL;
1913 memset(cgrid, 0, sizeof(cgrid));
1914 memset(cgrid_tx, 0, sizeof(cgrid_tx));
1916 /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
1917 * Tx tail drop is disabled.
1919 if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
1920 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
1921 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
1923 /* if a very large value is being configured */
1924 if (td_tx_threshold > UINT16_MAX)
1925 td_tx_threshold = CGR_RX_PERFQ_THRESH;
1928 /* If congestion control is enabled globally*/
1929 if (num_rx_fqs > 0 && td_threshold) {
1930 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1931 sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1932 if (!dpaa_intf->cgr_rx) {
1933 DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1938 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1939 if (ret != num_rx_fqs) {
1940 DPAA_PMD_WARN("insufficient CGRIDs available");
1945 dpaa_intf->cgr_rx = NULL;
1948 if (!fmc_q && !default_q) {
1949 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
1952 DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
1957 for (loop = 0; loop < num_rx_fqs; loop++) {
1961 fqid = dev_rx_fqids[loop];
1963 vsp_id = dev_vspids[loop];
1965 if (dpaa_intf->cgr_rx)
1966 dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1968 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1969 dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1973 dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
1974 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
1976 dpaa_intf->nb_rx_queues = num_rx_fqs;
1978 /* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
1979 dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
1980 MAX_DPAA_CORES, MAX_CACHELINE);
1981 if (!dpaa_intf->tx_queues) {
1982 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
1987 /* If congestion control is enabled globally*/
1988 if (td_tx_threshold) {
1989 dpaa_intf->cgr_tx = rte_zmalloc(NULL,
1990 sizeof(struct qman_cgr) * MAX_DPAA_CORES,
1992 if (!dpaa_intf->cgr_tx) {
1993 DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
1998 ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
2000 if (ret != MAX_DPAA_CORES) {
2001 DPAA_PMD_WARN("insufficient CGRIDs available");
2006 dpaa_intf->cgr_tx = NULL;
2010 for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
2011 if (dpaa_intf->cgr_tx)
2012 dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
2014 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
2016 dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
2019 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
2021 dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
2023 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2024 ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2025 [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
2027 DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
2030 dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
2031 ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2032 [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
2034 DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
2037 dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
2040 DPAA_PMD_DEBUG("All frame queues created");
2042 /* Get the initial configuration for flow control */
2043 dpaa_fc_set_default(dpaa_intf, fman_intf);
2045 /* reset bpool list, initialize bpool dynamically */
2046 list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
2047 list_del(&bp->node);
2051 /* Populate ethdev structure */
2052 eth_dev->dev_ops = &dpaa_devops;
2053 eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
2054 eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
2055 eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
2057 /* Allocate memory for storing MAC addresses */
2058 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2059 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
2060 if (eth_dev->data->mac_addrs == NULL) {
2061 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
2062 "store MAC addresses",
2063 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
2068 /* copy the primary mac address */
2069 rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
2071 RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
2072 dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
2074 if (!fman_intf->is_shared_mac) {
2075 /* Configure error packet handling */
2076 fman_if_receive_rx_errors(fman_intf,
2077 FM_FD_RX_STATUS_ERR_MASK);
2078 /* Disable RX mode */
2079 fman_if_disable_rx(fman_intf);
2080 /* Disable promiscuous mode */
2081 fman_if_promiscuous_disable(fman_intf);
2082 /* Disable multicast */
2083 fman_if_reset_mcast_filter_table(fman_intf);
2084 /* Reset interface statistics */
2085 fman_if_stats_reset(fman_intf);
2086 /* Disable SG by default */
2087 fman_if_set_sg(fman_intf, 0);
2088 fman_if_set_maxfrm(fman_intf,
2089 RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2095 rte_free(dpaa_intf->tx_queues);
2096 dpaa_intf->tx_queues = NULL;
2097 dpaa_intf->nb_tx_queues = 0;
2100 rte_free(dpaa_intf->cgr_rx);
2101 rte_free(dpaa_intf->cgr_tx);
2102 rte_free(dpaa_intf->rx_queues);
2103 dpaa_intf->rx_queues = NULL;
2104 dpaa_intf->nb_rx_queues = 0;
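/* Bus probe callback: verify that RTE_PKTMBUF_HEADROOM covers the DPAA
 * annotation area, handle secondary-process attach, perform one-time global
 * setup (FMC detection, push-mode queue count, per-lcore portal), allocate
 * the ethdev and invoke dpaa_dev_init().
 */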
2109 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
2110 struct rte_dpaa_device *dpaa_dev)
2114 struct rte_eth_dev *eth_dev;
2116 PMD_INIT_FUNC_TRACE();
2118 if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
2119 RTE_PKTMBUF_HEADROOM) {
2121 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
2122 RTE_PKTMBUF_HEADROOM,
2123 DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
2128 /* In case of secondary process, the device is already configured
2129 * and no further action is required, except portal initialization
2130 * and verifying secondary attachment to port name.
2132 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2133 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
2136 eth_dev->device = &dpaa_dev->device;
2137 eth_dev->dev_ops = &dpaa_devops;
2139 ret = dpaa_dev_init_secondary(eth_dev);
2141 RTE_LOG(ERR, PMD, "secondary dev init failed\n");
2145 rte_eth_dev_probing_finish(eth_dev);
2149 if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
2150 if (access("/tmp/fmc.bin", F_OK) == -1) {
2151 DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
2155 if (!(default_q || fmc_q)) {
2156 if (dpaa_fm_init()) {
2157 DPAA_PMD_ERR("FM init failed\n");
2162 /* disabling the default push mode for LS1043 */
2163 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
2164 dpaa_push_mode_max_queue = 0;
2166 /* Check if push mode queues are to be enabled. Currently we allow
2167 * only one queue per thread.
2169 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
2170 dpaa_push_mode_max_queue =
2171 atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
2172 if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
2173 dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
2179 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2180 ret = rte_dpaa_portal_init((void *)1);
2182 DPAA_PMD_ERR("Unable to initialize portal");
2187 eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
2191 eth_dev->data->dev_private =
2192 rte_zmalloc("ethdev private structure",
2193 sizeof(struct dpaa_if),
2194 RTE_CACHE_LINE_SIZE);
2195 if (!eth_dev->data->dev_private) {
2196 DPAA_PMD_ERR("Cannot allocate memzone for port data");
2197 rte_eth_dev_release_port(eth_dev);
2201 eth_dev->device = &dpaa_dev->device;
2202 dpaa_dev->eth_dev = eth_dev;
2204 qman_ern_register_cb(dpaa_free_mbuf);
2206 if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
2207 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2209 /* Invoke PMD device initialization function */
2210 diag = dpaa_dev_init(eth_dev);
2212 rte_eth_dev_probing_finish(eth_dev);
2216 rte_eth_dev_release_port(eth_dev);
2221 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
2223 struct rte_eth_dev *eth_dev;
2226 PMD_INIT_FUNC_TRACE();
2228 eth_dev = dpaa_dev->eth_dev;
2229 dpaa_eth_dev_close(eth_dev);
2230 ret = rte_eth_dev_release_port(eth_dev);
2235 static void __attribute__((destructor(102))) dpaa_finish(void)
2237 /* For secondary, primary will do all the cleanup */
2238 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2241 if (!(default_q || fmc_q)) {
2244 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
2245 if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
2246 struct rte_eth_dev *dev = &rte_eth_devices[i];
2247 struct dpaa_if *dpaa_intf =
2248 dev->data->dev_private;
2249 struct fman_if *fif =
2250 dev->process_private;
2251 if (dpaa_intf->port_handle)
2252 if (dpaa_fm_deconfig(dpaa_intf, fif))
2253 DPAA_PMD_WARN("DPAA FM "
2254 "deconfig failed\n");
2255 if (fif->num_profiles) {
2256 if (dpaa_port_vsp_cleanup(dpaa_intf,
2258 DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
2264 DPAA_PMD_WARN("DPAA FM term failed\n");
2268 DPAA_PMD_INFO("DPAA fman cleaned up");
2272 static struct rte_dpaa_driver rte_dpaa_pmd = {
2273 .drv_flags = RTE_DPAA_DRV_INTR_LSC,
2274 .drv_type = FSL_DPAA_ETH,
2275 .probe = rte_dpaa_probe,
2276 .remove = rte_dpaa_remove,
2279 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
2280 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);