/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
/* System headers */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>

/* QMAN/BMAN driver headers (qman_*, bman_* and fman_* APIs used below) */
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;

/* Per-FQ taildrop threshold, in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
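/*
 * When td_threshold is non-zero, dpaa_dev_init() allocates one congestion
 * group (CGR) per Rx queue and dpaa_rx_queue_init() arms it with this
 * frame-count threshold, so each FQ tail-drops independently once its
 * backlog exceeds the limit.
 */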
struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};
static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};
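/*
 * Each entry maps an xstat name to the byte offset of a 64-bit hardware
 * counter inside struct dpaa_if_stats; the getters below index the raw
 * counter array with (offset / 8). The names are derived from the MAC
 * counter mnemonics (e.g. raln = Rx alignment errors).
 */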
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

	return 0;
}
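/*
 * Example: a standard 1500-byte MTU gives a max frame of
 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518 bytes, which is
 * exactly ETHER_MAX_LEN, so the jumbo flag stays clear; any larger MTU
 * flips it on.
 */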
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN) {
			fman_if_set_maxfrm(dpaa_intf->fif,
				dev->data->dev_conf.rxmode.max_rx_pkt_len);
			return 0;
		}
		return -EINVAL;
	}
	return 0;
}
static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_UNKNOWN
	};

	PMD_INIT_FUNC_TRACE();

	/* These ptypes are only delivered by the default Rx burst handler */
	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}
static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}
static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");
	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	return 0;
}
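/*
 * The reported string looks like "SVR:85490010-fman-v3" (value
 * illustrative): the SoC's SVR register value followed by the FMan IP
 * revision. Per the ethdev contract, a positive return value tells the
 * caller how large a buffer it must retry with.
 */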
static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G);
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa =
		(DEV_TX_OFFLOAD_IPV4_CKSUM |
		 DEV_TX_OFFLOAD_UDP_CKSUM |
		 DEV_TX_OFFLOAD_TCP_CKSUM);
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = 1000;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = 10000;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}
static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dpaa_intf->fif, stats);
	return 0;
}
static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dpaa_intf->fif);
}
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}
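/*
 * struct dpaa_if_stats is laid out as a contiguous block of 64-bit MAC
 * counters, so sizeof(struct dpaa_if_stats) / 8 is the number of counters
 * in the hardware snapshot and (offset / 8) converts a byte offset into
 * an index of that array.
 */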
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa_xstats_strings[i].name);

	return stat_cnt;
}
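/*
 * Standard ethdev convention: callers may pass xstats_names == NULL to
 * query only the number of statistics, then call again with an array of
 * at least that size.
 */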
static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* the snapshot holds sizeof / 8 64-bit counters */
		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}
static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}
static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dpaa_intf->fif);
}

static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dpaa_intf->fif);
}

static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dpaa_intf->fif);
}

static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
}
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
			      dpaa_intf->name, fd_offset,
			      fman_if_get_fdoff(dpaa_intf->fif));
	}

	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};
		int ret;

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		/* each Rx queue owns its own CGR, so modify this queue's one */
		ret = qman_modify_cgr(&dpaa_intf->cgr_rx[queue_idx], 0,
				      &cgr_opts);
		if (ret)
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
	}

	return 0;
}
static void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}
static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}
static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		   fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dpaa_intf->fif,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}
static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr,
		      uint32_t index,
		      __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
			" err = %d", ret);
	return 0;
}
static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dpaa_intf->fif, index);
}
static void
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
}
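/*
 * The ethdev "set default MAC" operation maps to programming filter slot 0:
 * index 0 holds the primary address, so adding at index 0 is assumed here
 * to replace the device's default MAC in the FMan filter table.
 */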
static struct eth_dev_ops dpaa_devops = {
	.dev_configure = dpaa_eth_dev_configure,
	.dev_start = dpaa_eth_dev_start,
	.dev_stop = dpaa_eth_dev_stop,
	.dev_close = dpaa_eth_dev_close,
	.dev_infos_get = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup = dpaa_eth_rx_queue_setup,
	.tx_queue_setup = dpaa_eth_tx_queue_setup,
	.rx_queue_release = dpaa_eth_rx_queue_release,
	.tx_queue_release = dpaa_eth_tx_queue_release,

	.flow_ctrl_get = dpaa_flow_ctrl_get,
	.flow_ctrl_set = dpaa_flow_ctrl_set,

	.link_update = dpaa_eth_link_update,
	.stats_get = dpaa_eth_stats_get,
	.xstats_get = dpaa_dev_xstats_get,
	.xstats_get_by_id = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
	.xstats_get_names = dpaa_xstats_get_names,
	.xstats_reset = dpaa_eth_stats_reset,
	.stats_reset = dpaa_eth_stats_reset,
	.promiscuous_enable = dpaa_eth_promiscuous_enable,
	.promiscuous_disable = dpaa_eth_promiscuous_disable,
	.allmulticast_enable = dpaa_eth_multicast_enable,
	.allmulticast_disable = dpaa_eth_multicast_disable,
	.mtu_set = dpaa_mtu_set,
	.dev_set_link_down = dpaa_link_down,
	.dev_set_link_up = dpaa_link_up,
	.mac_addr_add = dpaa_dev_add_mac_addr,
	.mac_addr_remove = dpaa_dev_remove_mac_addr,
	.mac_addr_set = dpaa_dev_set_mac_addr,

	.fw_version_get = dpaa_fw_version_get,
};
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = 0;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
			   QM_CGR_WE_CSTD_EN |
			   QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA;

	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_a.stashing.exclusive = 0;
	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid %d (ret=%d)",
				fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, flags, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
	return ret;
}
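/*
 * The stashing settings above ask QMan to prefetch the frame annotation,
 * the leading data cache lines and the FQ context into the dequeuing
 * core's cache (the *_STASH constants give the number of cache lines per
 * category), which trims cache-miss cost on the hot Rx path.
 */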
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
	return ret;
}
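/*
 * A note on the context_a encoding above: the 0x80000000 bit together with
 * the fman_dealloc_bufs_mask_hi/lo values configures the queue so that,
 * with Tx confirmation disabled, transmitted buffers are released back to
 * BMan by hardware rather than confirmed to software (encoding per the
 * FMan convention assumed here).
 */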
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interface's Tx FQIDs */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			     fqid, ret);
	return ret;
}
#endif
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_cores, num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = &dpaa_netcfg->port_cfg[dev_id];
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQs */
	if (getenv("DPAA_NUM_RX_QUEUES"))
		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
	else
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
		DPAA_PMD_ERR("Invalid number of RX queues");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;

	/* If congestion control is enabled globally */
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			return -EINVAL;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

	for (loop = 0; loop < num_rx_fqs; loop++) {
		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
			DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			return ret;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
	num_cores = rte_lcore_count();
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		num_cores, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues)
		return -ENOMEM;

	for (loop = 0; loop < num_cores; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			return ret;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = num_cores;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		rte_free(dpaa_intf->cgr_rx);
		rte_free(dpaa_intf->rx_queues);
		rte_free(dpaa_intf->tx_queues);
		dpaa_intf->rx_queues = NULL;
		dpaa_intf->tx_queues = NULL;
		dpaa_intf->nb_rx_queues = 0;
		dpaa_intf->nb_tx_queues = 0;
		return -ENOMEM;
	}

	/* copy the primary mac address */
	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
		dpaa_device->name,
		fman_intf->mac_addr.addr_bytes[0],
		fman_intf->mac_addr.addr_bytes[1],
		fman_intf->mac_addr.addr_bytes[2],
		fman_intf->mac_addr.addr_bytes[3],
		fman_intf->mac_addr.addr_bytes[4],
		fman_intf->mac_addr.addr_bytes[5]);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);

	return 0;
}
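/*
 * Rx FQIDs are deterministic: interface i with n Rx queues claims the
 * range DPAA_PCD_FQID_START + i * DPAA_PCD_FQID_MULTIPLIER + [0, n).
 * This is also why n is capped at DPAA_PCD_FQID_MULTIPLIER above; a
 * larger n would collide with the next interface's range.
 */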
static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

		/* CGRIDs were allocated as one contiguous range, so release
		 * it starting from the first queue's id.
		 */
		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
					 dpaa_intf->nb_rx_queues);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	/* free memory for storing MAC addresses */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}

	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	ret = rte_dpaa_portal_init((void *)1);
	if (ret) {
		DPAA_PMD_ERR("Unable to initialize portal");
		return ret;
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	/* On failure, roll back the port allocation */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}
static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);