/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;

/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};
static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};
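
/*
 * Each entry pairs an ethdev xstat name with the byte offset of the
 * corresponding 64-bit hardware counter inside struct dpaa_if_stats;
 * the getters below divide that offset by 8 to index a u64 snapshot of
 * the whole stats block.
 */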
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			      + VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

	return 0;
}
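
/*
 * Worked example, assuming the conventional sizes ETHER_HDR_LEN = 14,
 * ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE = 4: an MTU of 1500 yields
 * frame_size = 1500 + 14 + 4 + 4 = 1522, which exceeds ETHER_MAX_LEN
 * (1518), so a single VLAN tag is already enough to enable jumbo_frame.
 */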
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN) {
			fman_if_set_maxfrm(dpaa_intf->fif,
				dev->data->dev_conf.rxmode.max_rx_pkt_len);
			return 0;
		}
		return -1;
	}
	return 0;
}
static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}
static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}
static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");
	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
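
/*
 * The reported string has the form "SVR:<svr>-fman-v<rev>"; with a
 * hypothetical SVR register value of 0x87000010 and FMan IP revision 3
 * it would read "SVR:87000010-fman-v3".
 */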
static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G);
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa =
		(DEV_TX_OFFLOAD_IPV4_CKSUM |
		 DEV_TX_OFFLOAD_UDP_CKSUM |
		 DEV_TX_OFFLOAD_TCP_CKSUM);
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = 1000;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = 10000;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}
static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dpaa_intf->fif, stats);
	return 0;
}
static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dpaa_intf->fif);
}
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (xstats == NULL)
		return 0;

	if (n < num)
		return num;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}
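
/*
 * Indexing sketch: fman_if_stats_get_all() snapshots one u64 per
 * hardware counter, laid out exactly as struct dpaa_if_stats. If the
 * rdrp counter sat at byte offset 0x30 (a hypothetical value), its
 * xstat would be read from values[0x30 / 8] == values[6].
 */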
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa_xstats_strings[i].name);

	return stat_cnt;
}
static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}
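
/*
 * Two call modes: with ids == NULL the function fills values[] with the
 * complete counter set; otherwise it first recurses with ids == NULL to
 * snapshot everything into values_copy[] and then copies out only the
 * requested counters.
 */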
static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}
static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dpaa_intf->fif);
}

static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dpaa_intf->fif);
}

static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dpaa_intf->fif);
}

static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dpaa_intf->fif);
}
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if=%s - fd_offset=%d offset=%d",
			      dpaa_intf->name, fd_offset,
			      fman_if_get_fdoff(dpaa_intf->fif));
	}

	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};
		int ret;

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret)
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
	}

	return 0;
}
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}
static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
	u32 frm_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
		RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
			rx_queue_id, frm_cnt);
	}
	return frm_cnt;
}
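
/*
 * This backs rte_eth_rx_queue_count(): rather than walking descriptor
 * status bits as ring-based PMDs do, it queries QMan for the number of
 * frames currently enqueued on the queue's FQ.
 */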
static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}
static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		   fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dpaa_intf->fif,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}
static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
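
/*
 * Note the get/set asymmetry: dpaa_flow_ctrl_set() caches the applied
 * configuration in dpaa_intf->fc_conf, which dpaa_flow_ctrl_get() then
 * serves from memory; only when nothing has been cached yet does _get
 * derive the mode from the FMan threshold registers.
 */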
static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr,
		      uint32_t index,
		      __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
			" err = %d", ret);
	return ret;
}
static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dpaa_intf->fif, index);
}
static void
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		      struct ether_addr *addr)
{
	int ret;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
}
static struct eth_dev_ops dpaa_devops = {
	.dev_configure = dpaa_eth_dev_configure,
	.dev_start = dpaa_eth_dev_start,
	.dev_stop = dpaa_eth_dev_stop,
	.dev_close = dpaa_eth_dev_close,
	.dev_infos_get = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup = dpaa_eth_rx_queue_setup,
	.tx_queue_setup = dpaa_eth_tx_queue_setup,
	.rx_queue_release = dpaa_eth_rx_queue_release,
	.tx_queue_release = dpaa_eth_tx_queue_release,
	.rx_queue_count = dpaa_dev_rx_queue_count,

	.flow_ctrl_get = dpaa_flow_ctrl_get,
	.flow_ctrl_set = dpaa_flow_ctrl_set,

	.link_update = dpaa_eth_link_update,
	.stats_get = dpaa_eth_stats_get,
	.xstats_get = dpaa_dev_xstats_get,
	.xstats_get_by_id = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
	.xstats_get_names = dpaa_xstats_get_names,
	.xstats_reset = dpaa_eth_stats_reset,
	.stats_reset = dpaa_eth_stats_reset,
	.promiscuous_enable = dpaa_eth_promiscuous_enable,
	.promiscuous_disable = dpaa_eth_promiscuous_disable,
	.allmulticast_enable = dpaa_eth_multicast_enable,
	.allmulticast_disable = dpaa_eth_multicast_disable,
	.mtu_set = dpaa_mtu_set,
	.dev_set_link_down = dpaa_link_down,
	.dev_set_link_up = dpaa_link_up,
	.mac_addr_add = dpaa_dev_add_mac_addr,
	.mac_addr_remove = dpaa_dev_remove_mac_addr,
	.mac_addr_set = dpaa_dev_set_mac_addr,

	.fw_version_get = dpaa_fw_version_get,
};
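
/*
 * These callbacks are dispatched through the generic ethdev layer; an
 * application call such as rte_eth_dev_start(port_id) lands in
 * dpaa_eth_dev_start() via this table, so applications never need a
 * DPAA-specific API.
 */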
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(dpaa_intf->fif);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}
/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = 0;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
			   QM_CGR_WE_CSTD_EN |
			   QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA;

	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_a.stashing.exclusive = 0;
	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid %d (ret=%d)",
				fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, flags, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
	return ret;
}
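
/*
 * The stashing fields above ask QMan to pre-load the frame annotation,
 * packet data and FQ context cache lines into the dequeuing core's
 * cache, so the Rx burst handler hits warm lines instead of taking
 * cold memory reads on every frame.
 */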
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
	return ret;
}
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interfaces Tx FQID */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			     fqid, ret);
	return ret;
}
#endif
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_cores, num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = &dpaa_netcfg->port_cfg[dev_id];
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;
	/* Initialize Rx FQ's */
	if (getenv("DPAA_NUM_RX_QUEUES"))
		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
	else
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
		DPAA_PMD_ERR("Invalid number of RX queues");
		return -EINVAL;
	}
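
	/*
	 * Example with a hypothetical command line: launching the
	 * application as "DPAA_NUM_RX_QUEUES=4 ./app ..." creates four
	 * PCD Rx frame queues per interface instead of
	 * DPAA_DEFAULT_NUM_PCD_QUEUES.
	 */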
	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;

	/* If congestion control is enabled globally */
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			return -EINVAL;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}
	for (loop = 0; loop < num_rx_fqs; loop++) {
		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
			DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			return ret;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;
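
	/*
	 * FQID arithmetic, with hypothetical macro values
	 * DPAA_PCD_FQID_START = 0x400 and DPAA_PCD_FQID_MULTIPLIER = 0x100:
	 * interface 2, queue 3 gets fqid = 0x400 + 2 * 0x100 + 3 = 0x603,
	 * so every interface owns a disjoint 0x100-wide FQID window (hence
	 * the num_rx_fqs upper bound checked above).
	 */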
	/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
	num_cores = rte_lcore_count();
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		num_cores, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues)
		return -ENOMEM;

	for (loop = 0; loop < num_cores; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			return ret;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = num_cores;
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");
	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		rte_free(dpaa_intf->cgr_rx);
		rte_free(dpaa_intf->rx_queues);
		rte_free(dpaa_intf->tx_queues);
		dpaa_intf->rx_queues = NULL;
		dpaa_intf->tx_queues = NULL;
		dpaa_intf->nb_rx_queues = 0;
		dpaa_intf->nb_tx_queues = 0;
		return -ENOMEM;
	}

	/* copy the primary mac address */
	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
		dpaa_device->name,
		fman_intf->mac_addr.addr_bytes[0],
		fman_intf->mac_addr.addr_bytes[1],
		fman_intf->mac_addr.addr_bytes[2],
		fman_intf->mac_addr.addr_bytes[3],
		fman_intf->mac_addr.addr_bytes[4],
		fman_intf->mac_addr.addr_bytes[5]);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);

	return 0;
}
static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

		/* release from the base of the allocated CGRID range */
		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
					 dpaa_intf->nb_rx_queues);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	/* free memory for storing MAC addresses */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}

	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	ret = rte_dpaa_portal_init((void *)1);
	if (ret) {
		DPAA_PMD_ERR("Unable to initialize portal");
		return ret;
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}
static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);