/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_fslmc.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
        uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
        {"ingress_multicast_frames", 0, 2},
        {"ingress_multicast_bytes", 0, 3},
        {"ingress_broadcast_frames", 0, 4},
        {"ingress_broadcast_bytes", 0, 5},
        {"egress_multicast_frames", 1, 2},
        {"egress_multicast_bytes", 1, 3},
        {"egress_broadcast_frames", 1, 4},
        {"egress_broadcast_bytes", 1, 5},
        {"ingress_filtered_frames", 2, 0},
        {"ingress_discarded_frames", 2, 1},
        {"ingress_nobuffer_discards", 2, 2},
        {"egress_discarded_frames", 2, 3},
        {"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                  struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &dev->data->dev_link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the buffer holding the new link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                   struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &dev->data->dev_link;
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
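
/* Both helpers above copy struct rte_eth_link as a single 64-bit word.
 * The current value is re-read as the "expected" operand of
 * rte_atomic64_cmpset(), so the swap fails (and a negative value is
 * returned) if another thread updates the link status concurrently.
 * This relies on struct rte_eth_link fitting in 8 bytes.
 */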

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        if (on)
                ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
                                       priv->token, vlan_id);
        else
                ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
                                          priv->token, vlan_id);

        if (ret < 0)
                PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
                            ret, vlan_id, priv->hw_id);

static void
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN Filter not available */
                if (!priv->max_vlan_filters)
                        RTE_LOG(INFO, PMD, "VLAN filter not available\n");

                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, false);
                if (ret < 0)
                        RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
                                ret);
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        RTE_LOG(INFO, PMD,
                                "VLAN extend offload not supported\n");

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
                     char *fw_version, size_t fw_size)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct mc_soc_version mc_plat_info = {0};
        struct mc_version mc_ver_info = {0};

        PMD_INIT_FUNC_TRACE();

        if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
                RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");

        if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
                RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");

        ret = snprintf(fw_version, fw_size,
                       mc_ver_info.revision);

        ret += 1; /* add the size of '\0' */
        if (fw_size < (uint32_t)ret)
                return ret;
        return 0;
}
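
/* Note: per the ethdev fw_version_get contract, the function above returns
 * the number of bytes (including the terminating '\0') needed when the
 * caller's buffer is too small, and 0 once the version string fits.
 */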

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        dev_info->if_index = priv->hw_id;

        dev_info->max_mac_addrs = priv->max_mac_filters;
        dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
        dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
        dev_info->speed_capa = ETH_LINK_SPEED_1G |
                ETH_LINK_SPEED_2_5G |
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *mc_q, *mcq;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
        mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
                          RTE_CACHE_LINE_SIZE);
        if (!mc_q)
                PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");

        for (i = 0; i < priv->nb_rx_queues; i++) {
                priv->rx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_q->q_storage = rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                if (!dpaa2_q->q_storage)

                memset(dpaa2_q->q_storage, 0,
                       sizeof(struct queue_storage_info_t));
                if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
        }

        for (i = 0; i < priv->nb_tx_queues; i++) {
                mc_q->flow_id = 0xffff;
                priv->tx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                dpaa2_q->cscn = rte_malloc(NULL,
                                           sizeof(struct qbman_result), 16);
        }

        for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
                mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
                mcq->tc_index = DPAA2_DEF_TC;
                mcq->flow_id = dist_idx;
        }

        dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
        rte_free(dpaa2_q->cscn);
        priv->tx_vq[i--] = NULL;

        i = priv->nb_rx_queues;

        mc_q = priv->rx_vq[0];
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
        dpaa2_free_dq_storage(dpaa2_q->q_storage);
        rte_free(dpaa2_q->q_storage);
        priv->rx_vq[i--] = NULL;

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        int rx_ip_csum_offload = false;

        PMD_INIT_FUNC_TRACE();

        if (eth_conf->rxmode.jumbo_frame == 1) {
                if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
                        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
                                priv->token, eth_conf->rxmode.max_rx_pkt_len);
                        if (ret)
                                PMD_INIT_LOG(ERR,
                                        "unable to set mtu. check config\n");

        if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
                ret = dpaa2_setup_flow_dist(dev,
                                eth_conf->rx_adv_conf.rss_conf.rss_hf);
                if (ret)
                        PMD_INIT_LOG(ERR, "unable to set flow distribution."
                                     " Please check queue config\n");

        if (eth_conf->rxmode.hw_ip_checksum)
                rx_ip_csum_offload = true;

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to set RX l3 csum: Error = %d\n", ret);

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to set RX l4 csum: Error = %d\n", ret);

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L3_CSUM, true);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to set TX l3 csum: Error = %d\n", ret);

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L4_CSUM, true);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to set TX l4 csum: Error = %d\n", ret);

        if (eth_conf->rxmode.hw_vlan_filter)
                dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

        /* update the current status */
        dpaa2_dev_link_update(dev, 0);

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_rxconf *rx_conf __rte_unused,
                         struct rte_mempool *mb_pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;

        PMD_INIT_FUNC_TRACE();

        PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
                    dev, rx_queue_id, mb_pool, rx_conf);

        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv,
                                           rte_dpaa2_bpid_info[bpid].bp_list);
        }

        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

        /* Get the flow id from the given VQ id. */
        flow_id = rx_queue_id % priv->nb_rx_queues;
        memset(&cfg, 0, sizeof(struct dpni_queue));

        options = options | DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (uint64_t)(dpaa2_q);
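
        /* The user_context set above is stored with the frame queue and
         * comes back in every hardware dequeue entry, which is how the Rx
         * path recovers the dpaa2_queue from a dequeue result.
         */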

        /* If this is an LS2088 or rev2 device, enable stashing. */
        if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* The last 6 bits control data stashing, annotation stashing
                 * and context stashing (in the order DS AS CS). Set
                 * 01 01 00 (0x14) to enable 1 line of data and 1 line of
                 * annotation stashing. For LX2, this setting should be
                 * 01 00 00 (0x10).
                 */
                if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
                        cfg.flc.value |= 0x10;
                else
                        cfg.flc.value |= 0x14;
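
                /* Worked example of the encoding above: 0x14 = 0b010100 ->
                 * DS=01 (stash one cache line of frame data), AS=01 (one
                 * line of annotation), CS=00 (no flow context stashing);
                 * 0x10 = 0b010000 stashes data only.
                 */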
        }

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
        if (ret)
                PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);

        if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
                struct dpni_taildrop taildrop;

                /* Enable per-Rx-queue congestion control (taildrop). */
                taildrop.threshold = CONG_THRESHOLD_RX_Q;
                taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
                taildrop.oal = CONG_RX_OAL;
                PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
                            rx_queue_id);
                ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index, flow_id, &taildrop);
                if (ret)
                        PMD_INIT_LOG(ERR, "Error in setting taildrop:"
                                     " err = %d\n", ret);
        }

        dev->data->rx_queues[rx_queue_id] = dpaa2_q;

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t tx_queue_id,
                         uint16_t nb_tx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
                priv->tx_vq[tx_queue_id];
        struct fsl_mc_io *dpni = priv->hw;
        struct dpni_queue tx_conf_cfg;
        struct dpni_queue tx_flow_cfg;
        uint8_t options = 0, flow_id;

        PMD_INIT_FUNC_TRACE();

        /* Return if queue already configured */
        if (dpaa2_q->flow_id != 0xffff) {
                dev->data->tx_queues[tx_queue_id] = dpaa2_q;
                return 0;
        }

        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
                             tc_id, flow_id, options, &tx_flow_cfg);
        if (ret)
                PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
                             "tc_id=%d, flow =%d ErrorCode = %x\n",
                             tc_id, flow_id, -ret);

        dpaa2_q->flow_id = flow_id;

        if (tx_queue_id == 0) {
                /* Set tx-conf and error configuration */
                ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                if (ret)
                        PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
                                     " ErrorCode = %x", ret);
        }

        dpaa2_q->tc_index = tc_id;

        if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
                struct dpni_congestion_notification_cfg cong_notif_cfg;

                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
                cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
                /* Notify that the queue is not congested when the data in
                 * the queue is below this threshold.
                 */
                cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
                cong_notif_cfg.message_ctx = 0;
                cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
                cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
                cong_notif_cfg.notification_mode =
                        DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                        DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
                        DPNI_CONG_OPT_COHERENT_WRITE;
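
                /* With WRITE_MEM_ON_ENTER/EXIT and no DPCON destination,
                 * hardware writes the congestion state change notification
                 * (CSCN) to message_iova, so software (e.g. the Tx path)
                 * can poll dpaa2_q->cscn in memory instead of taking an
                 * interrupt.
                 */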

                ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
                if (ret)
                        PMD_INIT_LOG(ERR,
                                "Error in setting tx congestion notification: = %d",
                                ret);
        }

        dev->data->tx_queues[tx_queue_id] = dpaa2_q;

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        struct qbman_swp *swp;
        struct qbman_fq_query_np_rslt state;
        uint32_t frame_cnt = 0;

        PMD_INIT_FUNC_TRACE();

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret)
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
                RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
                        rx_queue_id, frame_cnt);
        }

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* todo: add more types */
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6_EXT,
        };

        if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
                return ptypes;

/*
 * Dpaa2 link interrupt handler
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
dpaa2_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = param;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int status = 0, clear = 0;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL");

        ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, &status);
        if (unlikely(ret))
                RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);

        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                clear = DPNI_IRQ_EVENT_LINK_CHANGED;
                dpaa2_dev_link_update(dev, 0);
                /* calling all the apps registered for link status event */
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
        }

        ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                    irq_index, clear);
        if (unlikely(ret))
                RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

        PMD_INIT_FUNC_TRACE();

        err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
                                irq_index, mask);
        if (err < 0)
                PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
                             strerror(-err));

        err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, enable);
        if (err < 0)
                PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
                             strerror(-err));

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
        struct rte_device *rdev = dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct rte_eth_dev_data *data = dev->data;
        struct dpaa2_dev_priv *priv = data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpni_queue cfg;
        struct dpni_error_cfg err_cfg;
        struct dpni_queue_id qid;
        struct dpaa2_queue *dpaa2_q;
        struct rte_intr_handle *intr_handle;

        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
        intr_handle = &dpaa2_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
        if (ret)
                PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
                             ret, priv->hw_id);

        /* Power up the phy. Needed to make the link go UP */
        dpaa2_dev_set_link_up(dev);

        ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
                            DPNI_QUEUE_TX, &qdid);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to get qdid: ErrorCode = %d\n", ret);

        for (i = 0; i < data->nb_rx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX, dpaa2_q->tc_index,
                                     dpaa2_q->flow_id, &cfg, &qid);
                if (ret)
                        PMD_INIT_LOG(ERR, "Error to get flow "
                                     "information Error code = %d\n", ret);
                dpaa2_q->fqid = qid.fqid;
        }

        /* Checksum errors: send them to the normal path and set the flag
         * in annotation.
         */
        err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
        err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
        err_cfg.set_frame_annotation = true;

        ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
                                       priv->token, &err_cfg);
        if (ret)
                PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"

        /* if the interrupts were configured on this device */
        if (intr_handle && (intr_handle->fd) &&
            (dev->data->dev_conf.intr_conf.lsc != 0)) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           dpaa2_interrupt_handler,
                                           (void *)dev);

                /* enable vfio intr/eventfd mapping
                 * Interrupt index 0 is required, so we can not use
                 * rte_intr_enable.
                 */
                rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

                /* enable dpni_irqs */
                dpaa2_eth_setup_irqs(dev, 1);
        }

/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_link link;
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        /* reset interrupt callback */
        if (intr_handle && (intr_handle->fd) &&
            (dev->data->dev_conf.intr_conf.lsc != 0)) {
                /* disable dpni irqs */
                dpaa2_eth_setup_irqs(dev, 0);

                /* disable vfio intr before callback unregister */
                rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

                /* Unregistering LSC interrupt handler */
                rte_intr_callback_unregister(intr_handle,
                                             dpaa2_interrupt_handler,
                                             (void *)dev);
        }

        dpaa2_dev_set_link_down(dev);

        ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
        if (ret)
                PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
                             ret, priv->hw_id);

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        dpaa2_dev_atomic_write_link_status(dev, &link);

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_link link;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < data->nb_tx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
                if (dpaa2_q->cscn) {
                        rte_free(dpaa2_q->cscn);
                        dpaa2_q->cscn = NULL;
                }
        }

        /* Clean the device first */
        ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
        if (ret)
                PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
                             " error code %d\n", ret);

        memset(&link, 0, sizeof(link));
        dpaa2_dev_atomic_write_link_status(dev, &link);

static void
dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);

static void
dpaa2_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
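
        /* Multicast promiscuous must stay enabled as long as allmulticast
         * mode is still on; only turn it off otherwise.
         */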
        if (dev->data->all_multicast == 0) {
                ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
                                                 priv->token, false);
                if (ret < 0)
                        RTE_LOG(ERR, PMD,
                                "Unable to disable M promisc mode %d\n",
                                ret);
        }

static void
dpaa2_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        /* must remain on for all promiscuous */
        if (dev->data->promiscuous == 1)
                return;

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        /* check that mtu is within the allowed range */
        if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
                return -EINVAL;

        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length.
         */
        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
        if (ret)
                PMD_DRV_LOG(ERR, "setting the max frame length failed");

        PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
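
        /* Worked example for the terms shown above: mtu = 1500 gives
         * frame_size = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
         * = 1518 = ETHER_MAX_LEN, so jumbo mode stays off; any larger MTU
         * enables it. (Any additional term elided from the frame_size
         * expression would shift this boundary accordingly.)
         */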

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
                       struct ether_addr *addr,
                       __rte_unused uint32_t index,
                       __rte_unused uint32_t pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
                                priv->token, addr->addr_bytes);
        if (ret)
                RTE_LOG(ERR, PMD,
                        "error: Adding the MAC ADDR failed: err = %d\n", ret);

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
                          uint32_t index)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_dev_data *data = dev->data;
        struct ether_addr *macaddr;

        PMD_INIT_FUNC_TRACE();

        macaddr = &data->mac_addrs[index];

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
                                   priv->token, macaddr->addr_bytes);
        if (ret)
                RTE_LOG(ERR, PMD,
                        "error: Removing the MAC ADDR failed: err = %d\n", ret);

static void
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
                       struct ether_addr *addr)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
                                        priv->token, addr->addr_bytes);
        if (ret)
                RTE_LOG(ERR, PMD,
                        "error: Setting the MAC ADDR failed %d\n", ret);

int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
                        struct rte_eth_stats *stats)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        uint8_t page0 = 0, page1 = 1, page2 = 2;
        union dpni_statistics value;

        memset(&value, 0, sizeof(union dpni_statistics));

        PMD_INIT_FUNC_TRACE();

        if (!dpni)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        if (!stats)
                RTE_LOG(ERR, PMD, "stats is NULL\n");

        /* Get Counters from page_0 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page0, &value);

        stats->ipackets = value.page_0.ingress_all_frames;
        stats->ibytes = value.page_0.ingress_all_bytes;

        /* Get Counters from page_1 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page1, &value);

        stats->opackets = value.page_1.egress_all_frames;
        stats->obytes = value.page_1.egress_all_bytes;

        /* Get Counters from page_2 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page2, &value);

        /* Ingress drop frame count due to configured rules */
        stats->ierrors = value.page_2.ingress_filtered_frames;
        /* Ingress drop frame count due to error */
        stats->ierrors += value.page_2.ingress_discarded_frames;

        stats->oerrors = value.page_2.egress_discarded_frames;
        stats->imissed = value.page_2.ingress_nobuffer_discards;

        if (retcode)
                RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
                        retcode);

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        union dpni_statistics value[3] = {};
        unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

        /* Get Counters from page_0 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      0, &value[0]);

        /* Get Counters from page_1 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      1, &value[1]);

        /* Get Counters from page_2 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      2, &value[2]);

        for (i = 0; i < num; i++) {
                xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
                        raw.counter[dpaa2_xstats_strings[i].stats_id];
        }

        if (retcode)
                RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n",
                        retcode);

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                       struct rte_eth_xstat_name *xstats_names,
                       __rte_unused unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        snprintf(xstats_names[i].name,
                                 sizeof(xstats_names[i].name),
                                 "%s",
                                 dpaa2_xstats_strings[i].name);

        return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                       uint64_t *values, unsigned int n)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
        uint64_t values_copy[stat_cnt];

        if (!ids) {
                struct dpaa2_dev_priv *priv = dev->data->dev_private;
                struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
                union dpni_statistics value[3] = {};

                /* Get Counters from page_0 */
                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                              0, &value[0]);

                /* Get Counters from page_1 */
                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                              1, &value[1]);

                /* Get Counters from page_2 */
                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                              2, &value[2]);

                for (i = 0; i < stat_cnt; i++) {
                        values[i] = value[dpaa2_xstats_strings[i].page_id].
                                raw.counter[dpaa2_xstats_strings[i].stats_id];
                }
                return stat_cnt;
        }

        dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

        for (i = 0; i < n; i++) {
                if (ids[i] >= stat_cnt) {
                        PMD_INIT_LOG(ERR, "id value isn't valid");
                        return -EINVAL;
                }
                values[i] = values_copy[ids[i]];
        }
        return n;
}
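
/* Illustrative application-side usage of the by-id path above, through the
 * standard rte_eth_xstats_get_by_id() API (port_id is hypothetical here):
 *
 *      uint64_t id = 0, val;
 *      if (rte_eth_xstats_get_by_id(port_id, &id, &val, 1) == 1)
 *              printf("%s: %" PRIu64 "\n",
 *                     dpaa2_xstats_strings[0].name, val);
 */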

static int
dpaa2_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
        struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

        if (!ids)
                return dpaa2_xstats_get_names(dev, xstats_names, limit);

        dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

        for (i = 0; i < limit; i++) {
                if (ids[i] >= stat_cnt) {
                        PMD_INIT_LOG(ERR, "id value isn't valid");
                        return -EINVAL;
                }
                strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
        }
        return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);

        if (retcode)
                RTE_LOG(ERR, PMD, "Operation not completed: Error Code = %d\n",
                        retcode);

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete __rte_unused)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_link link, old;
        struct dpni_link_state state = {0};

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        memset(&old, 0, sizeof(old));
        dpaa2_dev_atomic_read_link_status(dev, &old);

        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);

        if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
                RTE_LOG(DEBUG, PMD, "No change in status\n");
                return -1;
        }

        memset(&link, 0, sizeof(struct rte_eth_link));
        link.link_status = state.up;
        link.link_speed = state.rate;

        if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
        else
                link.link_duplex = ETH_LINK_FULL_DUPLEX;

        dpaa2_dev_atomic_write_link_status(dev, &link);

        if (link.link_status)
                PMD_DRV_LOG(INFO, "Port %d Link is Up", dev->data->port_id);
        else
                PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
        return 0;
}

/*
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        struct dpni_link_state state = {0};

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "DPNI is NULL\n");

        /* Check if DPNI is currently enabled */
        ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
        if (ret) {
                /* Unable to obtain dpni status; Not continuing */
                PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
        }

        /* Enable link if not already enabled */
        if (!en) {
                ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
                if (ret)
                        PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
        }
        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret < 0)
                RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);

        /* changing tx burst function to start enqueues */
        dev->tx_pkt_burst = dpaa2_dev_tx;
        dev->data->dev_link.link_status = state.up;

        if (state.up)
                PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
                            dev->data->port_id);
        else
                PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);

/*
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        int dpni_enabled = 0;

        PMD_INIT_FUNC_TRACE();

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "Device has not yet been configured\n");

        /* changing tx burst function to avoid any more enqueues */
        dev->tx_pkt_burst = dummy_dev_tx;

        /* Loop while dpni_disable() attempts to drain the egress FQs
         * and confirm them back to us.
         */
        do {
                ret = dpni_disable(dpni, 0, priv->token);
                if (ret)
                        PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);

                ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
                if (ret)
                        PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);

                /* Allow the MC some slack */
                rte_delay_us(100 * 1000);
        } while (dpni_enabled && --retries);
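
        /* Each pass above sleeps 100 ms, so the total drain wait is bounded
         * by 100 ms times the initial retry count before giving up below.
         */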
        if (dpni_enabled) {
                PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
                /* todo - we may have to manually clean up queues. */
        } else {
                PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
                            dev->data->port_id);
        }

        dev->data->dev_link.link_status = 0;

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        struct dpni_link_state state = {0};

        PMD_INIT_FUNC_TRACE();

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL || fc_conf == NULL) {
                RTE_LOG(ERR, PMD, "device not configured\n");
        }

        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret)
                RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        if (state.options & DPNI_LINK_OPT_PAUSE) {
                /* DPNI_LINK_OPT_PAUSE set:
                 *  if ASYM_PAUSE not set,
                 *      RX side flow control (handle received Pause frames)
                 *      TX side flow control (send Pause frames)
                 *  if ASYM_PAUSE set,
                 *      RX side flow control (handle received Pause frames)
                 *      no TX side flow control (sending Pause frames disabled)
                 */
                if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
                        fc_conf->mode = RTE_FC_FULL;
                else
                        fc_conf->mode = RTE_FC_RX_PAUSE;
        } else {
                /* DPNI_LINK_OPT_PAUSE not set:
                 *  if ASYM_PAUSE set,
                 *      TX side flow control (send Pause frames)
                 *      no RX side flow control (no action on received Pause)
                 *  if ASYM_PAUSE not set,
                 *      flow control disabled
                 */
                if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
                        fc_conf->mode = RTE_FC_TX_PAUSE;
                else
                        fc_conf->mode = RTE_FC_NONE;
        }
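
        /* Summary of the mapping above:
         *      PAUSE=1, ASYM=0  ->  RTE_FC_FULL
         *      PAUSE=1, ASYM=1  ->  RTE_FC_RX_PAUSE
         *      PAUSE=0, ASYM=1  ->  RTE_FC_TX_PAUSE
         *      PAUSE=0, ASYM=0  ->  RTE_FC_NONE
         * dpaa2_flow_ctrl_set() below applies the inverse mapping.
         */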

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        struct dpni_link_state state = {0};
        struct dpni_link_cfg cfg = {0};

        PMD_INIT_FUNC_TRACE();

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL)
                RTE_LOG(ERR, PMD, "dpni is NULL\n");

        /* It is necessary to obtain the current state before setting fc_conf
         * as MC returns an error if the rate, autoneg or duplex values are
         * inconsistent with the current state.
         */
        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret)
                RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);

        /* Disable link before setting configuration */
        dpaa2_dev_set_link_down(dev);

        /* Based on fc_conf, update cfg */
        cfg.rate = state.rate;
        cfg.options = state.options;

        /* update cfg with fc_conf */
        switch (fc_conf->mode) {
        case RTE_FC_FULL:
                /* Full flow control:
                 * OPT_PAUSE set, ASYM_PAUSE not set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        case RTE_FC_TX_PAUSE:
                /* Enable RX flow control:
                 * OPT_PAUSE not set, ASYM_PAUSE set
                 */
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                break;
        case RTE_FC_RX_PAUSE:
                /* Enable TX flow control:
                 * OPT_PAUSE set, ASYM_PAUSE set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        case RTE_FC_NONE:
                /* Disable flow control:
                 * OPT_PAUSE not set, ASYM_PAUSE not set
                 */
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        default:
                RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
                        fc_conf->mode);
                return -EINVAL;
        }

        ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
        if (ret)
                RTE_LOG(ERR, PMD,
                        "Unable to set Link configuration (err=%d)\n",
                        ret);

        dpaa2_dev_set_link_up(dev);

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_hf) {
                ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
                if (ret)
                        PMD_INIT_LOG(ERR, "unable to set flow dist");
        } else {
                ret = dpaa2_remove_flow_dist(dev, 0);
                if (ret)
                        PMD_INIT_LOG(ERR, "unable to remove flow dist");
        }
        eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf)
{
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;

        /* dpaa2 does not support rss_key, so the length should be 0 */
        rss_conf->rss_key_len = 0;
        rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
        return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id,
                uint16_t dpcon_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
        uint8_t flow_id = dpaa2_ethq->flow_id;
        struct dpni_queue cfg;

        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
                dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
        else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
                dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
        else
                return -EINVAL;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        options = DPNI_QUEUE_OPT_DEST;
        cfg.destination.type = DPNI_DEST_DPCON;
        cfg.destination.id = dpcon_id;
        cfg.destination.priority = queue_conf->ev.priority;

        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
                options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
                cfg.destination.hold_active = 1;
        }
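
        /* HOLD_ACTIVE keeps the frame queue held on the same channel while
         * an atomic event is being processed, so per-flow ordering is
         * preserved for RTE_SCHED_TYPE_ATOMIC.
         */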
        options |= DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (uint64_t)(dpaa2_ethq);

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
        if (ret)
                RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);

        memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
        return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id)
{
        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
        uint8_t flow_id = dpaa2_ethq->flow_id;
        struct dpni_queue cfg;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        options = DPNI_QUEUE_OPT_DEST;
        cfg.destination.type = DPNI_DEST_NONE;

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
        if (ret)
                RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);

        return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure = dpaa2_eth_dev_configure,
        .dev_start = dpaa2_dev_start,
        .dev_stop = dpaa2_dev_stop,
        .dev_close = dpaa2_dev_close,
        .promiscuous_enable = dpaa2_dev_promiscuous_enable,
        .promiscuous_disable = dpaa2_dev_promiscuous_disable,
        .allmulticast_enable = dpaa2_dev_allmulticast_enable,
        .allmulticast_disable = dpaa2_dev_allmulticast_disable,
        .dev_set_link_up = dpaa2_dev_set_link_up,
        .dev_set_link_down = dpaa2_dev_set_link_down,
        .link_update = dpaa2_dev_link_update,
        .stats_get = dpaa2_dev_stats_get,
        .xstats_get = dpaa2_dev_xstats_get,
        .xstats_get_by_id = dpaa2_xstats_get_by_id,
        .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
        .xstats_get_names = dpaa2_xstats_get_names,
        .stats_reset = dpaa2_dev_stats_reset,
        .xstats_reset = dpaa2_dev_stats_reset,
        .fw_version_get = dpaa2_fw_version_get,
        .dev_infos_get = dpaa2_dev_info_get,
        .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
        .mtu_set = dpaa2_dev_mtu_set,
        .vlan_filter_set = dpaa2_vlan_filter_set,
        .vlan_offload_set = dpaa2_vlan_offload_set,
        .rx_queue_setup = dpaa2_dev_rx_queue_setup,
        .rx_queue_release = dpaa2_dev_rx_queue_release,
        .tx_queue_setup = dpaa2_dev_tx_queue_setup,
        .tx_queue_release = dpaa2_dev_tx_queue_release,
        .rx_queue_count = dpaa2_dev_rx_queue_count,
        .flow_ctrl_get = dpaa2_flow_ctrl_get,
        .flow_ctrl_set = dpaa2_flow_ctrl_set,
        .mac_addr_add = dpaa2_dev_add_mac_addr,
        .mac_addr_remove = dpaa2_dev_remove_mac_addr,
        .mac_addr_set = dpaa2_dev_set_mac_addr,
        .rss_hash_update = dpaa2_dev_rss_hash_update,
        .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_device *dev = eth_dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct fsl_mc_io *dpni_dev;
        struct dpni_attr attr;
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct dpni_buffer_layout layout;

        PMD_INIT_FUNC_TRACE();

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

        hw_id = dpaa2_dev->object_id;

        dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
        if (!dpni_dev)
                PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");

        dpni_dev->regs = rte_mcp_ptr_list[0];
        ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
        if (ret)
                PMD_INIT_LOG(ERR,
                             "Failure in opening dpni@%d with err code %d\n",
                             hw_id, ret);

        /* Clean the device first */
        ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
        if (ret)
                PMD_INIT_LOG(ERR,
                             "Failure cleaning dpni@%d with err code %d\n",
                             hw_id, ret);

        ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
        if (ret)
                PMD_INIT_LOG(ERR,
                             "Failure in get dpni@%d attribute, err code %d\n",
                             hw_id, ret);

        priv->num_rx_tc = attr.num_rx_tcs;

        /* Resetting the "num_rx_queues" to equal the number of queues in the
         * first TC, as only one TC is supported on the Rx side. Once multiple
         * TCs are in use for Rx processing, this will be changed or removed.
         */
        priv->nb_rx_queues = attr.num_queues;

        /* Using number of TX queues as number of TX TCs */
        priv->nb_tx_queues = attr.num_tx_tcs;

        PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
                    priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);

        priv->hw = dpni_dev;
        priv->hw_id = hw_id;
        priv->options = attr.options;
        priv->max_mac_filters = attr.mac_filter_entries;
        priv->max_vlan_filters = attr.vlan_filter_entries;

        /* Allocate memory for hardware structure for queues */
        ret = dpaa2_alloc_rx_tx_queues(eth_dev);
        if (ret)
                PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("dpni",
                ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * attr.mac_filter_entries);
        }

        ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
                        priv->token,
                        (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
        if (ret)
                PMD_INIT_LOG(ERR, "DPNI get mac address failed: Err Code = %d\n",
                             ret);

        /* ... tx buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX, &layout);
        if (ret)
                PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
                             ret);

        /* ... tx-conf and error buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX_CONFIRM, &layout);
        if (ret)
                PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
                             ret);

        eth_dev->dev_ops = &dpaa2_ethdev_ops;

        eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
        rte_fslmc_vfio_dmamap();
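
        /* The dmamap call above makes DPDK memory segments DMA-addressable
         * through VFIO, so the hardware can reach mbufs and queue storage.
         */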

        RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
        return 0;

        dpaa2_dev_uninit(eth_dev);

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (!dpni)
                PMD_INIT_LOG(WARNING, "Already closed or not started");

        dpaa2_dev_close(eth_dev);

        if (priv->rx_vq[0]) {
                /* cleaning up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                        if (dpaa2_q->q_storage)
                                rte_free(dpaa2_q->q_storage);
                }
                /* free all queue memory */
                rte_free(priv->rx_vq[0]);
                priv->rx_vq[0] = NULL;
        }

        /* free memory for storing MAC addresses */
        if (eth_dev->data->mac_addrs) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
        }

        /* Close the device at the underlying layer */
        ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
        if (ret)
                PMD_INIT_LOG(ERR,
                             "Failure closing dpni device with err code %d\n",
                             ret);

        /* Free the allocated memory for ethernet private data and dpni */

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
                struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
                eth_dev->data->dev_private = rte_zmalloc(
                        "ethdev private structure",
                        sizeof(struct dpaa2_dev_priv),
                        RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL) {
                        PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
                                     " private port data\n");
                        rte_eth_dev_release_port(eth_dev);
                }
        } else {
                eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
        }

        eth_dev->device = &dpaa2_dev->device;
        eth_dev->device->driver = &dpaa2_drv->driver;

        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0)
                return 0;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = dpaa2_dev->eth_dev;
        dpaa2_dev_uninit(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_ETH,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);