PMD_INIT_FUNC_TRACE();
num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
- tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
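+ /* With Tx confirmation enabled, each Tx queue needs a companion
+  * confirmation queue, so twice the Tx queue count is allocated.
+  */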
+ if (priv->tx_conf_en)
+ tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
+ else
+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
RTE_CACHE_LINE_SIZE);
if (!mc_q) {
DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
return -1;
}
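/* ... Rx and Tx queue allocation loops ... */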
+ if (priv->tx_conf_en) {
+ /* Setup Tx confirmation queues */
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ mc_q->eth_data = dev->data;
+ mc_q->tc_index = i;
+ mc_q->flow_id = 0;
+ priv->tx_conf_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+ dpaa2_q->q_storage =
+ rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa2_q->q_storage)
+ goto fail_tx_conf;
+
+ memset(dpaa2_q->q_storage, 0,
+ sizeof(struct queue_storage_info_t));
+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ goto fail_tx_conf;
+ }
+ }
+
vq_id = 0;
for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
mcq->tc_index = dist_idx / num_rxqueue_per_tc;
mcq->flow_id = dist_idx % num_rxqueue_per_tc;
vq_id++;
}
return 0;
+fail_tx_conf:
+ i -= 1;
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+ rte_free(dpaa2_q->q_storage);
+ priv->tx_conf_vq[i--] = NULL;
+ }
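+ /* Reset the index so the fall-through into fail_tx also releases
+  * every Tx queue's cscn memory.
+  */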
+ i = priv->nb_tx_queues;
fail_tx:
i -= 1;
while (i >= 0) {
dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
rte_free(dpaa2_q->cscn);
priv->tx_vq[i--] = NULL;
}
+ if (priv->tx_conf_en) {
+ /* cleanup tx conf queue storage */
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)
+ priv->tx_conf_vq[i];
+ rte_free(dpaa2_q->q_storage);
+ }
+ }
/* Free memory for all queues (Rx + Tx) */
rte_free(priv->rx_vq[0]);
priv->rx_vq[0] = NULL;
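/* ... Tx queue setup ... */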
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
priv->tx_vq[tx_queue_id];
+ struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
+ priv->tx_conf_vq[tx_queue_id];
struct fsl_mc_io *dpni = priv->hw;
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
if (tx_queue_id == 0) {
/* Set Tx confirmation and error configuration */
- ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
- priv->token,
- DPNI_CONF_DISABLE);
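+ /* DPNI_CONF_AFFINE requests a dedicated confirmation queue per
+  * Tx queue; DPNI_CONF_DISABLE turns Tx confirmations off entirely.
+  */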
+ if (priv->tx_conf_en)
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_AFFINE);
+ else
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_DISABLE);
if (ret) {
DPAA2_PMD_ERR("Error in set tx conf mode settings: "
"err=%d", ret);
return -1;
}
}
/* ... remaining Tx queue configuration ... */
dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+
+ if (priv->tx_conf_en) {
+ dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
+ options = options | DPNI_QUEUE_OPT_USER_CTX;
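+ /* Store the Tx queue pointer in the confirmation queue's user
+  * context so completions can be matched back to their Tx queue.
+  */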
+ tx_conf_cfg.user_context = (size_t)(dpaa2_q);
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx conf flow: "
+ "tc_index=%d, flow=%d err=%d",
+ dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, ret);
+ return -1;
+ }
+
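+ /* Read the queue back to learn the FQID that the Tx confirmation
+  * handler will pull from.
+  */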
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
+ return -1;
+ }
+ dpaa2_tx_conf_q->fqid = qid.fqid;
+ }
return 0;
}
/* ... tx buffer layout ... */
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
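+ /* With Tx confirmation on, also ask hardware to pass the transmit
+  * timestamp in the frame annotation area.
+  */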
+ if (priv->tx_conf_en) {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ layout.pass_timestamp = true;
+ } else {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ }
layout.pass_frame_status = 1;
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
/* ... tx-conf and error buffer layout ... */
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ if (priv->tx_conf_en) {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ layout.pass_timestamp = true;
+ } else {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ }
layout.pass_frame_status = 1;
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
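/* ... device probe ... */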
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_eth_dev *eth_dev;
+ struct dpaa2_dev_priv *priv;
int diag;
if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
RTE_PKTMBUF_HEADROOM) {
/* ... headroom error path and device init ... */
return 0;
}
+ priv = eth_dev->data->dev_private;
+ priv->tx_conf_en = 0;
+
rte_eth_dev_release_port(eth_dev);
return diag;
}
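/* ... Rx/Tx datapath (dpaa2_rxtx.c) ... */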
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*
*/
return num_rx;
}
+uint16_t dpaa2_dev_tx_conf(void *queue)
+{
+ /* Function to process Tx confirmation frames for a given queue */
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_tx_conf = 0, num_pulled;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd, *next_fd;
+ struct qbman_pull_desc pulldesc;
+ struct qbman_release_desc releasedesc;
+ uint32_t bpid;
+ uint64_t buf;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ do {
+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
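+ /* Request up to a full DQRR ring of entries per volatile dequeue
+  * command.
+  */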
+ qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+ /* Check if the previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+ do {
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0((void *)(size_t)
+ (DPAA2_GET_FD_ADDR(next_fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ bpid = DPAA2_GET_FD_BPID(fd);
+
+ /* Create a release descriptor required for releasing
+ * buffers into QBMAN
+ */
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+ buf = DPAA2_GET_FD_ADDR(fd);
+ /* feed them to bman */
+ do {
+ ret = qbman_swp_release(swp, &releasedesc,
+ &buf, 1);
+ } while (ret == -EBUSY);
+
+ dq_storage++;
+ num_tx_conf++;
+ num_pulled++;
+ } while (pending);
+
+ /* Last VDQ provided all packets and more packets are requested */
+ } while (num_pulled == dpaa2_dqrr_size);
+
+ dpaa2_q->rx_pkts += num_tx_conf;
+
+ return num_tx_conf;
+}
+
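+ /*
+  * Usage sketch (illustrative only, not part of this patch): with
+  * tx_conf_en set, a polling thread could drain confirmations for
+  * every Tx queue, e.g.:
+  *
+  *	for (i = 0; i < priv->nb_tx_queues; i++)
+  *		dpaa2_dev_tx_conf(priv->tx_conf_vq[i]);
+  */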
/*
* Callback to handle sending packets through WRIOP based interface
*/