/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(C) 2019 Marvell International Ltd.
+ * Copyright(C) 2019-2021 Marvell.
*/
#include "otx2_evdev.h"
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
- aq->op = NIX_AQ_INSTOP_INIT;
+ aq->op = NIX_AQ_INSTOP_WRITE;
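+ /* The CQ context is already initialised at queue setup time, so a
+  * WRITE op only updates the enable/caching bits set below instead
+  * of re-initialising the whole context.
+  */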
aq->cq.ena = 1;
aq->cq.caching = 1;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
- otx2_err("Failed to init cq context");
+ otx2_err("Failed to enable cq context");
goto fail;
}
return otx2_mbox_process(npa_lf->mbox);
}
+static int
+sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
+ uint16_t eth_port_id, uint16_t tx_queue_id,
+ struct otx2_eth_txq *txq)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ int i;
+
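+ /* Grow every event port's workslot so the trailing tx_adptr_data
+  * table has a slot for each (ethdev port, Tx queue) pair.
+  */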
+ for (i = 0; i < event_dev->data->nb_ports; i++) {
+ dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *old_dws;
+ struct otx2_ssogws_dual *dws;
+
+ old_dws = event_dev->data->ports[i];
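+ /* Resize: base struct + one cache line for the cookie +
+  * one uint64_t per (port, queue) entry.
+  */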
+ dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
+ sizeof(struct otx2_ssogws_dual)
+ + RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
+ (dev->max_port_id + 1) *
+ RTE_MAX_QUEUES_PER_PORT),
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (dws == NULL)
+ return -ENOMEM;
+
+ /* First cache line is reserved for cookie */
+ dws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);
+
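+ /* Index the flat table as [port][queue] and record the Tx
+  * queue handle.
+  */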
+ ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
+ )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
+ (uint64_t)txq;
+ event_dev->data->ports[i] = dws;
+ } else {
+ struct otx2_ssogws *old_ws;
+ struct otx2_ssogws *ws;
+
+ old_ws = event_dev->data->ports[i];
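+ /* Resize: base struct + one cache line for the cookie +
+  * one uint64_t per (port, queue) entry.
+  */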
+ ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
+ (dev->max_port_id + 1) *
+ RTE_MAX_QUEUES_PER_PORT),
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (ws == NULL)
+ return -ENOMEM;
+
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
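+ /* Index the flat table as [port][queue] and record the Tx
+  * queue handle.
+  */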
+ ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
+ )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
+ (uint64_t)txq;
+ event_dev->data->ports[i] = ws;
+ }
+ }
+
+ return 0;
+}
+
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id)
{
struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
struct otx2_eth_txq *txq;
- int i;
+ int i, ret;
RTE_SET_USED(id);
if (tx_queue_id < 0) {
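+ /* A negative queue id requests every Tx queue on this port. */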
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
sso_sqb_aura_limit_edit(txq->sqb_pool,
- OTX2_SSO_SQB_LIMIT);
+ OTX2_SSO_SQB_LIMIT);
+ ret = sso_add_tx_queue_data(event_dev,
+ eth_dev->data->port_id, i,
+ txq);
+ if (ret < 0)
+ return ret;
}
} else {
txq = eth_dev->data->tx_queues[tx_queue_id];
sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
+ ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
+ tx_queue_id, txq);
+ if (ret < 0)
+ return ret;
}
dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;