return 0;
}
+/* Recompute the Tx queue's SQB flow-control threshold for event Tx.
+ *
+ * tx_queue_id < 0 applies the update to every Tx queue of @eth_dev,
+ * otherwise only to the given queue. The budget is derived from the
+ * SQ's own SQB count (no fixed cap), then 70% of it is retained as
+ * the threshold checked by cn10k_sso_txq_fc_wait().
+ */
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cn10k_eth_txq *txq;
+ struct roc_nix_sq *sq;
+ int i;
+
+ if (tx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ cn10k_sso_txq_fc_update(eth_dev, i);
+ } else {
+ uint16_t sqes_per_sqb;
+
+ sq = &cnxk_eth_dev->sqs[tx_queue_id];
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ /* SQEs per SQB is a power of two; expand from the stored log2. */
+ sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+ /* Reserve one SQE-sized slot per SQB (next-SQB pointer slot). */
+ sq->nb_sqb_bufs_adj =
+ sq->nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+ sqes_per_sqb;
+ /* Leave headroom for inline outbound descriptors when the
+ * security Tx offload is enabled.
+ */
+ if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+ (sqes_per_sqb - 1));
+ txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+ /* Only 70% of the adjusted budget is used as the FC limit. */
+ txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+ }
+}
+
static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
}
dev->tx_offloads |= tx_offloads;
+ cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
rc = cn10k_sso_updt_tx_adptr_data(event_dev);
if (rc < 0)
return rc;
(BIT_ULL(48) - 1));
}
+/* Busy-poll until the SQ has room below the queue's flow-control
+ * threshold. fc_mem is the hardware-updated in-flight SQB counter;
+ * a relaxed atomic load suffices since we only spin on its value.
+ */
+static __rte_always_inline void
+cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
+{
+ while ((uint64_t)txq->nb_sqb_bufs_adj <=
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+ ;
+}
+
static __rte_always_inline void
cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
+ cn10k_sso_txq_fc_wait(txq);
roc_lmt_submit_steorl(lmt_id, pa);
}
struct cn10k_eth_txq *txq;
struct rte_mbuf *m;
uintptr_t lmt_addr;
- uint16_t ref_cnt;
uint16_t lmt_id;
lmt_addr = ws->lmt_base;
}
m = ev->mbuf;
- ref_cnt = m->refcnt;
cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
flags);
- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
- if (ref_cnt > 1)
- return 1;
- }
-
- cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
- ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
return 1;
}
}
+/* Recompute the Tx queue's SQB flow-control threshold (cn9k variant).
+ * The "ena" flag and the fixed CNXK_SSO_SQB_LIMIT cap are dropped:
+ * the budget now always comes from the SQ's own SQB count, matching
+ * the cn10k implementation.
+ */
static void
-cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
- bool ena)
+cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cn9k_eth_txq *txq;
if (tx_queue_id < 0) {
+ /* Negative queue id: update every Tx queue of the port. */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cn9k_sso_txq_fc_update(eth_dev, i, ena);
+ cn9k_sso_txq_fc_update(eth_dev, i);
} else {
- uint16_t sq_limit;
+ uint16_t sqes_per_sqb;
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
- sq_limit =
- ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
- sq->nb_sqb_bufs;
- txq->nb_sqb_bufs_adj =
- sq_limit -
- RTE_ALIGN_MUL_CEIL(sq_limit,
- (1ULL << txq->sqes_per_sqb_log2)) /
- (1ULL << txq->sqes_per_sqb_log2);
+ /* SQEs per SQB is a power of two; expand from the stored log2. */
+ sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+ /* Reserve one SQE-sized slot per SQB (next-SQB pointer slot). */
+ sq->nb_sqb_bufs_adj =
+ sq->nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+ sqes_per_sqb;
+ /* Leave headroom for inline outbound descriptors when the
+ * security Tx offload is enabled.
+ */
+ if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+ (sqes_per_sqb - 1));
+ txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+ /* Only 70% of the adjusted budget is used as the FC limit. */
txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
}
}
}
dev->tx_offloads |= tx_offloads;
- cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
+ cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
rc = cn9k_sso_updt_tx_adptr_data(event_dev);
if (rc < 0)
return rc;
rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
if (rc < 0)
return rc;
- cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
+ cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
return cn9k_sso_updt_tx_adptr_data(event_dev);
}
+/* Busy-poll until the SQ has room below the queue's flow-control
+ * threshold. Replaces the old shifted-difference test with a plain
+ * unsigned comparison against the hardware-updated fc_mem counter;
+ * relaxed ordering suffices since we only spin on the value.
+ */
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
- while (!((txq->nb_sqb_bufs_adj -
- __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
- << (txq)->sqes_per_sqb_log2))
+ while ((uint64_t)txq->nb_sqb_bufs_adj <=
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
;
}
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_SSO_WQE_SG_PTR (9)
-#define CNXK_SSO_SQB_LIMIT (0x180)
#define CNXK_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x) (((x) >> 38) & SSO_TT_EMPTY)
+/* Set the SQB aura limit for @sq, clamped to the aura's capacity.
+ * Now a no-op when the requested count already matches, and on
+ * success records the clamped value back into sq->nb_sqb_bufs so
+ * later threshold computations see the effective limit.
+ * Returns 0 on success or the negative roc_npa error code.
+ */
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
- return roc_npa_aura_limit_modify(
- sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+ int rc;
+
+ if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+ rc = roc_npa_aura_limit_modify(
+ sq->aura_handle,
+ RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+ if (rc < 0)
+ return rc;
+
+ sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+ }
+ return 0;
}
static void
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct roc_nix_sq *sq;
- int i, ret;
+ int i, ret = 0;
void *txq;
if (tx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
+ ret |= cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev,
+ i);
} else {
txq = eth_dev->data->tx_queues[tx_queue_id];
sq = &cnxk_eth_dev->sqs[tx_queue_id];
- cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+ cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
ret = cnxk_sso_updt_tx_queue_data(
event_dev, eth_dev->data->port_id, tx_queue_id, txq);
if (ret < 0)
return ret;
}
+ if (ret < 0) {
+ plt_err("Failed to configure Tx adapter port=%d, q=%d",
+ eth_dev->data->port_id, tx_queue_id);
+ return ret;
+ }
+
return 0;
}
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct roc_nix_sq *sq;
- int i, ret;
+ int i, ret = 0;
RTE_SET_USED(event_dev);
if (tx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
+ ret |= cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev,
+ i);
} else {
sq = &cnxk_eth_dev->sqs[tx_queue_id];
cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
return ret;
}
+ if (ret < 0) {
+ plt_err("Failed to clear Tx adapter config port=%d, q=%d",
+ eth_dev->data->port_id, tx_queue_id);
+ return ret;
+ }
+
return 0;
}