/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
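
/* Account resources attached through the Rx and timer adapters: track the
 * mempools and timer rings seen so far and accumulate adptr_xae_cnt, the
 * number of XAQ entries the SSO must be provisioned for. Tracking arrays
 * grow via rte_realloc() and are rolled back if the allocation fails.
 */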
void
cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
		      uint32_t event_type)
{
	int i;

	switch (event_type) {
	case RTE_EVENT_TYPE_ETHDEV: {
		struct cnxk_eth_rxq_sp *rxq = data;
		uint64_t *old_ptr;

		/* Skip mempools that are already accounted for. */
		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
			if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
				return;
		}

		dev->rx_adptr_pool_cnt++;
		old_ptr = dev->rx_adptr_pools;
		dev->rx_adptr_pools = rte_realloc(
			dev->rx_adptr_pools,
			sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
		if (dev->rx_adptr_pools == NULL) {
			/* Still account the pool, but restore the old
			 * tracking array on allocation failure.
			 */
			dev->adptr_xae_cnt += rxq->qconf.mp->size;
			dev->rx_adptr_pools = old_ptr;
			dev->rx_adptr_pool_cnt--;
			return;
		}
		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
			(uint64_t)rxq->qconf.mp;

		dev->adptr_xae_cnt += rxq->qconf.mp->size;
		break;
	}
	case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
		struct rte_mempool *mp = data;
		uint64_t *old_ptr;

		for (i = 0; i < dev->vec_pool_cnt; i++) {
			if ((uint64_t)mp == dev->vec_pools[i])
				return;
		}

		dev->vec_pool_cnt++;
		old_ptr = dev->vec_pools;
		dev->vec_pools =
			rte_realloc(dev->vec_pools,
				    sizeof(uint64_t) * dev->vec_pool_cnt, 0);
		if (dev->vec_pools == NULL) {
			dev->adptr_xae_cnt += mp->size;
			dev->vec_pools = old_ptr;
			dev->vec_pool_cnt--;
			return;
		}
		dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;
		dev->adptr_xae_cnt += mp->size;
		break;
	}
	case RTE_EVENT_TYPE_TIMER: {
		struct cnxk_tim_ring *timr = data;
		uint16_t *old_ring_ptr;
		uint64_t *old_sz_ptr;

		/* If the ring is already tracked, only refresh its size. */
		for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
			if (timr->ring_id != dev->timer_adptr_rings[i])
				continue;
			if (timr->nb_timers == dev->timer_adptr_sz[i])
				return;
			dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz[i] = timr->nb_timers;
			return;
		}

		dev->tim_adptr_ring_cnt++;
		old_ring_ptr = dev->timer_adptr_rings;
		old_sz_ptr = dev->timer_adptr_sz;

		dev->timer_adptr_rings = rte_realloc(
			dev->timer_adptr_rings,
			sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
		if (dev->timer_adptr_rings == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_rings = old_ring_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_sz = rte_realloc(
			dev->timer_adptr_sz,
			sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);
		if (dev->timer_adptr_sz == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz = old_sz_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
			timr->ring_id;
		dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
			timr->nb_timers;

		dev->adptr_xae_cnt += timr->nb_timers;
		break;
	}
	default:
		break;
	}
}
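
/* Switch an Rx queue (RQ) to SSO mode: packets arrive as events whose tag
 * encodes RTE_EVENT_TYPE_ETHDEV, the ethdev port id and a 20-bit flow tag,
 * unless the application supplies a fixed flow id instead.
 */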
static int
cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
		    uint16_t port_id, const struct rte_event *ev,
		    uint8_t custom_flowid)
{
	struct roc_nix_rq *rq;

	rq = &cnxk_eth_dev->rqs[rq_id];
	rq->sso_ena = 1;
	rq->tt = ev->sched_type;
	rq->hwgrp = ev->queue_id;
	rq->flow_tag_width = 20;
	rq->wqe_skip = 1;
	rq->tag_mask = (port_id & 0xF) << 20;
	rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
			<< 24;

	if (custom_flowid) {
		/* Application-provided flow id replaces the flow tag bits. */
		rq->flow_tag_width = 0;
		rq->tag_mask |= ev->flow_id;
	}

	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
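
/* Return an RQ to regular (non-SSO) operation with the default 32-bit
 * flow tag width and no tag mask.
 */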
static int
cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
{
	struct roc_nix_rq *rq = &cnxk_eth_dev->rqs[rq_id];

	rq->sso_ena = 0;
	rq->flow_tag_width = 32;
	rq->tag_mask = 0;
	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
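
/* Add an Rx queue to the event eth Rx adapter; rx_queue_id < 0 applies to
 * all Rx queues of the port. This grows the XAQ pool as needed, enables
 * SSO on the RQ and turns on NPA backpressure for the queue's mempool.
 */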
int
cnxk_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t port = eth_dev->data->port_id;
	struct cnxk_eth_rxq_sp *rxq_sp;
	int i, rc = 0;

	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
							    i, queue_conf);
	} else {
		rxq_sp = cnxk_eth_rxq_to_sp(
			eth_dev->data->rx_queues[rx_queue_id]);
		cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
		rc = cnxk_sso_xae_reconfigure(
			(struct rte_eventdev *)(uintptr_t)event_dev);
		rc |= cnxk_sso_rxq_enable(
			cnxk_eth_dev, (uint16_t)rx_queue_id, port,
			&queue_conf->ev,
			!!(queue_conf->rx_queue_flags &
			   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
				      rxq_sp->qconf.mp->pool_id, true,
				      dev->force_ena_bp);
	}

	if (rc < 0) {
		plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
			queue_conf->ev.queue_id);
		return rc;
	}

	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;

	return 0;
}
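
/* Mirror of queue add: disable SSO on the RQ and drop the NPA
 * backpressure configuration for the queue's mempool.
 */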
int
cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	int i, rc = 0;

	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
	} else {
		rxq_sp = cnxk_eth_rxq_to_sp(
			eth_dev->data->rx_queues[rx_queue_id]);
		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
				      rxq_sp->qconf.mp->pool_id, false,
				      dev->force_ena_bp);
	}

	if (rc < 0)
		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
			eth_dev->data->port_id, rx_queue_id);

	return rc;
}
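
/* Nothing to do at adapter start/stop: RQs are already switched in and
 * out of SSO mode at queue add/del time.
 */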
int
cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			  const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);
	return 0;
}

int
cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			 const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);
	return 0;
}
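
/* Update the SQB aura limit, clamped so the SQ never claims more SQB
 * buffers than it owns.
 */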
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
	return roc_npa_aura_limit_modify(
		sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
}
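
/* Maintain dev->tx_adptr_data, a flat [port][queue] table of Tx queue
 * pointers used by the Tx adapter fast path; grown on demand when a port
 * id beyond the current maximum is added.
 */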
static int
cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
			    uint16_t eth_port_id, uint16_t tx_queue_id,
			    void *txq)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t max_port_id = dev->max_port_id;
	uint64_t *txq_data = dev->tx_adptr_data;

	if (txq_data == NULL || eth_port_id > max_port_id) {
		max_port_id = RTE_MAX(max_port_id, eth_port_id);
		txq_data = rte_realloc_socket(
			txq_data,
			(sizeof(uint64_t) * (max_port_id + 1) *
			 RTE_MAX_QUEUES_PER_PORT),
			RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
		if (txq_data == NULL)
			return -ENOMEM;
	}

	((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
		 txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
	dev->max_port_id = max_port_id;
	dev->tx_adptr_data = txq_data;
	return 0;
}
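
/* Add a Tx queue to the event eth Tx adapter; tx_queue_id < 0 applies to
 * all Tx queues of the port. Caps the SQ's SQB limit to CNXK_SSO_SQB_LIMIT
 * and registers the queue pointer for the fast path.
 */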
int
cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct roc_nix_sq *sq;
	int i, ret;
	void *txq;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
		ret = cnxk_sso_updt_tx_queue_data(
			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
		if (ret < 0)
			return ret;

		dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
	}

	return 0;
}
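
/* Mirror of queue add: restore the SQ's own SQB limit and clear its entry
 * in the Tx adapter table.
 */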
int
cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct roc_nix_sq *sq;
	int i, ret;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
	} else {
		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
		ret = cnxk_sso_updt_tx_queue_data(
			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
		if (ret < 0)
			return ret;
	}

	return 0;
}