/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
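
/*
 * Track the resources backing the Rx, event vector and timer adapters so
 * the number of in-flight events (XAE) the SSO must support can be grown
 * accordingly: Rx mempools, vector mempools and timer rings each contribute
 * their size to dev->adptr_xae_cnt, which cnxk_sso_xae_reconfigure() later
 * consumes when resizing the XAQ pool.
 */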
void
cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
		      uint32_t event_type)
{
	int i;

	switch (event_type) {
	case RTE_EVENT_TYPE_ETHDEV: {
		struct cnxk_eth_rxq_sp *rxq = data;
		uint64_t *old_ptr;

		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
			if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
				return;
		}

		dev->rx_adptr_pool_cnt++;
		old_ptr = dev->rx_adptr_pools;
		dev->rx_adptr_pools = rte_realloc(
			dev->rx_adptr_pools,
			sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
		if (dev->rx_adptr_pools == NULL) {
			dev->adptr_xae_cnt += rxq->qconf.mp->size;
			dev->rx_adptr_pools = old_ptr;
			dev->rx_adptr_pool_cnt--;
			return;
		}
		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
			(uint64_t)rxq->qconf.mp;

		dev->adptr_xae_cnt += rxq->qconf.mp->size;
		break;
	}
	case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
		struct rte_mempool *mp = data;
		uint64_t *old_ptr;

		for (i = 0; i < dev->vec_pool_cnt; i++) {
			if ((uint64_t)mp == dev->vec_pools[i])
				return;
		}

		dev->vec_pool_cnt++;
		old_ptr = dev->vec_pools;
		dev->vec_pools =
			rte_realloc(dev->vec_pools,
				    sizeof(uint64_t) * dev->vec_pool_cnt, 0);
		if (dev->vec_pools == NULL) {
			dev->adptr_xae_cnt += mp->size;
			dev->vec_pools = old_ptr;
			dev->vec_pool_cnt--;
			return;
		}
		dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;

		dev->adptr_xae_cnt += mp->size;
		break;
	}
	case RTE_EVENT_TYPE_TIMER: {
		struct cnxk_tim_ring *timr = data;
		uint16_t *old_ring_ptr;
		uint64_t *old_sz_ptr;

		for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
			if (timr->ring_id != dev->timer_adptr_rings[i])
				continue;
			if (timr->nb_timers == dev->timer_adptr_sz[i])
				return;
			dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz[i] = timr->nb_timers;

			return;
		}

		dev->tim_adptr_ring_cnt++;
		old_ring_ptr = dev->timer_adptr_rings;
		old_sz_ptr = dev->timer_adptr_sz;

		dev->timer_adptr_rings = rte_realloc(
			dev->timer_adptr_rings,
			sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
		if (dev->timer_adptr_rings == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_rings = old_ring_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_sz = rte_realloc(
			dev->timer_adptr_sz,
			sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);

		if (dev->timer_adptr_sz == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz = old_sz_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
			timr->ring_id;
		dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
			timr->nb_timers;

		dev->adptr_xae_cnt += timr->nb_timers;
		break;
	}
	default:
		break;
	}
}
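
/*
 * Switch an ethdev RQ to SSO mode: take the scheduling type and HWGRP from
 * the event supplied by the Rx adapter and build a tag mask that carries
 * the ethdev port id and RTE_EVENT_TYPE_ETHDEV in the upper tag bits. When
 * a custom flow id is requested, the flow tag width is cleared and
 * ev->flow_id is folded into the tag instead.
 */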
static int
cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
		    uint16_t port_id, const struct rte_event *ev,
		    uint8_t custom_flowid)
{
	struct roc_nix *nix = &cnxk_eth_dev->nix;
	struct roc_nix_rq *rq;
	int rc;

	rq = &cnxk_eth_dev->rqs[rq_id];
	rq->sso_ena = 1;
	rq->tt = ev->sched_type;
	rq->hwgrp = ev->queue_id;
	rq->flow_tag_width = 20;
	rq->tag_mask = (port_id & 0xF) << 20;
	rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
			<< 24;

	if (custom_flowid) {
		rq->flow_tag_width = 0;
		rq->tag_mask |= ev->flow_id;
	}

	rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
	if (rc)
		return rc;

	if (rq_id == 0 && roc_nix_inl_inb_is_enabled(nix)) {
		uint32_t sec_tag_const;

		/* IPSec tag const is 8-bit left shifted value of tag_mask
		 * as it applies to bit 32:8 of tag only.
		 */
		sec_tag_const = rq->tag_mask >> 8;
		rc = roc_nix_inl_inb_tag_update(nix, sec_tag_const,
						ev->sched_type);
		if (rc)
			plt_err("Failed to set tag conf for ipsec, rc=%d", rc);
	}

	return rc;
}
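
/*
 * Undo cnxk_sso_rxq_enable(): return the RQ to regular CQ/poll mode with
 * the default 32-bit flow tag width.
 */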
static int
cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
{
	struct roc_nix_rq *rq;

	rq = &cnxk_eth_dev->rqs[rq_id];
	rq->sso_ena = 0;
	rq->flow_tag_width = 32;

	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
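
/*
 * Enable event vectorization on an RQ: program the vector mempool aura,
 * the maximum vector size and the wait timeout, and rebuild the tag mask
 * with RTE_EVENT_TYPE_ETHDEV_VECTOR as the event type.
 */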
static int
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
				uint16_t port_id, uint16_t rq_id, uint16_t sz,
				uint64_t tmo_ns, struct rte_mempool *vmp)
{
	struct roc_nix_rq *rq;

	rq = &cnxk_eth_dev->rqs[rq_id];

	if (rq->flow_tag_width == 0)
		return -EINVAL;

	rq->vwqe_ena = 1;
	rq->vwqe_first_skip = 0;
	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
	rq->vwqe_wait_tmo =
		tmo_ns /
		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
	rq->tag_mask = (port_id & 0xF) << 20;
	rq->tag_mask |=
		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
		<< 24;

	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
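
/*
 * Rx adapter queue add. A negative rx_queue_id adds every Rx queue of the
 * port; otherwise the queue's mempool is accounted in the XAE count, the
 * XAQ pool is reconfigured, the RQ is moved to SSO mode and, if requested,
 * event vectorization is enabled. NPA backpressure is then set up for the
 * queue's mempool.
 */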
int
cnxk_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t port = eth_dev->data->port_id;
	struct cnxk_eth_rxq_sp *rxq_sp;
	int i, rc = 0;

	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
							    i, queue_conf);
	} else {
		rxq_sp = cnxk_eth_rxq_to_sp(
			eth_dev->data->rx_queues[rx_queue_id]);
		cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
		rc = cnxk_sso_xae_reconfigure(
			(struct rte_eventdev *)(uintptr_t)event_dev);
		rc |= cnxk_sso_rxq_enable(
			cnxk_eth_dev, (uint16_t)rx_queue_id, port,
			&queue_conf->ev,
			!!(queue_conf->rx_queue_flags &
			   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
		if (queue_conf->rx_queue_flags &
		    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
			cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
			rc |= cnxk_sso_xae_reconfigure(
				(struct rte_eventdev *)(uintptr_t)event_dev);
			rc |= cnxk_sso_rx_adapter_vwqe_enable(
				cnxk_eth_dev, port, rx_queue_id,
				queue_conf->vector_sz,
				queue_conf->vector_timeout_ns,
				queue_conf->vector_mp);

			if (cnxk_eth_dev->vec_drop_re_dis)
				rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
							     false);
		}
		rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
				      rxq_sp->qconf.mp->pool_id, true,
				      dev->force_ena_bp);
		cnxk_eth_dev->nb_rxq_sso++;
	}

	if (rc < 0) {
		plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
			queue_conf->ev.queue_id);
		return rc;
	}

	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;

	/* Switch to use PF/VF's NIX LF instead of inline device for inbound
	 * when all the RQ's are switched to event dev mode. We do this only
	 * when using inline device is not forced by dev args.
	 */
	if (!cnxk_eth_dev->inb.force_inl_dev &&
	    cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
		cnxk_nix_inb_mode_set(cnxk_eth_dev, false);

	return 0;
}
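
/*
 * Rx adapter queue delete. Moves the RQ (or all RQs for a negative
 * rx_queue_id) back to poll mode, releases NPA backpressure for its mempool
 * and re-enables drop_re once no RQ is left in SSO mode.
 */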
int
cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	int i, rc = 0;

	RTE_SET_USED(event_dev);
	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
	} else {
		rxq_sp = cnxk_eth_rxq_to_sp(
			eth_dev->data->rx_queues[rx_queue_id]);
		rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
		rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
				      rxq_sp->qconf.mp->pool_id, false,
				      dev->force_ena_bp);
		cnxk_eth_dev->nb_rxq_sso--;

		/* Enable drop_re if it was disabled earlier */
		if (cnxk_eth_dev->vec_drop_re_dis && !cnxk_eth_dev->nb_rxq_sso)
			rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix, true);
	}

	if (rc < 0)
		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
			eth_dev->data->port_id, rx_queue_id);

	/* Removing RQ from Rx adapter implies need to use
	 * inline device for CQ/Poll mode.
	 */
	cnxk_nix_inb_mode_set(cnxk_eth_dev, true);

	return rc;
}

int
cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			  const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

int
cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			 const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
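
/*
 * Adjust the NPA aura limit for an SQ's SQB pool, capping the requested
 * value at the number of SQBs actually backing the aura.
 */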
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
	return roc_npa_aura_limit_modify(
		sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
}
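
/*
 * Record (or clear) a Tx queue pointer in the flat [port][queue] table used
 * by the SSO Tx adapter fast path, growing the table when a port id beyond
 * the current maximum is seen.
 */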
static int
cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
			    uint16_t eth_port_id, uint16_t tx_queue_id,
			    void *txq)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t max_port_id = dev->max_port_id;
	uint64_t *txq_data = dev->tx_adptr_data;

	if (txq_data == NULL || eth_port_id > max_port_id) {
		max_port_id = RTE_MAX(max_port_id, eth_port_id);
		txq_data = rte_realloc_socket(
			txq_data,
			(sizeof(uint64_t) * (max_port_id + 1) *
			 RTE_MAX_QUEUES_PER_PORT),
			RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
		if (txq_data == NULL)
			return -ENOMEM;
	}

	((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
	 txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
	dev->max_port_id = max_port_id;
	dev->tx_adptr_data = txq_data;
	return 0;
}
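
/*
 * Tx adapter queue add. Limits the SQ's SQB aura for event mode and
 * publishes the Tx queue pointer for the SSO Tx fast path; a negative
 * tx_queue_id adds every Tx queue of the port.
 */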
int
cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct roc_nix_sq *sq;
	int i, ret;
	void *txq;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
		ret = cnxk_sso_updt_tx_queue_data(
			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
		if (ret < 0)
			return ret;

		dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
	}

	return 0;
}
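
/*
 * Tx adapter queue delete. Restores the SQ's full SQB aura limit and clears
 * the queue's entry from the Tx adapter lookup table.
 */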
int
cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct roc_nix_sq *sq;
	int i, ret;

	RTE_SET_USED(event_dev);
	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
	} else {
		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
		ret = cnxk_sso_updt_tx_queue_data(
			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);