event/cnxk: support vectorized Rx adapter
[dpdk.git] / drivers / event / cnxk / cnxk_eventdev_adptr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"

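/*
 * Track the mempools and timer rings attached through the Rx, Rx vector and
 * timer adapters and accumulate their sizes into adptr_xae_cnt so that the
 * SSO XAQ pool can later be resized to cover all in-flight events.
 */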
void
cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
                      uint32_t event_type)
{
        int i;

        switch (event_type) {
        case RTE_EVENT_TYPE_ETHDEV: {
                struct cnxk_eth_rxq_sp *rxq = data;
                uint64_t *old_ptr;

                for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
                        if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
                                return;
                }

                dev->rx_adptr_pool_cnt++;
                old_ptr = dev->rx_adptr_pools;
                dev->rx_adptr_pools = rte_realloc(
                        dev->rx_adptr_pools,
                        sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
                if (dev->rx_adptr_pools == NULL) {
                        dev->adptr_xae_cnt += rxq->qconf.mp->size;
                        dev->rx_adptr_pools = old_ptr;
                        dev->rx_adptr_pool_cnt--;
                        return;
                }
                dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
                        (uint64_t)rxq->qconf.mp;

                dev->adptr_xae_cnt += rxq->qconf.mp->size;
                break;
        }
        case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
                struct rte_mempool *mp = data;
                uint64_t *old_ptr;

                for (i = 0; i < dev->vec_pool_cnt; i++) {
                        if ((uint64_t)mp == dev->vec_pools[i])
                                return;
                }

                dev->vec_pool_cnt++;
                old_ptr = dev->vec_pools;
                dev->vec_pools =
                        rte_realloc(dev->vec_pools,
                                    sizeof(uint64_t) * dev->vec_pool_cnt, 0);
                if (dev->vec_pools == NULL) {
                        dev->adptr_xae_cnt += mp->size;
                        dev->vec_pools = old_ptr;
                        dev->vec_pool_cnt--;
                        return;
                }
                dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;

                dev->adptr_xae_cnt += mp->size;
                break;
        }
        case RTE_EVENT_TYPE_TIMER: {
                struct cnxk_tim_ring *timr = data;
                uint16_t *old_ring_ptr;
                uint64_t *old_sz_ptr;

                for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
                        if (timr->ring_id != dev->timer_adptr_rings[i])
                                continue;
                        if (timr->nb_timers == dev->timer_adptr_sz[i])
                                return;
                        dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz[i] = timr->nb_timers;

                        return;
                }

                dev->tim_adptr_ring_cnt++;
                old_ring_ptr = dev->timer_adptr_rings;
                old_sz_ptr = dev->timer_adptr_sz;

                dev->timer_adptr_rings = rte_realloc(
                        dev->timer_adptr_rings,
                        sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
                if (dev->timer_adptr_rings == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_rings = old_ring_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_sz = rte_realloc(
                        dev->timer_adptr_sz,
                        sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);

                if (dev->timer_adptr_sz == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz = old_sz_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
                        timr->ring_id;
                dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
                        timr->nb_timers;

                dev->adptr_xae_cnt += timr->nb_timers;
                break;
        }
        default:
                break;
        }
}

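/*
 * Enable SSO (event) delivery on an Rx queue: program the schedule type,
 * HWGRP and a tag mask that encodes the port ID and RTE_EVENT_TYPE_ETHDEV.
 * When a custom flow ID is requested, the flow tag width is zeroed and the
 * event's flow_id is folded into the tag mask instead.
 */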
static int
cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
                    uint16_t port_id, const struct rte_event *ev,
                    uint8_t custom_flowid)
{
        struct roc_nix_rq *rq;

        rq = &cnxk_eth_dev->rqs[rq_id];
        rq->sso_ena = 1;
        rq->tt = ev->sched_type;
        rq->hwgrp = ev->queue_id;
        rq->flow_tag_width = 20;
        rq->wqe_skip = 1;
        rq->tag_mask = (port_id & 0xF) << 20;
        rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
                        << 24;

        if (custom_flowid) {
                rq->flow_tag_width = 0;
                rq->tag_mask |= ev->flow_id;
        }

        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}

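/* Restore an Rx queue to plain (non-event) mode with the default tag setup. */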
static int
cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
{
        struct roc_nix_rq *rq;

        rq = &cnxk_eth_dev->rqs[rq_id];
        rq->sso_ena = 0;
        rq->flow_tag_width = 32;
        rq->tag_mask = 0;

        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}

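/*
 * Rx adapter queue add: a negative queue ID adds every Rx queue of the port;
 * otherwise the XAQ pool is resized for the queue's mempool, the queue is
 * switched to SSO mode and NPA backpressure is enabled on its pool.
 */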
int
cnxk_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
        struct cnxk_eth_rxq_sp *rxq_sp;
        int i, rc = 0;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
                                                            i, queue_conf);
        } else {
                rxq_sp = cnxk_eth_rxq_to_sp(
                        eth_dev->data->rx_queues[rx_queue_id]);
                cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
                rc = cnxk_sso_xae_reconfigure(
                        (struct rte_eventdev *)(uintptr_t)event_dev);
                rc |= cnxk_sso_rxq_enable(
                        cnxk_eth_dev, (uint16_t)rx_queue_id, port,
                        &queue_conf->ev,
                        !!(queue_conf->rx_queue_flags &
                           RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, true,
                                      dev->force_ena_bp);
        }

        if (rc < 0) {
                plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
                        queue_conf->ev.queue_id);
                return rc;
        }

        dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;

        return 0;
}

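/*
 * Rx adapter queue delete: disable SSO delivery on the queue(s) and turn
 * off NPA backpressure for the associated mempool.
 */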
int
cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct cnxk_eth_rxq_sp *rxq_sp;
        int i, rc = 0;

        RTE_SET_USED(event_dev);
        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
        } else {
                rxq_sp = cnxk_eth_rxq_to_sp(
                        eth_dev->data->rx_queues[rx_queue_id]);
                rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, false,
                                      dev->force_ena_bp);
        }

        if (rc < 0)
                plt_err("Failed to clear Rx adapter config port=%d, q=%d",
                        eth_dev->data->port_id, rx_queue_id);

        return rc;
}

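/* Rx adapter start/stop need no extra handling; queues are already set up at queue add time. */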
int
cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                          const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                         const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

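/* Set the SQB aura limit to the requested count, capped at the SQ's buffer total. */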
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
        return roc_npa_aura_limit_modify(
                sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
}

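/*
 * Record the Tx queue pointer in the per-(port, queue) lookup table used by
 * the Tx adapter, growing the table when a port ID exceeds the current
 * maximum.
 */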
static int
cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
                            uint16_t eth_port_id, uint16_t tx_queue_id,
                            void *txq)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t max_port_id = dev->max_port_id;
        uint64_t *txq_data = dev->tx_adptr_data;

        if (txq_data == NULL || eth_port_id > max_port_id) {
                max_port_id = RTE_MAX(max_port_id, eth_port_id);
                txq_data = rte_realloc_socket(
                        txq_data,
                        (sizeof(uint64_t) * (max_port_id + 1) *
                         RTE_MAX_QUEUES_PER_PORT),
                        RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
                if (txq_data == NULL)
                        return -ENOMEM;
        }

        ((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
                 txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
        dev->max_port_id = max_port_id;
        dev->tx_adptr_data = txq_data;
        return 0;
}

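/*
 * Tx adapter queue add: cap the SQB aura limit and publish the Tx queue in
 * the adapter's lookup table; a negative queue ID adds every Tx queue.
 */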
int
cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_nix_sq *sq;
        int i, ret;
        void *txq;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
                ret = cnxk_sso_updt_tx_queue_data(
                        event_dev, eth_dev->data->port_id, tx_queue_id, txq);
                if (ret < 0)
                        return ret;

                dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
        }

        return 0;
}

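/*
 * Tx adapter queue delete: restore the SQB aura limit and clear the queue's
 * entry from the adapter's lookup table.
 */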
int
cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_sq *sq;
        int i, ret;

        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
        } else {
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
                ret = cnxk_sso_updt_tx_queue_data(
                        event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
                if (ret < 0)
                        return ret;
        }

        return 0;
}