/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019-2021 Marvell.
 */

#include "otx2_evdev.h"

int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;

	return 0;
}
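
/* Switch a NIX receive queue over to SSO delivery: disable its completion
 * queue (CQ) context and rewrite the RQ context so that received packets
 * are tagged and pushed to the given SSO group as events.
 */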
static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
	       uint16_t eth_port_id)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 0;
	aq->cq.caching = 0;

	otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
	aq->cq_mask.ena = ~(aq->cq_mask.ena);
	aq->cq_mask.caching = ~(aq->cq_mask.caching);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to disable cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->rq.sso_ena = 1;
	aq->rq.sso_tt = tt;
	aq->rq.sso_grp = ggrp;
	aq->rq.ena_wqwd = 1;
	/* Mbuf Header generation :
	 * > FIRST_SKIP is a super set of WQE_SKIP, don't modify first skip as
	 * it already has data related to mbuf size, headroom, private area.
	 * > Using WQE_SKIP we can directly assign
	 *		mbuf = wqe - sizeof(struct mbuf);
	 * so that the mbuf header will not have unpredicted values while
	 * headroom and private data start at the beginning of wqe_data.
	 */
	aq->rq.wqe_skip = 1;
	aq->rq.wqe_caching = 1;
	aq->rq.spb_ena = 0;
	aq->rq.flow_tagw = 20; /* 20-bits */

	/* Flow Tag calculation :
	 *
	 * rq_tag <31:24> = good/bad_tag<8:0>;
	 * rq_tag  <23:0> = [ltag]
	 *
	 * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; <31:20>
	 * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
	 *
	 * Setup :
	 * ltag<23:0> = (eth_port_id & 0xF) << 20;
	 * good/bad_tag<9:0> =
	 *	((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
	 *
	 * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
	 *				<27:20> (eth_port_id) | <19:0> [TAG]
	 */
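
	/* Illustrative example: assuming eth_port_id = 0x25 and given that
	 * RTE_EVENT_TYPE_ETHDEV is 0, ltag = (0x25 & 0xF) << 20 = 0x500000
	 * and good_utag = ((0x25 >> 4) & 0xF) = 0x02, so rq_tag = 0x02500000.
	 * On getwork, TAG<27:20> = 0x25 (the port id) and TAG<19:0> carries
	 * the per-flow tag bits.
	 */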

	aq->rq.ltag = (eth_port_id & 0xF) << 20;
	aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
				(RTE_EVENT_TYPE_ETHDEV << 4);
	aq->rq.bad_utag = aq->rq.good_utag;

	aq->rq.ena = 0;		 /* Don't enable RQ yet */
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

	otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
	/* mask the bits to write. */
	aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
	aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
	aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
	aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
	aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
	aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
	aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
	aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
	aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
	aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
	aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
	aq->rq_mask.ena = ~(aq->rq_mask.ena);
	aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
	aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to init rx adapter context");
		goto fail;
	}

	return 0;
fail:
	return rc;
}
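
/* Undo sso_rxq_enable(): re-enable the completion queue context and clear
 * the SSO related fields of the RQ context so the queue falls back to the
 * regular CQ based receive path.
 */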
static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 1;
	aq->cq.caching = 1;

	otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
	aq->cq_mask.ena = ~(aq->cq_mask.ena);
	aq->cq_mask.caching = ~(aq->cq_mask.caching);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to enable cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->rq.sso_ena = 0;
	aq->rq.sso_tt = SSO_TT_UNTAGGED;
	aq->rq.sso_grp = 0;
	aq->rq.ena_wqwd = 0;
	aq->rq.wqe_caching = 0;
	aq->rq.wqe_skip = 0;
	aq->rq.spb_ena = 0;
	aq->rq.flow_tagw = 0x20; /* 32-bits */

	aq->rq.ltag = 0;
	aq->rq.good_utag = 0;
	aq->rq.bad_utag = 0;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

	otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
	/* mask the bits to write. */
	aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
	aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
	aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
	aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
	aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
	aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
	aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
	aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
	aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
	aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
	aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
	aq->rq_mask.ena = ~(aq->rq_mask.ena);
	aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
	aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to clear rx adapter context");
		goto fail;
	}

	return 0;
fail:
	return rc;
}
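
/* Keep a running count of the XAE entries the adapters will need: every new
 * Rx mempool or event timer ring grows adptr_xae_cnt, which is later used to
 * size the SSO XAQ pool when the event device is reconfigured.
 */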
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
	int i;

	switch (event_type) {
	case RTE_EVENT_TYPE_ETHDEV:
	{
		struct otx2_eth_rxq *rxq = data;
		uint64_t *old_ptr;

		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
			if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
				return;
		}

		dev->rx_adptr_pool_cnt++;
		old_ptr = dev->rx_adptr_pools;
		dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
						  sizeof(uint64_t) *
						  dev->rx_adptr_pool_cnt, 0);
		if (dev->rx_adptr_pools == NULL) {
			dev->adptr_xae_cnt += rxq->pool->size;
			dev->rx_adptr_pools = old_ptr;
			dev->rx_adptr_pool_cnt--;
			return;
		}
		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
							(uint64_t)rxq->pool;

		dev->adptr_xae_cnt += rxq->pool->size;
		break;
	}
	case RTE_EVENT_TYPE_TIMER:
	{
		struct otx2_tim_ring *timr = data;
		uint16_t *old_ring_ptr;
		uint64_t *old_sz_ptr;

		for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
			if (timr->ring_id != dev->timer_adptr_rings[i])
				continue;
			if (timr->nb_timers == dev->timer_adptr_sz[i])
				return;
			dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz[i] = timr->nb_timers;

			return;
		}

		dev->tim_adptr_ring_cnt++;
		old_ring_ptr = dev->timer_adptr_rings;
		old_sz_ptr = dev->timer_adptr_sz;

		dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
						     sizeof(uint16_t) *
						     dev->tim_adptr_ring_cnt,
						     0);
		if (dev->timer_adptr_rings == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_rings = old_ring_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
						  sizeof(uint64_t) *
						  dev->tim_adptr_ring_cnt,
						  0);

		if (dev->timer_adptr_sz == NULL) {
			dev->adptr_xae_cnt += timr->nb_timers;
			dev->timer_adptr_sz = old_sz_ptr;
			dev->tim_adptr_ring_cnt--;
			return;
		}

		dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
							timr->ring_id;
		dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
							timr->nb_timers;

		dev->adptr_xae_cnt += timr->nb_timers;
		break;
	}
	default:
		break;
	}
}
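
/* Share the ethdev's Rx lookup memory (packet type translation tables) with
 * every event port so the SSO Rx fastpath can fill mbuf packet type fields.
 */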
static void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = event_dev->data->ports[i];

			ws->lookup_mem = lookup_mem;
		} else {
			struct otx2_ssogws *ws = event_dev->data->ports[i];

			ws->lookup_mem = lookup_mem;
		}
	}
}
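
/* Rx adapter queue add: grow the XAE count for the queue's mempool,
 * reconfigure the SSO XAQ pool, switch the NIX RQ(s) over to SSO delivery
 * and refresh the lookup memory and fastpath function pointers.
 */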
int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint16_t port = eth_dev->data->port_id;
	struct otx2_eth_rxq *rxq;
	int i, rc;

	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		return -EINVAL;

	if (rx_queue_id < 0) {
		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
			rxq = eth_dev->data->rx_queues[i];
			sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
			rc = sso_xae_reconfigure((struct rte_eventdev *)
						 (uintptr_t)event_dev);
			rc |= sso_rxq_enable(otx2_eth_dev, i,
					     queue_conf->ev.sched_type,
					     queue_conf->ev.queue_id, port);
		}
		rxq = eth_dev->data->rx_queues[0];
		sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
	} else {
		rxq = eth_dev->data->rx_queues[rx_queue_id];
		sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
		rc = sso_xae_reconfigure((struct rte_eventdev *)
					 (uintptr_t)event_dev);
		rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
				     queue_conf->ev.sched_type,
				     queue_conf->ev.queue_id, port);
		sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
	}

	if (rc < 0) {
		otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
			 queue_conf->ev.queue_id);
		return rc;
	}

	dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
	dev->tstamp = &otx2_eth_dev->tstamp;
	sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	int i, rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (rc)
		return -EINVAL;

	if (rx_queue_id < 0) {
		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
			rc = sso_rxq_disable(dev, i);
	} else {
		rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
	}

	if (rc < 0)
		otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
			 eth_dev->data->port_id, rx_queue_id);

	return rc;
}

int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			  const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			 const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
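
/* Adjust the buffer limit of a Tx queue's SQB aura via the NPA mailbox. The
 * Tx adapter lowers the limit to OTX2_SSO_SQB_LIMIT while a queue is bound
 * (see queue_add below) and restores txq->nb_sqb_bufs when it is removed,
 * limiting how many SQBs event-driven Tx can consume.
 */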
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
	struct npa_aq_enq_req *aura_req;

	aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
	aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = nb_sqb_bufs;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

	return otx2_mbox_process(npa_lf->mbox);
}
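
/* Record the txq handle in every event port's tx_adptr_data table. Each port
 * structure is reallocated behind its cookie cache line so the table can hold
 * (max_port_id + 1) * RTE_MAX_QUEUES_PER_PORT entries.
 */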
static int
sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
		      uint16_t eth_port_id, uint16_t tx_queue_id,
		      struct otx2_eth_txq *txq)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < event_dev->data->nb_ports; i++) {
		dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *old_dws;
			struct otx2_ssogws_dual *dws;

			old_dws = event_dev->data->ports[i];
			dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
						 sizeof(struct otx2_ssogws_dual)
						 + RTE_CACHE_LINE_SIZE +
						 (sizeof(uint64_t) *
						  (dev->max_port_id + 1) *
						  RTE_MAX_QUEUES_PER_PORT),
						 RTE_CACHE_LINE_SIZE,
						 event_dev->data->socket_id);
			if (dws == NULL)
				return -ENOMEM;

			/* First cache line is reserved for cookie */
			dws = (struct otx2_ssogws_dual *)
				((uint8_t *)dws + RTE_CACHE_LINE_SIZE);

			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
			 )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
				(uint64_t)txq;
			event_dev->data->ports[i] = dws;
		} else {
			struct otx2_ssogws *old_ws;
			struct otx2_ssogws *ws;

			old_ws = event_dev->data->ports[i];
			ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
						sizeof(struct otx2_ssogws) +
						RTE_CACHE_LINE_SIZE +
						(sizeof(uint64_t) *
						 (dev->max_port_id + 1) *
						 RTE_MAX_QUEUES_PER_PORT),
						RTE_CACHE_LINE_SIZE,
						event_dev->data->socket_id);
			if (ws == NULL)
				return -ENOMEM;

			/* First cache line is reserved for cookie */
			ws = (struct otx2_ssogws *)
				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);

			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
			 )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
				(uint64_t)txq;
			event_dev->data->ports[i] = ws;
		}
	}

	return 0;
}
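
/* Tx adapter queue add: cap the SQB aura of each added Tx queue and publish
 * the queue in every event port's tx_adptr_data so the ports can enqueue to
 * it directly from the event Tx fastpath.
 */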
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_eth_txq *txq;
	int i, ret;

	RTE_SET_USED(id);
	if (tx_queue_id < 0) {
		for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
			txq = eth_dev->data->tx_queues[i];
			sso_sqb_aura_limit_edit(txq->sqb_pool,
						OTX2_SSO_SQB_LIMIT);
			ret = sso_add_tx_queue_data(event_dev,
						    eth_dev->data->port_id, i,
						    txq);
			if (ret < 0)
				return ret;
		}
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
		ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
					    tx_queue_id, txq);
		if (ret < 0)
			return ret;
	}

	dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
	sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	struct otx2_eth_txq *txq;
	int i;

	RTE_SET_USED(id);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(event_dev);
	if (tx_queue_id < 0) {
		for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
			txq = eth_dev->data->tx_queues[i];
			sso_sqb_aura_limit_edit(txq->sqb_pool,
						txq->nb_sqb_bufs);
		}
	} else {
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
	}

	return 0;
}