/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019-2021 Marvell.
 */

#include "otx2_evdev.h"
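
/* Backpressure threshold helper: yields ~95% of the given buffer count.
 * Used when programming the NPA aura backpressure level in
 * sso_cfg_nix_mp_bpid() below.
 */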
#define NIX_RQ_AURA_THRESH(x) (((x) * 95) / 100)

int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        /* Internal-port (HW) capability is only available when the ethdev is
         * also an octeontx2 device; otherwise fall back to SW capabilities.
         */
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;

        return 0;
}

static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
               uint16_t eth_port_id)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
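
        /* When the RQ is switched to deliver packets to SSO as events, the
         * CQ receive path is unused; turn the CQ context off first.
         */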
        aq->cq.ena = 0;
        aq->cq.caching = 0;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to disable cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->rq.sso_ena = 1;
        aq->rq.sso_tt = tt;
        aq->rq.sso_grp = ggrp;
        aq->rq.ena_wqwd = 1;
        /* Mbuf header generation:
         * > FIRST_SKIP is a superset of WQE_SKIP; don't modify FIRST_SKIP as
         *   it already accounts for mbuf size, headroom and private area.
         * > Using WQE_SKIP we can directly assign
         *              mbuf = wqe - sizeof(struct mbuf);
         * so that the mbuf header will not contain unpredictable values,
         * while headroom and private data start at the beginning of
         * wqe_data.
         */
        aq->rq.wqe_skip = 1;
        aq->rq.wqe_caching = 1;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 20; /* 20-bits */

        /* Flow Tag calculation :
         *
         * rq_tag <31:24> = good/bad_tag<7:0>;
         * rq_tag  <23:0> = [ltag]
         *
         * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; i.e. the flow tag
         * supplies <19:0> and rq_tag supplies <31:20>.
         * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
         *
         * Setup :
         * ltag<23:0> = (eth_port_id & 0xF) << 20;
         * good/bad_tag<7:0> =
         *      ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
         *
         * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
         *                              <27:20> (eth_port_id) | <19:0> [TAG]
         */
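        /* Worked example (illustrative): with eth_port_id = 0x2A,
         * ltag = 0xA << 20 and good_utag = 0x2 | (RTE_EVENT_TYPE_ETHDEV << 4),
         * so the delivered tag carries the event type in <31:28>, the 8-bit
         * port id 0x2A in <27:20> and the 20-bit flow tag in <19:0>.
         */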

        aq->rq.ltag = (eth_port_id & 0xF) << 20;
        aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
                           (RTE_EVENT_TYPE_ETHDEV << 4);
        aq->rq.bad_utag = aq->rq.good_utag;

        aq->rq.ena = 0;          /* Don't enable RQ yet */
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
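        /* rq_mask was zeroed above, so assigning ~field sets every bit of a
         * field: only the fields flagged below are updated in the RQ
         * context; everything else is left untouched.
         */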
        aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to init rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}

static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
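
        /* Unbinding from the adapter: bring the CQ back up so the queue
         * returns to the normal poll-mode (CQ based) receive path.
         */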
        aq->cq.ena = 1;
        aq->cq.caching = 1;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to enable cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        /* Restore the RQ context to its non-SSO defaults. */
        aq->rq.sso_ena = 0;
        aq->rq.sso_tt = SSO_TT_UNTAGGED;
        aq->rq.sso_grp = 0;
        aq->rq.ena_wqwd = 0;
        aq->rq.wqe_caching = 0;
        aq->rq.wqe_skip = 0;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 0x20; /* Back to the full 32-bit flow tag */
        aq->rq.ltag = 0;
        aq->rq.good_utag = 0;
        aq->rq.bad_utag = 0;
        aq->rq.ena = 1;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
        aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to clear rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}

void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
        int i;

        switch (event_type) {
        case RTE_EVENT_TYPE_ETHDEV:
        {
                struct otx2_eth_rxq *rxq = data;
                uint64_t *old_ptr;
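
                /* Account each Rx mempool once: its size adds to the number
                 * of in-flight events (XAQ entries) the SSO must be able to
                 * hold.
                 */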
                for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
                        if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
                                return;
                }

                dev->rx_adptr_pool_cnt++;
                old_ptr = dev->rx_adptr_pools;
                dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
                                                  sizeof(uint64_t) *
                                                  dev->rx_adptr_pool_cnt, 0);
                if (dev->rx_adptr_pools == NULL) {
                        /* The tracking list couldn't grow; still account for
                         * the pool and fall back to the old list.
                         */
                        dev->adptr_xae_cnt += rxq->pool->size;
                        dev->rx_adptr_pools = old_ptr;
                        dev->rx_adptr_pool_cnt--;
                        return;
                }
                dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
                        (uint64_t)rxq->pool;

                dev->adptr_xae_cnt += rxq->pool->size;
                break;
        }
        case RTE_EVENT_TYPE_TIMER:
        {
                struct otx2_tim_ring *timr = data;
                uint16_t *old_ring_ptr;
                uint64_t *old_sz_ptr;

                /* Known ring? Just refresh its size contribution. */
                for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
                        if (timr->ring_id != dev->timer_adptr_rings[i])
                                continue;
                        if (timr->nb_timers == dev->timer_adptr_sz[i])
                                return;
                        dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz[i] = timr->nb_timers;

                        return;
                }

                dev->tim_adptr_ring_cnt++;
                old_ring_ptr = dev->timer_adptr_rings;
                old_sz_ptr = dev->timer_adptr_sz;

                dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
                                                     sizeof(uint16_t) *
                                                     dev->tim_adptr_ring_cnt,
                                                     0);
                if (dev->timer_adptr_rings == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_rings = old_ring_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
                                                  sizeof(uint64_t) *
                                                  dev->tim_adptr_ring_cnt,
                                                  0);

                if (dev->timer_adptr_sz == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz = old_sz_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
                        timr->ring_id;
                dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
                        timr->nb_timers;

                dev->adptr_xae_cnt += timr->nb_timers;
                break;
        }
        default:
                break;
        }
}

static inline void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i;

        /* Propagate the packet-type lookup memory to every workslot so the
         * Rx fastpath can translate NIX parse results locally.
         */
        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                } else {
                        struct otx2_ssogws *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                }
        }
}
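
/* Map the Rx queue's mempool (NPA aura) to the port's flow-control
 * backpressure id (BPID), so that the pool running near-empty asserts
 * backpressure towards NIX Rx.
 */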
static inline void
sso_cfg_nix_mp_bpid(struct otx2_sso_evdev *dev,
                    struct otx2_eth_dev *otx2_eth_dev, struct otx2_eth_rxq *rxq,
                    uint8_t ena)
{
        struct otx2_fc_info *fc = &otx2_eth_dev->fc_info;
        struct npa_aq_enq_req *req;
        struct npa_aq_enq_rsp *rsp;
        struct otx2_npa_lf *lf;
        struct otx2_mbox *mbox;
        uint32_t limit;
        int rc;

        if (otx2_dev_is_sdp(otx2_eth_dev))
                return;

        lf = otx2_npa_lf_obj_get();
        if (lf == NULL)
                return;
        mbox = lf->mbox;

        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return;

        req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_READ;

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return;

        limit = rsp->aura.limit;
        /* BP is already enabled. */
        if (rsp->aura.bp_ena) {
                /* If BP ids don't match, disable BP. */
                if ((rsp->aura.nix0_bpid != fc->bpid[0]) && !dev->force_rx_bp) {
                        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
                        if (req == NULL)
                                return;

                        req->aura_id =
                                npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
                        req->ctype = NPA_AQ_CTYPE_AURA;
                        req->op = NPA_AQ_INSTOP_WRITE;

                        req->aura.bp_ena = 0;
                        req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

                        otx2_mbox_process(mbox);
                }
                return;
        }

        /* BP was previously enabled but is now disabled; skip. */
        if (rsp->aura.bp && ena)
                return;

        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return;

        req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_WRITE;

        if (ena) {
                req->aura.nix0_bpid = fc->bpid[0];
                req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
                /* Assert BP at ~95% of the aura limit (input capped at 256
                 * for larger pools).
                 */
                req->aura.bp = NIX_RQ_AURA_THRESH(limit > 128 ? 256 : limit);
                req->aura_mask.bp = ~(req->aura_mask.bp);
        }

        req->aura.bp_ena = !!ena;
        req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

        otx2_mbox_process(mbox);
}

int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id,
                              const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
        struct otx2_eth_rxq *rxq;
        int i, rc;

        /* The internal-port adapter only works with octeontx2 ethdevs. */
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;
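
        /* rx_queue_id < 0 binds every Rx queue of the port to the event
         * queue given in queue_conf.
         */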
        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        rxq = eth_dev->data->rx_queues[i];
                        sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                        sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
                        rc = sso_xae_reconfigure(
                                (struct rte_eventdev *)(uintptr_t)event_dev);
                        rc |= sso_rxq_enable(otx2_eth_dev, i,
                                             queue_conf->ev.sched_type,
                                             queue_conf->ev.queue_id, port);
                }
                rxq = eth_dev->data->rx_queues[0];
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        } else {
                rxq = eth_dev->data->rx_queues[rx_queue_id];
                sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
                rc = sso_xae_reconfigure((struct rte_eventdev *)
                                         (uintptr_t)event_dev);
                rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
                                     queue_conf->ev.sched_type,
                                     queue_conf->ev.queue_id, port);
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        }

        if (rc < 0) {
                otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
                         queue_conf->ev.queue_id);
                return rc;
        }
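
        /* Rx offload flags and timestamp state are accumulated across all
         * ports added to the adapter; refresh the event-device fastpath
         * functions so they match the new set of offloads.
         */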
        dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
        dev->tstamp = &otx2_eth_dev->tstamp;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i, rc;

        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        rc = sso_rxq_disable(otx2_eth_dev, i);
                        sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
                                            eth_dev->data->rx_queues[i], false);
                }
        } else {
                rc = sso_rxq_disable(otx2_eth_dev, (uint16_t)rx_queue_id);
                sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
                                    eth_dev->data->rx_queues[rx_queue_id],
                                    false);
        }

        if (rc < 0)
                otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
                         eth_dev->data->port_id, rx_queue_id);

        return rc;
}

int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                          const struct rte_eth_dev *eth_dev)
{
        /* No additional start handling is needed for the internal port. */
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                         const struct rte_eth_dev *eth_dev)
{
        /* No additional stop handling is needed for the internal port. */
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}
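
/* Adjust the limit on the SQ's SQB (send-queue buffer) aura: the Tx
 * adapter clamps the in-use SQB count while a queue is bound to it and
 * restores the queue's own limit when the queue is deleted.
 */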
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
        struct npa_aq_enq_req *aura_req;

        aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
        aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
        aura_req->ctype = NPA_AQ_CTYPE_AURA;
        aura_req->op = NPA_AQ_INSTOP_WRITE;

        aura_req->aura.limit = nb_sqb_bufs;
        aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

        return otx2_mbox_process(npa_lf->mbox);
}

static int
sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
                      uint16_t eth_port_id, uint16_t tx_queue_id,
                      struct otx2_eth_txq *txq)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i;
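
        /* Each workslot carries its own flattened [port][queue] -> txq table
         * appended past the end of the workslot structure, so the Tx
         * fastpath can resolve the destination SQ without extra indirection.
         * Growing the table therefore means reallocating every port's
         * workslot.
         */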
        for (i = 0; i < event_dev->data->nb_ports; i++) {
                dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *old_dws;
                        struct otx2_ssogws_dual *dws;

                        old_dws = event_dev->data->ports[i];
                        dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
                                                 sizeof(struct otx2_ssogws_dual)
                                                 + RTE_CACHE_LINE_SIZE +
                                                 (sizeof(uint64_t) *
                                                  (dev->max_port_id + 1) *
                                                  RTE_MAX_QUEUES_PER_PORT),
                                                 RTE_CACHE_LINE_SIZE,
                                                 event_dev->data->socket_id);
                        if (dws == NULL)
                                return -ENOMEM;

                        /* First cache line is reserved for cookie */
                        dws = (struct otx2_ssogws_dual *)
                                ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);

                        ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
                          )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
                                (uint64_t)txq;
                        event_dev->data->ports[i] = dws;
                } else {
                        struct otx2_ssogws *old_ws;
                        struct otx2_ssogws *ws;

                        old_ws = event_dev->data->ports[i];
                        ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
                                                sizeof(struct otx2_ssogws) +
                                                RTE_CACHE_LINE_SIZE +
                                                (sizeof(uint64_t) *
                                                 (dev->max_port_id + 1) *
                                                 RTE_MAX_QUEUES_PER_PORT),
                                                RTE_CACHE_LINE_SIZE,
                                                event_dev->data->socket_id);
                        if (ws == NULL)
                                return -ENOMEM;

                        /* First cache line is reserved for cookie */
                        ws = (struct otx2_ssogws *)
                                ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);

                        ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
                          )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
                                (uint64_t)txq;
                        event_dev->data->ports[i] = ws;
                }
        }

        return 0;
}

int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_eth_txq *txq;
        int i, ret;

        RTE_SET_USED(id);
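        /* tx_queue_id < 0 binds every Tx queue of the port to the adapter. */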
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                OTX2_SSO_SQB_LIMIT);
                        ret = sso_add_tx_queue_data(event_dev,
                                                    eth_dev->data->port_id, i,
                                                    txq);
                        if (ret < 0)
                                return ret;
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
                ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
                                            tx_queue_id, txq);
                if (ret < 0)
                        return ret;
        }

        dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        /* Restore the queue's original SQB limit. */
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                txq->nb_sqb_bufs);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
        }

        return 0;
}