/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_evdev.h"

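/* Report the Rx adapter capability for a given ethdev: octeontx2 ethdevs
 * pair with the SSO through an internal port (NIX delivers packets straight
 * to the SSO), anything else falls back to the SW adapter capability.
 */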
int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}
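
/* Rewrite the CQ and RQ contexts of an Rx queue through the NIX admin queue
 * so that received packets are handed to SSO group 'ggrp' with tag type 'tt'
 * instead of being enqueued to the CQ.
 */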
static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
               uint16_t eth_port_id)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->cq.ena = 0;
        aq->cq.caching = 0;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to disable cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->rq.sso_ena = 1;
        aq->rq.sso_tt = tt;
        aq->rq.sso_grp = ggrp;
        aq->rq.ena_wqwd = 1;
        /* Mbuf Header generation :
         * > FIRST_SKIP is a superset of WQE_SKIP; don't modify FIRST_SKIP as
         * it already accounts for the mbuf size, headroom and private area.
         * > Using WQE_SKIP we can directly assign
         *              mbuf = wqe - sizeof(struct mbuf);
         * so that the mbuf header will not hold unpredictable values, while
         * headroom and private data start at the beginning of wqe_data.
         */
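        /* Illustrative sketch, not compiled here: with WQE_SKIP = 1 the
         * hardware leaves one 128 B line ahead of the WQE, which matches
         * sizeof(struct rte_mbuf) on 64 B cache line targets, so the Rx
         * worker recovers the mbuf with plain pointer arithmetic:
         *
         *      mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
         *                                 sizeof(struct rte_mbuf));
         */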
        aq->rq.wqe_skip = 1;
        aq->rq.wqe_caching = 1;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 20; /* 20-bits */

        /* Flow Tag calculation :
         *
         * rq_tag <31:24> = good/bad_tag<7:0>;
         * rq_tag  <23:0> = [ltag]
         *
         * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; -> bits <19:0>
         * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
         *
         * Setup :
         * ltag<23:0> = (eth_port_id & 0xF) << 20;
         * good/bad_tag<7:0> =
         *      ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
         *
         * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
         *                        <27:20> (eth_port_id) | <19:0> [TAG]
         */

        aq->rq.ltag = (eth_port_id & 0xF) << 20;
        aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
                           (RTE_EVENT_TYPE_ETHDEV << 4);
        aq->rq.bad_utag = aq->rq.good_utag;
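
        /* Worked example, assuming a hypothetical eth_port_id of 0x12:
         *      ltag      = (0x12 & 0xF) << 20           -> TAG<23:20> = 0x2
         *      good_utag = ((0x12 >> 4) & 0xF) |
         *                  (RTE_EVENT_TYPE_ETHDEV << 4) -> TAG<27:24> = 0x1
         * so GETWORK presents TAG<27:20> = 0x12, recovering the originating
         * port, with TAG<31:28> holding RTE_EVENT_TYPE_ETHDEV.
         */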

        aq->rq.ena = 0;          /* Don't enable RQ yet */
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
        aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to init rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}
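
/* Undo sso_rxq_enable(): re-initialise the CQ and return the RQ to plain
 * CQ delivery with its SSO fields cleared.
 */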
static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_INIT;

        aq->cq.ena = 1;
        aq->cq.caching = 0;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to init cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->rq.sso_ena = 0;
        aq->rq.sso_tt = SSO_TT_UNTAGGED;
        aq->rq.sso_grp = 0;
        aq->rq.ena_wqwd = 0;
        aq->rq.wqe_caching = 0;
        aq->rq.wqe_skip = 0;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 0x20; /* Restore the full 32-bit flow tag width */
        aq->rq.ltag = 0;
        aq->rq.good_utag = 0;
        aq->rq.bad_utag = 0;
        aq->rq.ena = 1;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
        aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to clear rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}
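
/* Account for the external events a producer can inject: remember each
 * distinct Rx mempool once and grow the in-flight event (XAE) budget by its
 * size, so sso_xae_reconfigure() can size the XAQ pool accordingly.
 */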
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
        switch (event_type) {
        case RTE_EVENT_TYPE_ETHDEV:
        {
                struct otx2_eth_rxq *rxq = data;
                int i, match = false;

                for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
                        if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
                                match = true;
                }

                if (!match) {
                        dev->rx_adptr_pool_cnt++;
                        dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
                                                          sizeof(uint64_t) *
                                                          dev->rx_adptr_pool_cnt,
                                                          0);
                        dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
                                (uint64_t)rxq->pool;
                }
                dev->adptr_xae_cnt += rxq->pool->size;
                break;
        }
        case RTE_EVENT_TYPE_TIMER:
        {
                dev->adptr_xae_cnt += (*(uint64_t *)data);
                break;
        }
        default:
                break;
        }
}
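
/* Propagate the ptype lookup memory to every event port so the Rx fastpath
 * can translate WQEs to mbufs regardless of which workslot polls them.
 */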
static inline void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                } else {
                        struct otx2_ssogws *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                }
        }
}
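
/* Bind one Rx queue (or all of them when rx_queue_id is negative) to the
 * event device: grow the XAE budget, reconfigure the XAQ pool and switch the
 * queue's RQ context over to SSO delivery.
 */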
int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id,
                              const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
        struct otx2_eth_rxq *rxq;
        int i, rc;

        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        rxq = eth_dev->data->rx_queues[i];
                        sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                        rc = sso_xae_reconfigure((struct rte_eventdev *)
                                                 (uintptr_t)event_dev);
                        rc |= sso_rxq_enable(otx2_eth_dev, i,
                                             queue_conf->ev.sched_type,
                                             queue_conf->ev.queue_id, port);
                }
                rxq = eth_dev->data->rx_queues[0];
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        } else {
                rxq = eth_dev->data->rx_queues[rx_queue_id];
                sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                rc = sso_xae_reconfigure((struct rte_eventdev *)
                                         (uintptr_t)event_dev);
                rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
                                     queue_conf->ev.sched_type,
                                     queue_conf->ev.queue_id, port);
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        }

        if (rc < 0) {
                otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
                         queue_conf->ev.queue_id);
                return rc;
        }

        dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
        dev->tstamp = &otx2_eth_dev->tstamp;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}
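
/* Detach one Rx queue (or all of them when rx_queue_id is negative) from the
 * event device by restoring the default CQ/RQ contexts.
 */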
int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
        int i, rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        rc = sso_rxq_disable(dev, i);
        } else {
                rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
        }

        if (rc < 0)
                otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
                         eth_dev->data->port_id, rx_queue_id);

        return rc;
}

int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                          const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                         const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}
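
/* Adjust the buffer limit of a Tx queue's SQB aura through the NPA admin
 * queue; only the limit field is selected in the write mask.
 */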
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
        struct npa_aq_enq_req *aura_req;

        aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
        aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
        aura_req->ctype = NPA_AQ_CTYPE_AURA;
        aura_req->op = NPA_AQ_INSTOP_WRITE;

        aura_req->aura.limit = nb_sqb_bufs;
        aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

        return otx2_mbox_process(npa_lf->mbox);
}
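
/* Queue add clamps each SQB aura to OTX2_SSO_SQB_LIMIT so the Tx adapter
 * cannot exhaust SQBs; queue del restores the original txq->nb_sqb_bufs.
 */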
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                OTX2_SSO_SQB_LIMIT);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
        }

        dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                txq->nb_sqb_bufs);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
        }

        return 0;
}
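
/* Usage sketch (application side, not part of this file): with the
 * INTERNAL_PORT capability reported above, binding every Rx queue of an
 * ethdev to an event queue reduces to the generic adapter API, e.g.:
 *
 *      struct rte_event_eth_rx_adapter_queue_conf conf = {
 *              .ev.queue_id = ev_qid,
 *              .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *      };
 *      rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1, &conf);
 */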