#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
-#include <rte_eventdev_pmd_pci.h>
+#include <eventdev_pmd_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
-#include "otx2_evdev_stats.h"
#include "otx2_evdev.h"
+#include "otx2_evdev_crypto_adptr_tx.h"
+#include "otx2_evdev_stats.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
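+ /* Enqueue op used by the crypto adapter (single workslot mode). */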
+ event_dev->ca_enqueue = otx2_ssogws_ca_enq;
if (dev->dual_ws) {
event_dev->enqueue = otx2_ssogws_dual_enq;
[!!(dev->tx_offloads &
NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
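+ /* Enqueue op used by the crypto adapter (dual workslot mode). */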
+ event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
}
event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
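+ /*
+  * The port pointer handed out by port_setup points one cache line past
+  * the real allocation; that first cache line holds a cookie (event_dev
+  * back-pointer and a configured flag) which is used for teardown here.
+  */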
static void
otx2_sso_port_release(void *port)
{
- rte_free(port);
+ struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+ struct otx2_sso_evdev *dev;
+ int i;
+
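+ /* A port that was never set up has nothing mapped; just free it. */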
+ if (!gws_cookie->configured)
+ goto free;
+
+ dev = sso_pmd_priv(gws_cookie->event_dev);
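+ /* Unlink every event queue from this port's workslot(s). */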
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], i, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], i, false);
+ }
+ memset(ws, 0, sizeof(*ws));
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++)
+ sso_port_link_modify(ws, i, false);
+ memset(ws, 0, sizeof(*ws));
+ }
+
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
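+ /* The allocation begins at the cookie, not at the port pointer. */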
+ rte_free(gws_cookie);
}
static void
RTE_SET_USED(queue_id);
}
-static void
-sso_clr_links(const struct rte_eventdev *event_dev)
-{
- struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- int i, j;
-
- for (i = 0; i < dev->nb_event_ports; i++) {
- if (dev->dual_ws) {
- struct otx2_ssogws_dual *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++) {
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[0], j, false);
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[1], j, false);
- }
- } else {
- struct otx2_ssogws *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++)
- sso_port_link_modify(ws, j, false);
- }
- }
-}
-
static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
ws->tag_op = base + SSOW_LF_GWS_TAG;
ws->wqp_op = base + SSOW_LF_GWS_WQP;
ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK;
- ws->swtp_op = base + SSOW_LF_GWS_SWTP;
+ ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
}
for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws_dual *ws;
uintptr_t base;
} else {
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws_dual),
+ sizeof(struct otx2_ssogws_dual) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
- }
- if (ws == NULL) {
- otx2_err("Failed to alloc memory for port=%d", i);
- rc = -ENOMEM;
- break;
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d",
+ i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
}
ws->port = i;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
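+ /* Save the SSOW LF base address of each workslot half. */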
+ ws->base[0] = base;
vws++;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
+ ws->base[1] = base;
vws++;
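+ /* Fill the cookie so otx2_sso_port_release() can tear the port down. */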
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
}
for (i = 0; i < nb_lf; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws *ws;
uintptr_t base;
- /* Free memory prior to re-allocation if needed */
if (event_dev->data->ports[i] != NULL) {
ws = event_dev->data->ports[i];
- rte_free(ws);
- ws = NULL;
- }
+ } else {
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("otx2_sso_ws",
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d",
+ i);
+ rc = -ENOMEM;
+ break;
+ }
- /* Allocate event port memory */
- ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws),
- RTE_CACHE_LINE_SIZE,
- event_dev->data->socket_id);
- if (ws == NULL) {
- otx2_err("Failed to alloc memory for port=%d", i);
- rc = -ENOMEM;
- break;
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
}
ws->port = i;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
sso_set_port_ops(ws, base);
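+ /* Save the SSOW LF base address for this workslot. */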
+ ws->base = base;
+
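+ /* Fill the cookie so otx2_sso_port_release() can tear the port down. */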
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
event_dev->data->ports[i] = ws;
}
dev->fc_iova = mz->iova;
dev->fc_mem = mz->addr;
-
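+ /* Start the flow-control count at zero. */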
+ *dev->fc_mem = 0;
aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
memset(aura, 0, sizeof(struct npa_aura_s));
return otx2_mbox_process(mbox);
}
+static int
+sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct sso_release_xaq *req;
+
+ otx2_sso_dbg("Freeing XAQ for GGRPs");
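+ /* Mailbox request: release the XAQ aura held by all HW groups. */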
+ req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
+ req->hwgrps = dev->nb_event_queues;
+
+ return otx2_mbox_process(mbox);
+}
+
static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
enum otx2_sso_lf_type lf_type)
return -EINVAL;
}
- if (dev->configured) {
+ if (dev->configured)
sso_unregister_irqs(event_dev);
- /* Clear any prior port-queue mapping. */
- sso_clr_links(event_dev);
- }
if (dev->nb_event_queues) {
/* Finit any previous queues. */
ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
ws->swtag_req = 0;
ws->vws = 0;
- ws->ws_state[0].cur_grp = 0;
- ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
- ws->ws_state[1].cur_grp = 0;
- ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
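+ /* Refresh the cached flow-control pointer and XAQ limit. */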
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
} else {
struct otx2_ssogws *ws;
ws = event_dev->data->ports[i];
ssogws_reset(ws);
ws->swtag_req = 0;
- ws->cur_grp = 0;
- ws->cur_tt = SSO_SYNC_EMPTY;
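+ /* Refresh the cached flow-control pointer and XAQ limit. */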
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
}
}
otx2_write64(enable, ws->grps_base[i] +
SSO_LF_GGRP_QCTL);
}
- ws->ws_state[0].cur_grp = 0;
- ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
} else {
struct otx2_ssogws *ws = event_dev->data->ports[0];
otx2_write64(enable, ws->grps_base[i] +
SSO_LF_GGRP_QCTL);
}
- ws->cur_grp = 0;
- ws->cur_tt = SSO_SYNC_EMPTY;
}
/* reset SSO GWS cache */
sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- struct rte_mempool *prev_xaq_pool;
int rc = 0;
if (event_dev->data->dev_started)
sso_cleanup(event_dev, 0);
- prev_xaq_pool = dev->xaq_pool;
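+ /* Release the XAQs held by the HW groups before freeing the pool. */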
+ rc = sso_ggrp_free_xaq(dev);
+ if (rc < 0) {
+ otx2_err("Failed to free XAQ\n");
+ return rc;
+ }
+
+ rte_mempool_free(dev->xaq_pool);
dev->xaq_pool = NULL;
rc = sso_xaq_allocate(dev);
if (rc < 0) {
otx2_err("Failed to alloc xaq pool %d", rc);
- rte_mempool_free(prev_xaq_pool);
return rc;
}
rc = sso_ggrp_alloc_xaq(dev);
if (rc < 0) {
otx2_err("Failed to alloc xaq to ggrp %d", rc);
- rte_mempool_free(prev_xaq_pool);
return rc;
}
- rte_mempool_free(prev_xaq_pool);
rte_mb();
if (event_dev->data->dev_started)
sso_cleanup(event_dev, 1);
.timer_adapter_caps_get = otx2_tim_caps_get,
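+ /* Crypto adapter ops. */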
+ .crypto_adapter_caps_get = otx2_ca_caps_get,
+ .crypto_adapter_queue_pair_add = otx2_ca_qp_add,
+ .crypto_adapter_queue_pair_del = otx2_ca_qp_del,
+
.xstats_get = otx2_sso_xstats_get,
.xstats_reset = otx2_sso_xstats_reset,
.xstats_get_names = otx2_sso_xstats_get_names,
#define OTX2_SSO_XAE_CNT "xae_cnt"
#define OTX2_SSO_SINGLE_WS "single_ws"
#define OTX2_SSO_GGRP_QOS "qos"
-#define OTX2_SSO_SELFTEST "selftest"
static void
parse_queue_param(char *value, void *opaque)
if (kvlist == NULL)
return;
- rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
- &dev->selftest);
rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
&dev->xae_cnt);
rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
event_dev->data->name, dev->max_event_queues,
dev->max_event_ports);
- if (dev->selftest) {
- event_dev->dev->driver = &pci_sso.driver;
- event_dev->dev_ops->dev_selftest();
- }
otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
OTX2_SSO_SINGLE_WS "=1"
OTX2_SSO_GGRP_QOS "=<string>"
- OTX2_SSO_SELFTEST "=1"
OTX2_NPA_LOCK_MASK "=<1-65535>");