static void
otx2_sso_port_release(void *port)
{
- rte_free(port);
+ /* The cookie lives in memory reserved just in front of the port
+  * structure (retrieved via ssogws_get_cookie(); presumably the cache
+  * line preceding @port — TODO confirm against the allocator side).
+  * It records the owning event device and whether this port was ever
+  * configured, so release can unlink queues before freeing.
+  */
+ struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+ struct otx2_sso_evdev *dev;
+ int i;
+
+ /* Port never configured: no queue links to undo, just free memory. */
+ if (!gws_cookie->configured)
+ goto free;
+
+ dev = sso_pmd_priv(gws_cookie->event_dev);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ /* Unlink every event queue from both halves of the dual
+  * workslot, then scrub the port state.
+  */
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], i, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], i, false);
+ }
+ memset(ws, 0, sizeof(*ws));
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ /* Single workslot: unlink all queues, then scrub state. */
+ for (i = 0; i < dev->nb_event_queues; i++)
+ sso_port_link_modify(ws, i, false);
+ memset(ws, 0, sizeof(*ws));
+ }
+
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
+ /* Free from the cookie address: it is the true start of the
+  * allocation (the port pointer is offset past the cookie line).
+  */
+ rte_free(gws_cookie);
}
static void
RTE_SET_USED(queue_id);
}
-static void
-sso_clr_links(const struct rte_eventdev *event_dev)
-{
- struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- int i, j;
-
- for (i = 0; i < dev->nb_event_ports; i++) {
- if (dev->dual_ws) {
- struct otx2_ssogws_dual *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++) {
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[0], j, false);
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[1], j, false);
- }
- } else {
- struct otx2_ssogws *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++)
- sso_port_link_modify(ws, j, false);
- }
- }
-}
-
static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
}
for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws_dual *ws;
uintptr_t base;
} else {
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws_dual),
+ sizeof(struct otx2_ssogws_dual) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
- }
- if (ws == NULL) {
- otx2_err("Failed to alloc memory for port=%d", i);
- rc = -ENOMEM;
- break;
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d",
+ i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
}
ws->port = i;
sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
vws++;
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
}
for (i = 0; i < nb_lf; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws *ws;
uintptr_t base;
/* Free memory prior to re-allocation if needed */
if (event_dev->data->ports[i] != NULL) {
ws = event_dev->data->ports[i];
- rte_free(ws);
+ rte_free(ssogws_get_cookie(ws));
ws = NULL;
}
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL) {
break;
}
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
ws->port = i;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
sso_set_port_ops(ws, base);
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
return -EINVAL;
}
- if (dev->configured) {
+ if (dev->configured)
sso_unregister_irqs(event_dev);
- /* Clear any prior port-queue mapping. */
- sso_clr_links(event_dev);
- }
if (dev->nb_event_queues) {
/* Finit any previous queues. */
struct otx2_ssogws_dual *dws;
old_dws = event_dev->data->ports[i];
- dws = rte_realloc_socket(old_dws,
+ dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
sizeof(struct otx2_ssogws_dual)
- + (sizeof(uint64_t) *
+ + RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
(dev->max_port_id + 1) *
RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
if (dws == NULL)
return -ENOMEM;
+ /* First cache line is reserved for cookie */
+ dws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);
+
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;
struct otx2_ssogws *ws;
old_ws = event_dev->data->ports[i];
- ws = rte_realloc_socket(old_ws,
- sizeof(struct otx2_ssogws_dual)
- + (sizeof(uint64_t) *
- (dev->max_port_id + 1) *
- RTE_MAX_QUEUES_PER_PORT),
+ ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
+ (dev->max_port_id + 1) *
+ RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL)
return -ENOMEM;
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;