dev->adptr_xae_cnt += rxq->qconf.mp->size;
break;
}
+ case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
+ struct rte_mempool *mp = data;
+ uint64_t *old_ptr;
+
+ for (i = 0; i < dev->vec_pool_cnt; i++) {
+ if ((uint64_t)mp == dev->vec_pools[i])
+ return;
+ }
+
+ dev->vec_pool_cnt++;
+ old_ptr = dev->vec_pools;
+ dev->vec_pools =
+ rte_realloc(dev->vec_pools,
+ sizeof(uint64_t) * dev->vec_pool_cnt, 0);
+ if (dev->vec_pools == NULL) {
+ dev->adptr_xae_cnt += mp->size;
+ dev->vec_pools = old_ptr;
+ dev->vec_pool_cnt--;
+ return;
+ }
+ dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;
+
+ dev->adptr_xae_cnt += mp->size;
+ break;
+ }
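The vector case above tracks each event vector mempool only once: a linear scan deduplicates the pointer, the array grows by one slot with rte_realloc(), and the count is rolled back if the allocation fails. A minimal standalone sketch of the same dedup-and-grow pattern, using plain realloc() and hypothetical names (pool_tracker, pool_tracker_add) in place of the driver state:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical tracker; pools/pool_cnt mirror dev->vec_pools/vec_pool_cnt. */
struct pool_tracker {
        uint64_t *pools;
        uint16_t pool_cnt;
};

/* Record mp once; 0 on success or if already present, -1 on alloc failure. */
static int
pool_tracker_add(struct pool_tracker *t, const void *mp)
{
        uint64_t *old_ptr;
        uint16_t i;

        for (i = 0; i < t->pool_cnt; i++) {
                if ((uint64_t)(uintptr_t)mp == t->pools[i])
                        return 0; /* already tracked, nothing to do */
        }

        old_ptr = t->pools;
        t->pools = realloc(t->pools, sizeof(uint64_t) * (t->pool_cnt + 1));
        if (t->pools == NULL) {
                t->pools = old_ptr; /* keep the old array on failure */
                return -1;
        }
        t->pools[t->pool_cnt++] = (uint64_t)(uintptr_t)mp;
        return 0;
}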
case RTE_EVENT_TYPE_TIMER: {
struct cnxk_tim_ring *timr = data;
uint16_t *old_ring_ptr;
uint16_t port_id, const struct rte_event *ev,
uint8_t custom_flowid)
{
+ struct roc_nix *nix = &cnxk_eth_dev->nix;
struct roc_nix_rq *rq;
+ int rc;
rq = &cnxk_eth_dev->rqs[rq_id];
rq->sso_ena = 1;
rq->tag_mask |= ev->flow_id;
}
- return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+ rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+ if (rc)
+ return rc;
+
+ if (rq_id == 0 && roc_nix_inl_inb_is_enabled(nix)) {
+ uint32_t sec_tag_const;
+
+		/* IPsec tag const applies to tag bits 32:8 only, so program
+		 * it as tag_mask shifted right by 8 (hardware effectively
+		 * applies it left-shifted by 8 bits).
+		 */
+ sec_tag_const = rq->tag_mask >> 8;
+ rc = roc_nix_inl_inb_tag_update(nix, sec_tag_const,
+ ev->sched_type);
+ if (rc)
+ plt_err("Failed to set tag conf for ipsec, rc=%d", rc);
+ }
+
+ return rc;
}
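The tag const derivation above is a fixed shift: the constant covers tag bits 32:8, so the value programmed is tag_mask >> 8 and shifting it back left by 8 recovers that portion of the mask. A tiny self-contained check, using a made-up tag_mask value purely for illustration:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint32_t tag_mask = 0x00012300; /* example value only */
        uint32_t sec_tag_const = tag_mask >> 8;

        /* Shifting the constant left by 8 recovers the bits 32:8 portion. */
        assert((sec_tag_const << 8) == (tag_mask & ~0xffu));
        return 0;
}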
static int
rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, true,
dev->force_ena_bp);
+ cnxk_eth_dev->nb_rxq_sso++;
}
if (rc < 0) {
dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
+	/* Switch inbound traffic to the PF/VF's NIX LF instead of the inline
+	 * device once all the RQs are switched to event dev mode. Do this only
+	 * when use of the inline device is not forced via devargs.
+	 */
+ if (!cnxk_eth_dev->inb.force_inl_dev &&
+ cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
+ cnxk_nix_inb_mode_set(cnxk_eth_dev, false);
+
return 0;
}
rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, false,
dev->force_ena_bp);
+ cnxk_eth_dev->nb_rxq_sso--;
}
if (rc < 0)
plt_err("Failed to clear Rx adapter config port=%d, q=%d",
eth_dev->data->port_id, rx_queue_id);
+	/* Removing the RQ from the Rx adapter implies the
+	 * inline device is needed for CQ/poll mode.
+	 */
+ cnxk_nix_inb_mode_set(cnxk_eth_dev, true);
+
return rc;
}
return 0;
}
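The two hunks above keep nb_rxq_sso in step with adapter add/del so the inbound path can be flipped at the boundaries: the PF/VF NIX LF once every RQ is event-driven (unless the inline device is forced by devargs), and back to the inline device as soon as any RQ returns to CQ/poll mode. A condensed sketch of just that decision, with hypothetical names (rx_state, use_inl_dev) standing in for the driver state:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the relevant cnxk_eth_dev fields. */
struct rx_state {
        uint16_t nb_rxq;      /* total Rx queues              */
        uint16_t nb_rxq_sso;  /* Rx queues bound to eventdev  */
        bool force_inl_dev;   /* inline device forced by devargs */
        bool use_inl_dev;     /* current inbound path         */
};

static void
rx_adapter_queue_added(struct rx_state *s)
{
        s->nb_rxq_sso++;
        /* All RQs are event-driven: inbound can use the PF/VF NIX LF. */
        if (!s->force_inl_dev && s->nb_rxq_sso == s->nb_rxq)
                s->use_inl_dev = false;
}

static void
rx_adapter_queue_deleted(struct rx_state *s)
{
        s->nb_rxq_sso--;
        /* At least one RQ is back in CQ/poll mode: inline device needed. */
        s->use_inl_dev = true;
}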
+
+static int
+cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
+{
+ return roc_npa_aura_limit_modify(
+ sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+}
+
+static int
+cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
+ uint16_t eth_port_id, uint16_t tx_queue_id,
+ void *txq)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ uint16_t max_port_id = dev->max_port_id;
+ uint64_t *txq_data = dev->tx_adptr_data;
+
+ if (txq_data == NULL || eth_port_id > max_port_id) {
+ max_port_id = RTE_MAX(max_port_id, eth_port_id);
+ txq_data = rte_realloc_socket(
+ txq_data,
+ (sizeof(uint64_t) * (max_port_id + 1) *
+ RTE_MAX_QUEUES_PER_PORT),
+ RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
+ if (txq_data == NULL)
+ return -ENOMEM;
+ }
+
+ ((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
+ txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
+ dev->max_port_id = max_port_id;
+ dev->tx_adptr_data = txq_data;
+ return 0;
+}
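cnxk_sso_updt_tx_queue_data() keeps one flat uint64_t table sized (max_port_id + 1) * RTE_MAX_QUEUES_PER_PORT and indexes it by casting to an array-of-rows type; the cast is just row-major flat indexing. A small standalone check of that equivalence, with a local QUEUES_PER_PORT constant standing in for RTE_MAX_QUEUES_PER_PORT:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define QUEUES_PER_PORT 64 /* stand-in for RTE_MAX_QUEUES_PER_PORT */

int
main(void)
{
        uint16_t max_port_id = 3, port = 2, queue = 5;
        uint64_t *txq_data = calloc((size_t)(max_port_id + 1) * QUEUES_PER_PORT,
                                    sizeof(uint64_t));

        assert(txq_data != NULL);

        /* The store the driver does via the 2D-array cast... */
        ((uint64_t(*)[QUEUES_PER_PORT])txq_data)[port][queue] = 0xdeadbeef;

        /* ...is plain row-major flat indexing underneath. */
        assert(txq_data[(size_t)port * QUEUES_PER_PORT + queue] == 0xdeadbeef);
        free(txq_data);
        return 0;
}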
+
+int
+cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct roc_nix_sq *sq;
+ int i, ret;
+ void *txq;
+
+ if (tx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
+ } else {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ sq = &cnxk_eth_dev->sqs[tx_queue_id];
+ cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+ ret = cnxk_sso_updt_tx_queue_data(
+ event_dev, eth_dev->data->port_id, tx_queue_id, txq);
+ if (ret < 0)
+ return ret;
+
+ dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
+ }
+
+ return 0;
+}
+
+int
+cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct roc_nix_sq *sq;
+ int i, ret;
+
+ RTE_SET_USED(event_dev);
+ if (tx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
+ } else {
+ sq = &cnxk_eth_dev->sqs[tx_queue_id];
+ cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+ ret = cnxk_sso_updt_tx_queue_data(
+ event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
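At the application level these driver hooks are reached through the eventdev Tx adapter API; passing -1 as the queue id takes the "all queues" branch above. A rough application-side sketch, not part of this patch, with error handling trimmed and the adapter id, event device id, ethdev port id and port_conf assumed to be set up elsewhere:

#include <rte_event_eth_tx_adapter.h>

static int
setup_tx_adapter(uint8_t id, uint8_t evdev_id, uint16_t eth_port_id,
                 struct rte_event_port_conf *port_conf)
{
        int rc;

        rc = rte_event_eth_tx_adapter_create(id, evdev_id, port_conf);
        if (rc)
                return rc;

        /* -1 adds every Tx queue of the port; on cnxk this lands in
         * cnxk_sso_tx_adapter_queue_add() with tx_queue_id < 0.
         */
        rc = rte_event_eth_tx_adapter_queue_add(id, eth_port_id, -1);
        if (rc)
                return rc;

        return rte_event_eth_tx_adapter_start(id);
}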