/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_spinlock.h>

#include "otx2_common.h"
#include "otx2_sec_idev.h"

static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];

/*
 * Check if the rte_eth_dev is a security offload capable otx2_eth_dev
 */
uint8_t
otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF ||
            pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF ||
            pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF)
                return 1;

        return 0;
}
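
/*
 * Initialize the inline security config for a port: reset the
 * round-robin CPT index and the slot spinlock, and clear every CPT
 * queue pair slot along with its reference count.
 */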
int
otx2_sec_idev_cfg_init(int port_id)
{
        struct otx2_sec_idev_cfg *cfg;
        int i;

        cfg = &sec_cfg[port_id];
        cfg->tx_cpt_idx = 0;
        rte_spinlock_init(&cfg->tx_cpt_lock);

        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
                cfg->tx_cpt[i].qp = NULL;
                rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
        }

        return 0;
}
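
/*
 * Register a CPT queue pair against a port, taking the first free
 * slot. Returns -EINVAL for bad arguments or when no slot is free.
 */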
int
otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
{
        struct otx2_sec_idev_cfg *cfg;
        int i, ret;

        if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
                return -EINVAL;

        cfg = &sec_cfg[port_id];

        /* Find a free slot to save the CPT LF */
        rte_spinlock_lock(&cfg->tx_cpt_lock);

        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
                if (cfg->tx_cpt[i].qp == NULL) {
                        cfg->tx_cpt[i].qp = qp;
                        ret = 0;
                        goto unlock;
                }
        }

        ret = -EINVAL;

unlock:
        rte_spinlock_unlock(&cfg->tx_cpt_lock);
        return ret;
}
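
/*
 * Unregister a CPT queue pair from whichever port it was added to.
 * The slot is not freed while any security session still holds a
 * reference (-EBUSY).
 */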
int
otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
{
        struct otx2_sec_idev_cfg *cfg;
        uint16_t port_id;
        int i, ret;

        if (qp == NULL)
                return -EINVAL;

        for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
                cfg = &sec_cfg[port_id];

                rte_spinlock_lock(&cfg->tx_cpt_lock);

                for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
                        if (cfg->tx_cpt[i].qp != qp)
                                continue;

                        /* Don't free if the QP is in use by any sec session */
                        if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
                                ret = -EBUSY;
                        } else {
                                cfg->tx_cpt[i].qp = NULL;
                                ret = 0;
                        }

                        goto unlock;
                }

                rte_spinlock_unlock(&cfg->tx_cpt_lock);
        }

        return -ENOENT;

unlock:
        rte_spinlock_unlock(&cfg->tx_cpt_lock);
        return ret;
}
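
/*
 * Pick the next valid CPT queue pair for the port in round-robin
 * order and take a reference on it. The caller releases it with
 * otx2_sec_idev_tx_cpt_qp_put().
 */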
int
otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp)
{
        struct otx2_sec_idev_cfg *cfg;
        uint16_t index;
        int i, ret;

        if (port_id >= OTX2_MAX_INLINE_PORTS || qp == NULL)
                return -EINVAL;

        cfg = &sec_cfg[port_id];

        rte_spinlock_lock(&cfg->tx_cpt_lock);

        index = cfg->tx_cpt_idx;

        /* Get the next index with valid data */
        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
                if (cfg->tx_cpt[index].qp != NULL)
                        break;
                index = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
        }

        if (i >= OTX2_MAX_CPT_QP_PER_PORT) {
                ret = -EINVAL;
                goto unlock;
        }

        *qp = cfg->tx_cpt[index].qp;
        rte_atomic16_inc(&cfg->tx_cpt[index].ref_cnt);

        cfg->tx_cpt_idx = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
        ret = 0;

unlock:
        rte_spinlock_unlock(&cfg->tx_cpt_lock);
        return ret;
}
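
/* Drop the reference taken by otx2_sec_idev_tx_cpt_qp_get() */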
int
otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp)
{
        struct otx2_sec_idev_cfg *cfg;
        uint16_t port_id;
        int i;

        if (qp == NULL)
                return -EINVAL;

        for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
                cfg = &sec_cfg[port_id];
                for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
                        if (cfg->tx_cpt[i].qp == qp) {
                                rte_atomic16_dec(&cfg->tx_cpt[i].ref_cnt);
                                return 0;
                        }
                }
        }

        return -EINVAL;
}