1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <cnxk_ethdev.h>
/* Devargs key strings accepted by the inline device PMD (see the
 * RTE_PMD_REGISTER_PARAM_STRING() at the bottom of this file).
 */
7 #define CNXK_NIX_INL_SELFTEST "selftest"
8 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
9 #define CNXK_INL_CPT_CHANNEL "inl_cpt_channel"
/* Parsed value of the "inl_cpt_channel" devargs.
 * NOTE(review): lines appear elided here -- nix_inl_parse_devargs() also
 * reads ->channel and ->mask members of this struct; confirm the full
 * definition against the complete file.
 */
11 struct inl_cpt_channel {
12 bool is_multi_channel;
/* Memzone name for the inline device state: fixed prefix followed by the
 * PCI address (see nix_inl_dev_to_name()).  The buffer length accounts for
 * the prefix (sizeof includes its NUL) plus the printed PCI address.
 */
17 #define CNXK_NIX_INL_DEV_NAME RTE_STR(cnxk_nix_inl_dev_)
18 #define CNXK_NIX_INL_DEV_NAME_LEN \
19 (sizeof(CNXK_NIX_INL_DEV_NAME) + PCI_PRI_STR_SIZE)
/* Return the bit position of the lowest set bit in @slab.
 * NOTE(review): __builtin_ctzll() is undefined for slab == 0; the elided
 * lines presumably guard against that -- confirm against the full file.
 */
22 bitmap_ctzll(uint64_t slab)
27 return __builtin_ctzll(slab);
/* Allocate a free outbound SA index from the device's SA bitmap.
 *
 * Scans dev->outb.sa_bmap from the start, takes the first set (free) bit,
 * and clears it to mark the index as in use.  Presumably the resulting
 * index is stored through @idx_p on an elided line -- confirm against the
 * full file.  Fails when no bitmap exists or when all SAs are exhausted.
 */
31 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
37 if (!dev->outb.sa_bmap)
42 /* Scan from the beginning */
43 plt_bitmap_scan_init(dev->outb.sa_bmap);
44 /* Scan bitmap to get the free sa index */
45 rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
48 plt_err("Outbound SA' exhausted, use 'ipsec_out_max_sa' "
49 "devargs to increase");
53 /* Get free SA index: pos is the slab base, ctzll picks the free bit */
54 idx = pos + bitmap_ctzll(slab);
55 plt_bitmap_clear(dev->outb.sa_bmap, idx);
/* Release an outbound SA index back to the bitmap.
 *
 * Rejects out-of-range indices and double-free (a set bit means the index
 * is already free); otherwise sets the bit to make the index allocatable
 * again by cnxk_eth_outb_sa_idx_get().
 */
61 cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx)
63 if (idx >= dev->outb.max_sa)
66 /* Check if it is already free */
67 if (plt_bitmap_get(dev->outb.sa_bmap, idx))
70 /* Mark index as free */
71 plt_bitmap_set(dev->outb.sa_bmap, idx);
/* Find a security session by SPI.
 *
 * @inb selects which list to search: inbound (dev->inb.list) when true,
 * outbound (dev->outb.list) otherwise.  Linear scan comparing each
 * session's SPI; presumably returns the matching session or NULL on the
 * elided lines -- confirm against the full file.
 */
75 struct cnxk_eth_sec_sess *
76 cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, uint32_t spi, bool inb)
78 struct cnxk_eth_sec_sess_list *list;
79 struct cnxk_eth_sec_sess *eth_sec;
81 list = inb ? &dev->inb.list : &dev->outb.list;
82 TAILQ_FOREACH(eth_sec, list, entry) {
83 if (eth_sec->spi == spi)
/* Find the driver-private session matching an rte_security_session.
 *
 * Searches the inbound list first, then the outbound list, comparing the
 * stored rte_security_session pointer.  Presumably returns the match or
 * NULL on the elided lines -- confirm against the full file.
 */
90 struct cnxk_eth_sec_sess *
91 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
92 struct rte_security_session *sess)
94 struct cnxk_eth_sec_sess *eth_sec = NULL;
96 /* Search in inbound list */
97 TAILQ_FOREACH(eth_sec, &dev->inb.list, entry) {
98 if (eth_sec->sess == sess)
102 /* Search in outbound list */
103 TAILQ_FOREACH(eth_sec, &dev->outb.list, entry) {
104 if (eth_sec->sess == sess)
/* rte_security .session_get_size callback: size of the driver-private
 * session object the security library must allocate per session.
 */
112 cnxk_eth_sec_session_get_size(void *device __rte_unused)
114 return sizeof(struct cnxk_eth_sec_sess);
/* rte_security operations exposed by this driver.  Only
 * .session_get_size is visible here; other callbacks, if any, are on
 * elided lines.
 */
117 struct rte_security_ops cnxk_eth_sec_ops = {
118 .session_get_size = cnxk_eth_sec_session_get_size
/* rte_kvargs callback for "ipsec_in_max_spi": parse @value (on elided
 * lines) and store it through @extra_args.
 *
 * NOTE(review): this writes through a uint16_t pointer, but the only
 * visible caller, nix_inl_parse_devargs(), passes the address of a
 * uint32_t (ipsec_in_max_spi).  That is a type mismatch: only part of the
 * destination is written and values above 65535 would be truncated.
 * Confirm and fix in the full file (store as uint32_t, or change the
 * caller's variable to uint16_t).
 */
122 parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
129 *(uint16_t *)extra_args = val;
/* rte_kvargs callback for "selftest": store 1 through @extra_args when the
 * parsed value equals 1, else 0 (value parsing is on elided lines).
 */
135 parse_selftest(const char *key, const char *value, void *extra_args)
142 *(uint8_t *)extra_args = !!(val == 1);
/* rte_kvargs callback for "inl_cpt_channel": parse "<chan>/<mask>" (both
 * hex) into the inl_cpt_channel supplied via @extra_args, and flag
 * multi-channel mode.  Both fields are limited to 13 bits (GENMASK(12,0)).
 *
 * NOTE(review): the visible code never checks that *next == '/' after the
 * first strtol().  If the separator is absent, next points at the
 * terminating NUL and ++next indexes one past it -- strtol() then reads
 * beyond the string.  Confirm whether an elided line validates the
 * separator; if not, add "if (*next != '/') return -EINVAL;".
 */
147 parse_inl_cpt_channel(const char *key, const char *value, void *extra_args)
150 uint16_t chan = 0, mask = 0;
153 /* next will point to the separator '/' */
154 chan = strtol(value, &next, 16);
155 mask = strtol(++next, 0, 16);
157 if (chan > GENMASK(12, 0) || mask > GENMASK(12, 0))
160 ((struct inl_cpt_channel *)extra_args)->channel = chan;
161 ((struct inl_cpt_channel *)extra_args)->mask = mask;
162 ((struct inl_cpt_channel *)extra_args)->is_multi_channel = true;
/* Parse the inline device's devargs string into @inl_dev.
 *
 * Recognized keys: "selftest", "ipsec_in_max_spi" (default BIT(8)-1 = 255)
 * and "inl_cpt_channel".  Results are copied into the roc_nix_inl_dev
 * fields at the end.
 *
 * NOTE(review): the rte_kvargs_process() return values are ignored in the
 * visible code, so malformed values may go unreported.  Also,
 * ipsec_in_max_spi is a uint32_t here but parse_ipsec_in_max_spi() stores
 * through a uint16_t pointer -- type mismatch, see that callback.
 */
168 nix_inl_parse_devargs(struct rte_devargs *devargs,
169 struct roc_nix_inl_dev *inl_dev)
171 uint32_t ipsec_in_max_spi = BIT(8) - 1;
172 struct inl_cpt_channel cpt_channel;
173 struct rte_kvargs *kvlist;
174 uint8_t selftest = 0;
176 memset(&cpt_channel, 0, sizeof(cpt_channel));
181 kvlist = rte_kvargs_parse(devargs->args, NULL);
185 rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
187 rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
188 &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
189 rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
191 rte_kvargs_free(kvlist);
194 inl_dev->ipsec_in_max_spi = ipsec_in_max_spi;
195 inl_dev->selftest = selftest;
196 inl_dev->channel = cpt_channel.channel;
197 inl_dev->chan_mask = cpt_channel.mask;
198 inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
/* Build the per-device memzone name ("cnxk_nix_inl_dev_" + PCI address)
 * into @name, which must be at least CNXK_NIX_INL_DEV_NAME_LEN bytes.
 * Presumably returns @name (callers pass the result straight to memzone
 * lookup/reserve) -- return statement is on an elided line.
 */
205 nix_inl_dev_to_name(struct rte_pci_device *pci_dev, char *name)
207 snprintf(name, CNXK_NIX_INL_DEV_NAME_LEN,
208 CNXK_NIX_INL_DEV_NAME PCI_PRI_FMT, pci_dev->addr.domain,
209 pci_dev->addr.bus, pci_dev->addr.devid,
210 pci_dev->addr.function);
/* PCI remove callback: tear down the inline device.
 *
 * Primary process only.  Looks up the memzone that holds the
 * roc_nix_inl_dev state (named after the PCI address), finalizes the ROC
 * inline device, and frees the memzone.  Presumably `dev` is taken from
 * mz->addr on an elided line -- confirm against the full file.
 */
216 cnxk_nix_inl_dev_remove(struct rte_pci_device *pci_dev)
218 char name[CNXK_NIX_INL_DEV_NAME_LEN];
219 const struct rte_memzone *mz;
220 struct roc_nix_inl_dev *dev;
223 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
226 mz = rte_memzone_lookup(nix_inl_dev_to_name(pci_dev, name));
232 /* Cleanup inline dev */
233 rc = roc_nix_inl_dev_fini(dev);
235 plt_err("Failed to cleanup inl dev, rc=%d(%s)", rc,
236 roc_error_msg_get(rc));
240 rte_memzone_free(mz);
/* PCI probe callback: bring up the inline device.
 *
 * Primary process only.  Reserves a cache-line-aligned memzone (named
 * after the PCI address) to hold the roc_nix_inl_dev state, parses
 * devargs into it, then initializes the ROC inline device.  Presumably
 * `inl_dev` is taken from mz->addr on an elided line, and the
 * rte_memzone_free() at the end is the error-unwind path -- confirm
 * against the full file.
 */
245 cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
246 struct rte_pci_device *pci_dev)
248 char name[CNXK_NIX_INL_DEV_NAME_LEN];
249 struct roc_nix_inl_dev *inl_dev;
250 const struct rte_memzone *mz;
253 RTE_SET_USED(pci_drv);
257 plt_err("Failed to initialize platform model, rc=%d", rc);
261 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
264 mz = rte_memzone_reserve_aligned(nix_inl_dev_to_name(pci_dev, name),
265 sizeof(*inl_dev), SOCKET_ID_ANY, 0,
266 RTE_CACHE_LINE_SIZE);
271 inl_dev->pci_dev = pci_dev;
273 /* Parse devargs string */
274 rc = nix_inl_parse_devargs(pci_dev->device.devargs, inl_dev);
276 plt_err("Failed to parse devargs rc=%d", rc);
280 rc = roc_nix_inl_dev_init(inl_dev);
282 plt_err("Failed to init nix inl device, rc=%d(%s)", rc,
283 roc_error_msg_get(rc));
289 rte_memzone_free(mz);
/* PCI IDs matched by this driver: the cnxk RVU NIX inline PF and VF.
 * NOTE(review): rte_pci_id tables must end with a zeroed sentinel entry;
 * it is presumably on an elided line -- confirm against the full file.
 */
293 static const struct rte_pci_id cnxk_nix_inl_pci_map[] = {
294 {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_PF)},
295 {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_VF)},
/* PCI driver descriptor: requires BAR mapping and IOVA-as-VA mode. */
301 static struct rte_pci_driver cnxk_nix_inl_pci = {
302 .id_table = cnxk_nix_inl_pci_map,
303 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
304 .probe = cnxk_nix_inl_dev_probe,
305 .remove = cnxk_nix_inl_dev_remove,
/* Register the PMD with EAL: driver, PCI ID table, required kernel module
 * (vfio-pci), and the documented devargs syntax.
 */
308 RTE_PMD_REGISTER_PCI(cnxk_nix_inl, cnxk_nix_inl_pci);
309 RTE_PMD_REGISTER_PCI_TABLE(cnxk_nix_inl, cnxk_nix_inl_pci_map);
310 RTE_PMD_REGISTER_KMOD_DEP(cnxk_nix_inl, "vfio-pci");
312 RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
313 CNXK_NIX_INL_SELFTEST "=1"
314 CNXK_NIX_INL_IPSEC_IN_MAX_SPI "=<1-65535>"
315 CNXK_INL_CPT_CHANNEL "=<1-4095>/<1-4095>");