/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static roc_npa_lf_init_cb_t lf_init_cb;

int
roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
{
	if (lf_init_cb != NULL)
		return -EEXIST;

	lf_init_cb = cb;
	return 0;
}

void
roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
			  uint64_t end_iova)
{
	const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
			       NPA_LF_POOL_OP_PTR_START0;
	const uint64_t end = roc_npa_aura_handle_to_base(aura_handle) +
			     NPA_LF_POOL_OP_PTR_END0;
	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aura_lim *lim;

	PLT_ASSERT(lf);
	lim = lf->aura_lim;

	/* Widen the tracked [start, end] window to cover this range */
	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);

	roc_store_pair(lim[reg].ptr_start, reg, start);
	roc_store_pair(lim[reg].ptr_end, reg, end);
}

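/*
 * Usage sketch (illustrative, not part of the driver): a consumer backing a
 * pool with one contiguous region would typically free each block into the
 * aura and then publish the covered IOVA range so the HW range checks pass.
 * The guard macro, function name and buffer layout here are hypothetical.
 */
#ifdef ROC_NPA_USAGE_EXAMPLES
static void
example_populate_pool(uint64_t aura_handle, uint64_t base_iova,
		      uint32_t block_size, uint32_t block_count)
{
	uint32_t i;

	/* Hand every block to the aura */
	for (i = 0; i < block_count; i++)
		roc_npa_aura_op_free(aura_handle, 0,
				     base_iova + (uint64_t)i * block_size);

	/* Publish the range spanned by the blocks freed above */
	roc_npa_aura_op_range_set(aura_handle, base_iova,
				  base_iova +
					  (uint64_t)block_count * block_size);
}
#endif
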
static int
npa_aura_pool_init(struct mbox *mbox, uint32_t aura_id, struct npa_aura_s *aura,
		   struct npa_pool_s *pool)
{
	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
	struct mbox_dev *mdev = &mbox->dev[0];
	int rc = -ENOSPC, off;

	aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (aura_init_req == NULL)
		return rc;
	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_init_req == NULL)
		return rc;
	pool_init_req->aura_id = aura_id;
	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
		return 0;
	else
		return NPA_ERR_AURA_POOL_INIT;
}

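/*
 * Note on the response walk above: both AQ requests were queued into a
 * single mailbox window, so after mbox_process() the first response sits
 * right after the (aligned) mbox header and the second one is reached via
 * the first response's hdr.next_msgoff:
 *
 *   mbase + rx_start + align(sizeof(struct mbox_hdr)) -> aura_init_rsp
 *   mbase + rx_start + aura_init_rsp->hdr.next_msgoff -> pool_init_rsp
 */
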
static int
npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle)
{
	struct npa_aq_enq_req *aura_req, *pool_req;
	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
	struct mbox_dev *mdev = &mbox->dev[0];
	struct ndc_sync_op *ndc_req;
	int rc = -ENOSPC, off;
	uint64_t ptr;

	/* Procedure for disabling an aura/pool */
	rc = roc_npa_aura_limit_modify(aura_handle, 0);
	if (rc)
		return NPA_ERR_AURA_POOL_FINI;

	/* Clear all the pointers from the aura */
	do {
		ptr = roc_npa_aura_op_alloc(aura_handle, 0);
	} while (ptr);

	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_req == NULL)
		return -ENOSPC;
	pool_req->aura_id = aura_id;
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.ena = 0;
	/* Requests are zero-initialized, so ~0 selects only the ena field */
	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;

	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (aura_req == NULL)
		return -ENOSPC;
	aura_req->aura_id = aura_id;
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
	aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)
		return NPA_ERR_AURA_POOL_FINI;

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		return NPA_ERR_AURA_POOL_FINI;
	}

	return 0;
}

int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *pool_req;
	struct npa_aq_enq_rsp *pool_rsp;
	struct ndc_sync_op *ndc_req;
	struct mbox_dev *mdev;
	int rc = -ENOSPC, off;
	struct mbox *mbox;

	if (lf == NULL)
		return NPA_ERR_PARAM;

	mbox = lf->mbox;
	mdev = &mbox->dev[0];
	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);

	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_req == NULL)
		return rc;
	pool_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.op_pc = 0;
	pool_req->pool_mask.op_pc = ~pool_req->pool_mask.op_pc;

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (pool_rsp->hdr.rc != 0)
		return NPA_ERR_AURA_POOL_FINI;

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		return NPA_ERR_AURA_POOL_FINI;
	}

	return 0;
}

int
roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
{
	struct npa_aq_enq_req *aura_req;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
	if (aura_req == NULL)
		return -ENOMEM;
	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.aura_drop_ena = ena;
	aura_req->aura.aura_drop = limit;
	aura_req->aura_mask.aura_drop_ena =
		~(aura_req->aura_mask.aura_drop_ena);
	aura_req->aura_mask.aura_drop = ~(aura_req->aura_mask.aura_drop);
	rc = mbox_process(lf->mbox);

	return rc;
}

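/*
 * Usage sketch (illustrative only): arm aura-level drop with a fixed
 * buffer threshold. The threshold value is a hypothetical example and the
 * guard macro is not a real build option.
 */
#ifdef ROC_NPA_USAGE_EXAMPLES
static int
example_enable_aura_drop(uint64_t aura_handle)
{
	/* Request HW drop behaviour with a drop level of 64 buffers */
	return roc_npa_aura_drop_set(aura_handle, 64, true);
}
#endif
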
static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
	snprintf(name, PLT_MEMZONE_NAMESIZE, "roc_npa_stack_%x_%d", lf->pf_func,
		 pool_id);

	return name;
}

static inline const struct plt_memzone *
npa_stack_dma_alloc(struct npa_lf *lf, char *name, int pool_id, size_t size)
{
	const char *mz_name = npa_stack_memzone_name(lf, pool_id, name);
	size = PLT_ALIGN_CEIL(size, ROC_ALIGN);

	return plt_memzone_reserve_aligned(mz_name, size, 0, ROC_ALIGN);
}

static inline int
npa_stack_dma_free(struct npa_lf *lf, char *name, int pool_id)
{
	const struct plt_memzone *mz;

	mz = plt_memzone_lookup(npa_stack_memzone_name(lf, pool_id, name));
	if (mz == NULL)
		return NPA_ERR_PARAM;

	return plt_memzone_free(mz);
}

static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}

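/*
 * Worked example: if plt_bitmap_scan() reports pos = 64 with slab = 0x100
 * (only bit 8 set), the first free aura/pool id computed below is
 * 64 + ctz(0x100) = 64 + 8 = 72.
 */
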
static int
npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
			 const uint32_t block_count, struct npa_aura_s *aura,
			 struct npa_pool_s *pool, uint64_t *aura_handle)
{
	int rc, aura_id, pool_id, stack_size, alloc_size;
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	uint64_t slab;
	uint32_t pos;

	/* Sanity check */
	if (!lf || !block_size || !block_count || !pool || !aura ||
	    !aura_handle)
		return NPA_ERR_PARAM;

	/* Block size should be cache line aligned and in range of 128B-128KB */
	if (block_size % ROC_ALIGN || block_size < 128 ||
	    block_size > ROC_NPA_MAX_BLOCK_SZ)
		return NPA_ERR_INVALID_BLOCK_SZ;

	pos = 0;
	slab = 0;
	/* Scan from the beginning */
	plt_bitmap_scan_init(lf->npa_bmp);
	/* Scan bitmap to get the free pool */
	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		plt_err("Mempools exhausted");
		return NPA_ERR_AURA_ID_ALLOC;
	}

	/* Get aura_id from resource bitmap */
	aura_id = pos + bitmap_ctzll(slab);
	/* Mark pool as reserved */
	plt_bitmap_clear(lf->npa_bmp, aura_id);

	/* Configuration based on each aura having a separate pool (aura-pool pair) */
	pool_id = aura_id;
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
	      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
			   NPA_ERR_AURA_ID_ALLOC :
			   0;
	if (rc)
		goto exit;

	/* Allocate stack memory */
	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
	alloc_size = stack_size * lf->stack_pg_bytes;

	mz = npa_stack_dma_alloc(lf, name, pool_id, alloc_size);
	if (mz == NULL) {
		rc = NPA_ERR_ALLOC;
		goto aura_res_put;
	}

	/* Update aura fields */
	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = plt_log2_u32(block_count);
	aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);

	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Update pool fields */
	pool->stack_base = mz->iova;
	pool->ena = 1;
	pool->buf_size = block_size / ROC_ALIGN;
	pool->stack_max_pages = stack_size;
	pool->shift = plt_log2_u32(block_count);
	pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
	pool->ptr_start = 0;
	pool->ptr_end = ~0;
	pool->stack_caching = 1;
	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);

	/* Many to one reduction */
	pool->err_qint_idx = pool_id % lf->qints;

	/* Issue AURA_INIT and POOL_INIT op */
	rc = npa_aura_pool_init(lf->mbox, aura_id, aura, pool);
	if (rc)
		goto stack_mem_free;

	*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
	/* Update aura count */
	roc_npa_aura_op_cnt_set(*aura_handle, 0, block_count);
	/* Read it back to make sure aura count is updated */
	roc_npa_aura_op_cnt_get(*aura_handle);

	return 0;

stack_mem_free:
	plt_memzone_free(mz);
aura_res_put:
	plt_bitmap_set(lf->npa_bmp, aura_id);
exit:
	return rc;
}

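/*
 * Worked example for the stack sizing above, with hypothetical LF values:
 * block_count = 4096 and stack_pg_ptrs = 8 give stack_size =
 * (4096 + 7) / 8 = 512 pages; with stack_pg_bytes = 256 that is a 128 KB
 * stack memzone. The shift fields encode log2(block_count) - 8, clamped
 * at 0, i.e. 12 - 8 = 4 here.
 */
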
int
roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
		    uint32_t block_count, struct npa_aura_s *aura,
		    struct npa_pool_s *pool)
{
	struct npa_aura_s defaura;
	struct npa_pool_s defpool;
	struct idev_cfg *idev;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL) {
		rc = NPA_ERR_DEVICE_NOT_BOUNDED;
		goto error;
	}

	idev = idev_get_cfg();
	if (idev == NULL) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (aura == NULL) {
		memset(&defaura, 0, sizeof(struct npa_aura_s));
		aura = &defaura;
	}
	if (pool == NULL) {
		memset(&defpool, 0, sizeof(struct npa_pool_s));
		defpool.nat_align = 1;
		defpool.buf_offset = 1;
		pool = &defpool;
	}

	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
				      aura_handle);
	if (rc) {
		plt_err("Failed to alloc pool or aura rc=%d", rc);
		goto error;
	}

	plt_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%" PRIx64,
		    lf, block_size, block_count, *aura_handle);

	/* Just hold the reference of the object */
	__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
error:
	return rc;
}

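/*
 * Usage sketch (illustrative only): create a pool of 8192 2 KB blocks with
 * the default aura/pool contexts (aura == pool == NULL), then destroy it.
 * The sizes and the guard macro are hypothetical.
 */
#ifdef ROC_NPA_USAGE_EXAMPLES
static int
example_pool_lifecycle(void)
{
	uint64_t aura_handle = 0;
	int rc;

	rc = roc_npa_pool_create(&aura_handle, 2048, 8192, NULL, NULL);
	if (rc)
		return rc;

	return roc_npa_pool_destroy(aura_handle);
}
#endif
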
int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
	struct npa_aq_enq_req *aura_req;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
	if (aura_req == NULL)
		return -ENOMEM;
	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = aura_limit;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
	rc = mbox_process(lf->mbox);

	return rc;
}

static int
npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
{
	char name[PLT_MEMZONE_NAMESIZE];
	int aura_id, pool_id, rc;

	if (!lf || !aura_handle)
		return NPA_ERR_PARAM;

	aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_id = aura_id;
	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
	rc |= npa_stack_dma_free(lf, name, pool_id);

	/* Mark the aura/pool id as free again */
	plt_bitmap_set(lf->npa_bmp, aura_id);

	return rc;
}

int
roc_npa_pool_destroy(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	int rc = 0;

	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
	rc = npa_aura_pool_pair_free(lf, aura_handle);
	if (rc)
		plt_err("Failed to destroy pool or aura rc=%d", rc);

	/* Release the reference of npa */
	rc |= npa_lf_fini();
	return rc;
}

int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf;
	struct npa_aura_lim *lim;
	__io struct npa_pool_s *pool;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_PARAM;

	lim = lf->aura_lim;

	req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
	if (req == NULL)
		return -ENOSPC;

	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_POOL;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(lf->mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to get pool(0x%" PRIx64 ") context", aura_id);
		return rc;
	}

	pool = &rsp->pool;
	/* Verify the range the hardware took matches what we tracked */
	if (lim[aura_id].ptr_start != pool->ptr_start ||
	    lim[aura_id].ptr_end != pool->ptr_end) {
		plt_err("Range update failed on pool(0x%" PRIx64 ")", aura_id);
		return NPA_ERR_PARAM;
	}

	return 0;
}

static inline int
npa_attach(struct mbox *mbox)
{
	struct rsrc_attach_req *req;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->modify = true;
	req->npalf = true;

	return mbox_process(mbox);
}

static inline int
npa_detach(struct mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->partial = true;
	req->npalf = true;

	return mbox_process(mbox);
}

static inline int
npa_get_msix_offset(struct mbox *mbox, uint16_t *npa_msixoff)
{
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc == 0)
		*npa_msixoff = msix_rsp->npa_msixoff;

	return rc;
}

static inline int
npa_lf_alloc(struct npa_lf *lf)
{
	struct mbox *mbox = lf->mbox;
	struct npa_lf_alloc_req *req;
	struct npa_lf_alloc_rsp *rsp;
	int rc;

	req = mbox_alloc_msg_npa_lf_alloc(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->aura_sz = lf->aura_sz;
	req->nr_pools = lf->nr_pools;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return NPA_ERR_ALLOC;

	lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
	lf->stack_pg_bytes = rsp->stack_pg_bytes;
	lf->qints = rsp->qints;

	return 0;
}

static int
npa_lf_free(struct mbox *mbox)
{
	mbox_alloc_msg_npa_lf_free(mbox);
	return mbox_process(mbox);
}

static inline uint32_t
aura_size_to_u32(uint8_t val)
{
	if (val == NPA_AURA_SZ_0)
		return 128;
	if (val >= NPA_AURA_SZ_MAX)
		return BIT_ULL(20);

	return 1 << (val + 6);
}

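/*
 * Worked example: NPA_AURA_SZ_128 decodes to 1 << (NPA_AURA_SZ_128 + 6) =
 * 128 auras, while NPA_AURA_SZ_0 and out-of-range encodings are handled by
 * the explicit checks above.
 */
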
static inline void
pool_count_aura_sz_get(uint32_t *nr_pools, uint8_t *aura_sz)
{
	uint32_t val;

	val = roc_idev_npa_maxpools_get();
	if (val < aura_size_to_u32(NPA_AURA_SZ_128))
		val = 128;
	if (val > aura_size_to_u32(NPA_AURA_SZ_1M))
		val = BIT_ULL(20);

	roc_idev_npa_maxpools_set(val);
	*nr_pools = val;
	*aura_sz = plt_log2_u32(val) - 6;
}

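/*
 * Worked example: a requested maximum of 1024 pools lies inside the
 * [128, 1M] clamp window, so it is kept as-is and encoded as aura_sz =
 * plt_log2_u32(1024) - 6 = 4.
 */
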
static int
npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
{
	uint32_t i, bmp_sz, nr_pools;
	uint8_t aura_sz;
	int rc;

	/* Sanity checks */
	if (!lf || !base || !mbox)
		return NPA_ERR_PARAM;

	if (base & ROC_AURA_ID_MASK)
		return NPA_ERR_BASE_INVALID;

	pool_count_aura_sz_get(&nr_pools, &aura_sz);
	if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
		return NPA_ERR_PARAM;

	memset(lf, 0x0, sizeof(*lf));
	lf->base = base;
	lf->aura_sz = aura_sz;
	lf->nr_pools = nr_pools;
	lf->mbox = mbox;

	rc = npa_lf_alloc(lf);
	if (rc)
		goto exit;

	bmp_sz = plt_bitmap_get_memory_footprint(nr_pools);

	/* Allocate memory for bitmap */
	lf->npa_bmp_mem = plt_zmalloc(bmp_sz, ROC_ALIGN);
	if (lf->npa_bmp_mem == NULL) {
		rc = NPA_ERR_ALLOC;
		goto lf_free;
	}

	/* Initialize pool resource bitmap array */
	lf->npa_bmp = plt_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
	if (lf->npa_bmp == NULL) {
		rc = NPA_ERR_PARAM;
		goto bmap_mem_free;
	}

	/* Mark all pools available */
	for (i = 0; i < nr_pools; i++)
		plt_bitmap_set(lf->npa_bmp, i);

	/* Allocate memory for qint context */
	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
	if (lf->npa_qint_mem == NULL) {
		rc = NPA_ERR_ALLOC;
		goto bmap_free;
	}

	/* Allocate memory for npa_aura_lim contexts */
	lf->aura_lim = plt_zmalloc(sizeof(struct npa_aura_lim) * nr_pools, 0);
	if (lf->aura_lim == NULL) {
		rc = NPA_ERR_ALLOC;
		goto qint_free;
	}

	/* Init aura start & end limits */
	for (i = 0; i < nr_pools; i++) {
		lf->aura_lim[i].ptr_start = UINT64_MAX;
		lf->aura_lim[i].ptr_end = 0x0ull;
	}

	return 0;

qint_free:
	plt_free(lf->npa_qint_mem);
bmap_free:
	plt_bitmap_free(lf->npa_bmp);
bmap_mem_free:
	plt_free(lf->npa_bmp_mem);
lf_free:
	npa_lf_free(lf->mbox);
exit:
	return rc;
}

static int
npa_dev_fini(struct npa_lf *lf)
{
	if (!lf)
		return NPA_ERR_PARAM;

	plt_free(lf->aura_lim);
	plt_free(lf->npa_qint_mem);
	plt_bitmap_free(lf->npa_bmp);
	plt_free(lf->npa_bmp_mem);

	return npa_lf_free(lf->mbox);
}

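/*
 * Teardown mirrors npa_dev_init() in reverse: the aura limits array, the
 * qint contexts, the bitmap and its backing memory are released first, and
 * the LF itself is freed last via npa_lf_free().
 */
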
int
npa_lf_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct idev_cfg *idev;
	uint16_t npa_msixoff;
	struct npa_lf *lf;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the first PCI device */
	if (__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
		return 0;

	if (lf_init_cb) {
		rc = (*lf_init_cb)(pci_dev);
		if (rc)
			goto fail;
	}

	rc = npa_attach(dev->mbox);
	if (rc)
		goto fail;

	rc = npa_get_msix_offset(dev->mbox, &npa_msixoff);
	if (rc)
		goto npa_detach;

	lf = &dev->npa;
	rc = npa_dev_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
			  dev->mbox);
	if (rc)
		goto npa_detach;

	lf->pf_func = dev->pf_func;
	lf->npa_msixoff = npa_msixoff;
	lf->intr_handle = pci_dev->intr_handle;
	lf->pci_dev = pci_dev;

	idev->npa_pf_func = dev->pf_func;
	idev->npa = lf;
	plt_wmb();

	rc = npa_register_irqs(lf);
	if (rc)
		goto npa_fini;

	plt_npa_dbg("npa=%p max_pools=%d pf_func=0x%x msix=0x%x", lf,
		    roc_idev_npa_maxpools_get(), lf->pf_func, npa_msixoff);

	return 0;

npa_fini:
	npa_dev_fini(idev->npa);
npa_detach:
	npa_detach(dev->mbox);
fail:
	__atomic_fetch_sub(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
	return rc;
}

int
npa_lf_fini(void)
{
	struct idev_cfg *idev;
	int rc = 0;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the last PCI device */
	if (__atomic_sub_fetch(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
		return 0;

	npa_unregister_irqs(idev->npa);
	rc |= npa_dev_fini(idev->npa);
	rc |= npa_detach(idev->npa->mbox);
	idev_set_defaults(idev);

	return rc;
}

int
roc_npa_dev_init(struct roc_npa *roc_npa)
{
	struct plt_pci_device *pci_dev;
	struct npa *npa;
	struct dev *dev;
	int rc;

	if (roc_npa == NULL || roc_npa->pci_dev == NULL)
		return NPA_ERR_PARAM;

	PLT_STATIC_ASSERT(sizeof(struct npa) <= ROC_NPA_MEM_SZ);
	npa = roc_npa_to_npa_priv(roc_npa);
	memset(npa, 0, sizeof(*npa));
	pci_dev = roc_npa->pci_dev;
	dev = &npa->dev;

	/* Initialize device */
	rc = dev_init(dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	npa->pci_dev = pci_dev;
	dev->drv_inited = true;
fail:
	return rc;
}

int
roc_npa_dev_fini(struct roc_npa *roc_npa)
{
	struct npa *npa = roc_npa_to_npa_priv(roc_npa);

	if (npa == NULL)
		return NPA_ERR_PARAM;

	npa->dev.drv_inited = false;
	return dev_fini(&npa->dev, npa->pci_dev);
}

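/*
 * Usage sketch (illustrative only): the expected device-level call order
 * from a driver. Error handling is trimmed and the static object placement
 * plus the guard macro are hypothetical.
 */
#ifdef ROC_NPA_USAGE_EXAMPLES
static struct roc_npa example_roc_npa;

static int
example_dev_lifecycle(struct plt_pci_device *pci_dev)
{
	int rc;

	example_roc_npa.pci_dev = pci_dev;
	rc = roc_npa_dev_init(&example_roc_npa);
	if (rc)
		return rc;

	/* ... roc_npa_pool_create()/roc_npa_pool_destroy() calls ... */

	return roc_npa_dev_fini(&example_roc_npa);
}
#endif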