drivers/compress/octeontx/otx_zip.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "otx_zip.h"

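/*
 * MMIO helpers: read or write a 64-bit ZIP VF register at the given byte
 * offset from the mapped BAR0 base.
 */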
uint64_t
zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
{
        uint8_t *base = hw_addr;
        return *(volatile uint64_t *)(base + offset);
}

void
zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
{
        uint8_t *base = hw_addr;
        *(volatile uint64_t *)(base + offset) = val;
}

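/* Enable the VF command queue by setting the ENA bit in ZIP_VQ_ENA. */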
static void
zip_q_enable(struct zipvf_qp *qp)
{
        zip_vqx_ena_t que_ena;

        /* ZIP VFx command queue init */
        que_ena.u = 0ull;
        que_ena.s.ena = 1;

        zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
        rte_wmb();
}

/* initialize given qp on zip device */
int
zipvf_q_init(struct zipvf_qp *qp)
{
        zip_vqx_sbuf_addr_t que_sbuf_addr;
        uint64_t size;
        void *cmdq_addr;
        uint64_t iova;
        struct zipvf_cmdq *cmdq = &qp->cmdq;
        struct zip_vf *vf = qp->vf;

        /* allocate and setup instruction queue */
        size = ZIP_MAX_CMDQ_SIZE;
        size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);

        cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
        if (cmdq_addr == NULL)
                return -ENOMEM;

        cmdq->sw_head = (uint64_t *)cmdq_addr;
        cmdq->va = (uint8_t *)cmdq_addr;
        iova = rte_mem_virt2iova(cmdq_addr);

        cmdq->iova = iova;

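        /*
         * ZIP_VQ_SBUF_ADDR.PTR takes the command buffer IOVA shifted right
         * by 7, i.e. a 128-byte aligned chunk address.
         */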
        que_sbuf_addr.u = 0ull;
        que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
        zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);

        zip_q_enable(qp);

        memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
        rte_spinlock_init(&cmdq->qlock);

        return 0;
}

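/* Clear and free the qp's command queue, then disable the VF queue. */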
int
zipvf_q_term(struct zipvf_qp *qp)
{
        struct zipvf_cmdq *cmdq = &qp->cmdq;
        zip_vqx_ena_t que_ena;
        struct zip_vf *vf = qp->vf;

        if (cmdq->va != NULL) {
                memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
                rte_free(cmdq->va);
        }

        /* Disable the ZIP queue */
        que_ena.u = 0ull;
        zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);

        return 0;
}

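/*
 * Enqueue one ZIP instruction: copy it to the current software head of the
 * command queue, handle the wrap back to the queue base through the
 * next-chunk buffer pointer, and ring the doorbell to notify the hardware.
 */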
void
zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
{
        zip_quex_doorbell_t dbell;
        union zip_nptr_s ncp;
        uint64_t *ncb_ptr;
        struct zipvf_cmdq *cmdq = &qp->cmdq;
        void *reg_base = qp->vf->vbar0;

        /* Take the queue lock */
        rte_spinlock_lock(&(cmdq->qlock));

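        /*
         * The command queue is one chunk of ZIP_MAX_CMDQ_SIZE bytes; its
         * last ZIP_MAX_NCBP_SIZE bytes are reserved for the next-chunk
         * buffer pointer, which is pointed back at the chunk base so the
         * queue behaves as a ring.
         */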
        /* Check space availability in zip cmd queue */
        if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
                ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
                /* Last buffer of the command queue */
                memcpy((uint8_t *)cmdq->sw_head,
                        (uint8_t *)cmd,
                        sizeof(union zip_inst_s));
                /* move pointer to next loc in unit of 64-bit word */
                cmdq->sw_head += ZIP_CMD_SIZE_WORDS;

                /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
                ncb_ptr = cmdq->sw_head;
                /* Point head back to the command queue base */
                cmdq->sw_head = (uint64_t *)cmdq->va;

                ncp.u = 0ull;
                ncp.s.addr = cmdq->iova;
                *ncb_ptr = ncp.u;
        } else {
                /* Enough buffers available in the command queue */
                memcpy((uint8_t *)cmdq->sw_head,
                        (uint8_t *)cmd,
                        sizeof(union zip_inst_s));
                cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
        }

        rte_wmb();

        /* Ring the ZIP VF doorbell */
        dbell.u = 0ull;
        dbell.s.dbell_cnt = 1;
        zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);

        rte_spinlock_unlock(&(cmdq->qlock));
}

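/*
 * Per-device init: map BAR0 and cache the domain/subdomain value from the
 * VF/PF mailbox register in the VF private data.
 */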
int
zipvf_create(struct rte_compressdev *compressdev)
{
        struct   rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
        struct   zip_vf *zipvf = NULL;
        char     *dev_name = compressdev->data->name;
        void     *vbar0;
        uint64_t reg;

        if (pdev->mem_resource[0].phys_addr == 0ULL)
                return -EIO;

        vbar0 = pdev->mem_resource[0].addr;
        if (!vbar0) {
                ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
                return -ENODEV;
        }

        zipvf = (struct zip_vf *)(compressdev->data->dev_private);
        if (!zipvf)
                return -ENOMEM;

        zipvf->vbar0 = vbar0;
        reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
        /* Store the domain/subdomain value locally in the VF */
        zipvf->dom_sdom = reg;
        zipvf->pdev = pdev;
        zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
        return 0;
}

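/* Per-device teardown: counterpart of zipvf_create(). */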
int
zipvf_destroy(struct rte_compressdev *compressdev)
{
        struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);

        /* Restore the domain/subdomain in ZIP_VF_PF_MBOXX(0) so the
         * application can be rerun.
         */
        zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);

        return 0;
}