7c21c6a528a0fe3e56507e0489702e806ca078de
[dpdk.git] / drivers / raw / dpaa2_cmdif / dpaa2_cmdif.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8
9 #include <rte_bus_vdev.h>
10 #include <rte_atomic.h>
11 #include <rte_interrupts.h>
12 #include <rte_branch_prediction.h>
13 #include <rte_lcore.h>
14
15 #include <rte_rawdev.h>
16 #include <rte_rawdev_pmd.h>
17
18 #include <portal/dpaa2_hw_pvt.h>
19 #include <portal/dpaa2_hw_dpio.h>
20 #include "dpaa2_cmdif_logs.h"
21 #include "rte_pmd_dpaa2_cmdif.h"
22
/* Dynamic log type identifier, registered via rte_log_register() at init. */
int dpaa2_cmdif_logtype;

/* CMDIF driver name: the vdev name under which this PMD is registered. */
#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
28
29 /*
30  * This API provides the DPCI device ID in 'attr_value'.
31  * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
32  */
33 static int
34 dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
35                      const char *attr_name,
36                      uint64_t *attr_value)
37 {
38         struct dpaa2_dpci_dev *cidev = dev->dev_private;
39
40         DPAA2_CMDIF_FUNC_TRACE();
41
42         RTE_SET_USED(attr_name);
43
44         if (!attr_value) {
45                 DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
46                 return -EINVAL;
47         }
48         *attr_value = cidev->dpci_id;
49
50         return 0;
51 }
52
/*
 * Transmit one command buffer to the AIOP through a DPCI Tx queue.
 *
 * Only buffers[0] is sent; 'count' is unused (single frame per call).
 * 'context' must point to a struct rte_dpaa2_cmdif_context supplying
 * the Tx queue 'priority' and the frame-descriptor 'size', 'frc' and
 * 'flc' fields to transmit.
 *
 * Returns 1 when the frame was enqueued, 0 when no QBMAN portal could
 * be affined to this lcore, or a negative QBMAN error code when the
 * enqueue kept failing after DPAA2_MAX_TX_RETRY_COUNT attempts.
 */
static int
dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
	struct dpaa2_queue *txq;
	struct qbman_fd fd;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	uint32_t retry_count = 0;
	int ret;

	RTE_SET_USED(count);

	/* Affine a QBMAN software portal to this lcore on first use. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* The context's priority selects which Tx queue carries the frame. */
	cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);

	/* Prepare enqueue descriptor: no order restoration, no response. */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	/* Set only the FD fields we use to 0.
	 * For performance reasons do not memset the whole FD.
	 */
	fd.simple.bpid_offset = 0;
	fd.simple.ctrl = 0;

	DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
	DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
	DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
	DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);

	/* Enqueue the frame to QBMAN, retrying while the portal is busy. */
	do {
		ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
		if (ret < 0 && ret != -EBUSY)
			DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
		retry_count++;
	} while ((ret == -EBUSY) && (retry_count < DPAA2_MAX_TX_RETRY_COUNT));

	if (ret < 0)
		return ret;

	DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");

	return 1;
}
116
/*
 * Receive one command buffer from the AIOP via a DPCI Rx queue.
 *
 * Only buffers[0] is filled; 'count' is unused (single frame per call).
 * 'context' must point to a struct rte_dpaa2_cmdif_context: its
 * 'priority' selects the Rx queue, and its 'size', 'flc' and 'frc'
 * fields are filled from the received frame descriptor.
 *
 * Returns 1 when a frame was received, 0 when no valid frame was
 * delivered (or no QBMAN portal could be affined to this lcore).
 */
static int
dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
	struct dpaa2_queue *rxq;
	struct qbman_swp *swp;
	struct qbman_result *dq_storage;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	uint8_t status;
	int ret;

	RTE_SET_USED(count);

	/* Affine a QBMAN software portal to this lcore on first use. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* The context's priority selects the Rx queue to pull from. */
	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
	dq_storage = rxq->q_storage->dq_storage[0];

	/* Build a volatile dequeue (pull) command for a single frame. */
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if previous issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;
	/* Loop until the dq_storage is updated with new token by QBMAN */
	while (!qbman_result_has_new_result(swp, dq_storage))
		;

	/* Check for valid frame. */
	status = (uint8_t)qbman_result_DQ_flags(dq_storage);
	if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
		DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
		return 0;
	}

	/* Copy the frame descriptor contents out to the caller's context. */
	fd = qbman_result_DQ_fd(dq_storage);

	buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
	cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
	cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
	cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);

	DPAA2_CMDIF_DP_DEBUG("packet received\n");

	return 1;
}
191
/* Raw device operations exposed by the CMDIF PMD. */
static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
	.attr_get = dpaa2_cmdif_get_attr,	/* report DPCI device ID */
	.enqueue_bufs = dpaa2_cmdif_enqueue_bufs,	/* send frame to AIOP */
	.dequeue_bufs = dpaa2_cmdif_dequeue_bufs,	/* receive frame from AIOP */
};
197
198 static int
199 dpaa2_cmdif_create(const char *name,
200                    struct rte_vdev_device *vdev,
201                    int socket_id)
202 {
203         struct rte_rawdev *rawdev;
204         struct dpaa2_dpci_dev *cidev;
205
206         /* Allocate device structure */
207         rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
208                                          socket_id);
209         if (!rawdev) {
210                 DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
211                 return -EINVAL;
212         }
213
214         rawdev->dev_ops = &dpaa2_cmdif_ops;
215         rawdev->device = &vdev->device;
216
217         /* For secondary processes, the primary has done all the work */
218         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
219                 return 0;
220
221         cidev = rte_dpaa2_alloc_dpci_dev();
222         if (!cidev) {
223                 DPAA2_CMDIF_ERR("Unable to allocate CI device");
224                 rte_rawdev_pmd_release(rawdev);
225                 return -ENODEV;
226         }
227
228         rawdev->dev_private = cidev;
229
230         return 0;
231 }
232
233 static int
234 dpaa2_cmdif_destroy(const char *name)
235 {
236         int ret;
237         struct rte_rawdev *rdev;
238
239         rdev = rte_rawdev_pmd_get_named_dev(name);
240         if (!rdev) {
241                 DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
242                 return -EINVAL;
243         }
244
245         /* The primary process will only free the DPCI device */
246         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
247                 rte_dpaa2_free_dpci_dev(rdev->dev_private);
248
249         ret = rte_rawdev_pmd_release(rdev);
250         if (ret)
251                 DPAA2_CMDIF_DEBUG("Device cleanup failed");
252
253         return 0;
254 }
255
/*
 * vdev probe hook: create the CMDIF raw device on the caller's NUMA node.
 * Returns 0 on success or the negative error from dpaa2_cmdif_create().
 */
static int
dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());

	return dpaa2_cmdif_create(name, vdev, rte_socket_id());
}
270
271 static int
272 dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
273 {
274         const char *name;
275         int ret;
276
277         name = rte_vdev_device_name(vdev);
278         if (name == NULL)
279                 return -1;
280
281         DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
282
283         ret = dpaa2_cmdif_destroy(name);
284
285         return ret;
286 }
287
/* Virtual device driver hooks for the CMDIF PMD. */
static struct rte_vdev_driver dpaa2_cmdif_drv = {
	.probe = dpaa2_cmdif_probe,
	.remove = dpaa2_cmdif_remove
};

/* Register the driver under the "dpaa2_dpci" vdev name. */
RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
294
295 RTE_INIT(dpaa2_cmdif_init_log)
296 {
297         dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
298         if (dpaa2_cmdif_logtype >= 0)
299                 rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
300 }