4 * Copyright(c) 2017 Cavium, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_cycles.h>
36 #include <rte_malloc.h>
39 #include "lio_struct.h"
40 #include "lio_ethdev.h"
/* Free a DMA memzone that was reserved for this lio device.
 * NOTE(review): this view is truncated by extraction — the return type,
 * braces and the if-conditions guarding the error logs are not visible;
 * comments below describe only the visible statements.
 */
44 lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
46 const struct rte_memzone *mz_tmp;
/* Error path: presumably taken when the caller passed mz == NULL. */
50 lio_dev_err(lio_dev, "Memzone NULL\n");
/* Look the zone up by name to verify it is actually registered before
 * attempting to free it (defends against double-free / stale pointers).
 */
54 mz_tmp = rte_memzone_lookup(mz->name);
/* Error path: presumably taken when the lookup returned NULL. */
56 lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
/* rte_memzone_free() returns 0 on success, negative on error. */
60 ret = rte_memzone_free(mz);
62 lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
66 * lio_init_instr_queue()
67 * @param lio_dev - pointer to the lio device structure.
68 * @param txpciq - queue to be initialized.
70 * Called at driver init time for each input queue. iq_conf has the
71 * configuration parameters for the queue.
73 * @return Success: 0 Failure: -1
/* NOTE(review): extraction has dropped interior lines (declarations of
 * instr_type/q_size, error returns, braces); code below is kept verbatim.
 */
76 lio_init_instr_queue(struct lio_device *lio_dev,
77 union octeon_txpciq txpciq,
78 uint32_t num_descs, unsigned int socket_id)
80 uint32_t iq_no = (uint32_t)txpciq.s.q_no;
81 struct lio_instr_queue *iq;
/* Command size in bytes (32B or 64B per descriptor, per device config). */
85 instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
/* Total DMA ring size = command size * number of descriptors. */
87 q_size = instr_type * num_descs;
88 iq = lio_dev->instr_queue[iq_no];
/* Reserve a DMA-able memzone for the ring, tied to this ethdev/queue. */
89 iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
90 "instr_queue", iq_no, q_size,
93 if (iq->iq_mz == NULL) {
94 lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
/* Record both the bus (DMA) address and the host virtual address. */
99 iq->base_addr_dma = iq->iq_mz->phys_addr;
100 iq->base_addr = (uint8_t *)iq->iq_mz->addr;
102 iq->max_count = num_descs;
104 /* Initialize a list to hold requests that have been posted to Octeon
105 * but have yet to be fetched by Octeon.
/* One bookkeeping entry per descriptor, zero-initialized, NUMA-local. */
107 iq->request_list = rte_zmalloc_socket("request_list",
108 sizeof(*iq->request_list) *
112 if (iq->request_list == NULL) {
113 lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
/* Roll back the ring memzone on request_list allocation failure. */
115 lio_dma_zone_free(lio_dev, iq->iq_mz);
119 lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
120 iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
123 iq->lio_dev = lio_dev;
124 iq->txpciq.txpciq64 = txpciq.txpciq64;
/* Producer/consumer indices start at the head of the ring. */
126 iq->host_write_index = 0;
127 iq->lio_read_index = 0;
/* No commands outstanding yet. */
130 rte_atomic64_set(&iq->instr_pending, 0);
132 /* Initialize the spinlock for this instruction queue */
133 rte_spinlock_init(&iq->lock);
134 rte_spinlock_init(&iq->post_lock);
136 rte_atomic64_clear(&iq->iq_flush_running);
/* Mark this IQ as active in the device's queue bitmask. */
138 lio_dev->io_qmask.iq |= (1ULL << iq_no);
140 /* Set the 32B/64B mode for each input queue */
141 lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
142 iq->iqcmd_64B = (instr_type == 64);
/* Program the device registers for this queue via the HW-specific hook. */
144 lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
/* Allocate and initialize instruction queue 0 (the control/default TX
 * queue used during device bring-up).
 * NOTE(review): truncated view — declaration of iq_no, return statements
 * and braces are not visible here.
 */
150 lio_setup_instr_queue0(struct lio_device *lio_dev)
152 union octeon_txpciq txpciq;
153 uint32_t num_descs = 0;
/* Default TX descriptor count comes from the device configuration. */
156 num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
158 lio_dev->num_iqs = 0;
/* Zeroed host-side bookkeeping structure for IQ0. */
160 lio_dev->instr_queue[0] = rte_zmalloc(NULL,
161 sizeof(struct lio_instr_queue), 0);
162 if (lio_dev->instr_queue[0] == NULL)
165 lio_dev->instr_queue[0]->q_index = 0;
166 lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
/* Describe the queue to firmware: queue number, pkind, no QPG. */
168 txpciq.s.q_no = iq_no;
169 txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
170 txpciq.s.use_qpg = 0;
/* Non-zero return means ring/request-list setup failed: undo the alloc. */
172 if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
173 rte_free(lio_dev->instr_queue[0]);
174 lio_dev->instr_queue[0] = NULL;
184 * lio_delete_instr_queue()
185 * @param lio_dev - pointer to the lio device structure.
186 * @param iq_no - queue to be deleted.
188 * Called at driver unload time for each input queue. Deletes all
189 * allocated resources for the input queue.
192 lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
194 struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
/* Release the per-descriptor request bookkeeping; NULL out to avoid a
 * dangling pointer / double free on repeated teardown.
 */
196 rte_free(iq->request_list);
197 iq->request_list = NULL;
/* Release the DMA ring memzone reserved in lio_init_instr_queue(). */
198 lio_dma_zone_free(lio_dev, iq->iq_mz);
/* Tear down instruction queue 0: free its ring/request-list resources,
 * then free and clear the queue structure itself.
 */
202 lio_free_instr_queue0(struct lio_device *lio_dev)
204 lio_delete_instr_queue(lio_dev, 0);
205 rte_free(lio_dev->instr_queue[0]);
206 lio_dev->instr_queue[0] = NULL;
/* Create the per-port mbuf pool used to carve soft-command buffers.
 * NOTE(review): truncated view — the return statement / NULL check on
 * sc_buf_pool is not visible here.
 */
211 lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
213 char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
/* Each element must hold a full soft command plus mbuf headroom. */
216 buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
/* Pool name is made unique per port to allow multiple devices. */
217 snprintf(sc_pool_name, sizeof(sc_pool_name),
218 "lio_sc_pool_%u", lio_dev->port_id);
219 lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
220 LIO_MAX_SOFT_COMMAND_BUFFERS,
221 0, 0, buf_size, SOCKET_ID_ANY);
/* Destroy the soft-command mbuf pool created by
 * lio_setup_sc_buffer_pool(). rte_mempool_free(NULL) is a no-op, so this
 * is safe even if the pool was never created.
 */
226 lio_free_sc_buffer_pool(struct lio_device *lio_dev)
228 rte_mempool_free(lio_dev->sc_buf_pool);
/* Allocate a soft command from the sc mbuf pool and lay out its regions
 * inside one contiguous buffer:
 *   [struct lio_soft_command][ctx][pad][data][pad][rdata]
 * with data and rdata each aligned to a 128-byte boundary.
 * NOTE(review): truncated view — declarations of m/dma_addr, the mbuf
 * NULL-check branch and the final return are not visible here.
 */
231 struct lio_soft_command *
232 lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
233 uint32_t rdatasize, uint32_t ctxsize)
/* Regions are placed immediately after the header struct. */
235 uint32_t offset = sizeof(struct lio_soft_command);
236 struct lio_soft_command *sc;
/* All regions (plus header) must fit in one fixed-size sc buffer. */
240 RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
241 LIO_SOFT_COMMAND_BUFFER_SIZE);
243 m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
/* Error path: presumably taken when the mbuf allocation returned NULL. */
245 lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
249 /* set rte_mbuf data size and there is only 1 segment */
250 m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
251 m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
253 /* use rte_mbuf buffer for soft command */
254 sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
255 memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
256 sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
/* Bus address of the buffer, used to compute region DMA addresses. */
257 sc->dma_addr = rte_mbuf_data_dma_addr(m);
260 dma_addr = sc->dma_addr;
/* Context region sits right after the header. */
263 sc->ctxptr = (uint8_t *)sc + offset;
264 sc->ctxsize = ctxsize;
267 /* Start data at 128 byte boundary */
/* Round up to the next multiple of 128 (mask keeps the low 7 bits clear;
 * fine as long as offsets stay below 2^32 - 128, guaranteed by the
 * buffer-size assert above).
 */
268 offset = (offset + ctxsize + 127) & 0xffffff80;
/* Virtual and DMA pointers to the outbound data region. */
271 sc->virtdptr = (uint8_t *)sc + offset;
272 sc->dmadptr = dma_addr + offset;
273 sc->datasize = datasize;
276 /* Start rdata at 128 byte boundary */
277 offset = (offset + datasize + 127) & 0xffffff80;
/* Response region must be able to hold at least the 8-byte status word
 * at its tail (16 bytes minimum by convention here).
 */
280 RTE_ASSERT(rdatasize >= 16);
281 sc->virtrptr = (uint8_t *)sc + offset;
282 sc->dmarptr = dma_addr + offset;
283 sc->rdatasize = rdatasize;
/* Status word lives at the end of the response region; firmware writes
 * it when the command completes (polled by lio_process_ordered_list).
 */
284 sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
/* Return a soft command's backing mbuf to the sc pool. The sc struct
 * itself lives inside the mbuf data area, so this frees everything.
 */
292 lio_free_soft_command(struct lio_soft_command *sc)
294 rte_pktmbuf_free(sc->mbuf);
/* Initialize the device's ordered response list: empty queue, unlocked
 * spinlock, zero pending requests.
 */
298 lio_setup_response_list(struct lio_device *lio_dev)
300 STAILQ_INIT(&lio_dev->response_list.head);
301 rte_spinlock_init(&lio_dev->response_list.lock);
302 rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
/* Poll the ordered soft-command list for completed or timed-out
 * requests, invoking each completed command's callback. Processes at
 * most LIO_MAX_ORD_REQS_TO_PROCESS entries per invocation so a flood of
 * completions cannot monopolize the CPU.
 * NOTE(review): truncated view — the enclosing do/while body braces,
 * several declarations (status, status64, lio_uptime) and some condition
 * lines are not visible; comments describe the visible statements only.
 */
306 lio_process_ordered_list(struct lio_device *lio_dev)
308 int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
309 struct lio_response_list *ordered_sc_list;
310 struct lio_soft_command *sc;
311 int request_complete = 0;
315 ordered_sc_list = &lio_dev->response_list;
/* The list is shared with the posting path; hold the lock while
 * inspecting/removing the head entry.
 */
318 rte_spinlock_lock(&ordered_sc_list->lock);
320 if (STAILQ_EMPTY(&ordered_sc_list->head)) {
321 /* ordered_sc_list is empty; there is
/* Nothing pending: drop the lock and stop processing. */
324 rte_spinlock_unlock(&ordered_sc_list->lock);
/* Oldest outstanding command is always at the head (FIFO order). */
328 sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
329 struct lio_soft_command, node);
331 status = LIO_REQUEST_PENDING;
333 /* check if octeon has finished DMA'ing a response
334 * to where rptr is pointing to
336 status64 = *sc->status_word;
/* The word is initialized to all-FF; any other value means firmware has
 * started (possibly partially) writing the response.
 */
338 if (status64 != LIO_COMPLETION_WORD_INIT) {
339 /* This logic ensures that all 64b have been written.
340 * 1. check byte 0 for non-FF
341 * 2. if non-FF, then swap result from BE to host order
342 * 3. check byte 7 (swapped to 0) for non-FF
343 * 4. if non-FF, use the low 32-bit status code
344 * 5. if either byte 0 or byte 7 is FF, don't use status
346 if ((status64 & 0xff) != 0xff) {
/* Firmware writes big-endian; convert 8 bytes to host order. */
347 lio_swap_8B_data(&status64, 1);
348 if (((status64 & 0xff) != 0xff)) {
349 /* retrieve 16-bit firmware status */
350 status = (uint32_t)(status64 &
/* Non-zero firmware code is mapped through LIO_FIRMWARE_STATUS_CODE. */
354 LIO_FIRMWARE_STATUS_CODE(
/* Zero firmware code means plain success. */
358 status = LIO_REQUEST_DONE;
/* No response yet: check the command's deadline against uptime. */
362 } else if ((sc->timeout && lio_check_timeout(lio_uptime,
365 "cmd failed, timeout (%ld, %ld)\n",
366 (long)lio_uptime, (long)sc->timeout);
367 status = LIO_REQUEST_TIMEOUT;
370 if (status != LIO_REQUEST_PENDING) {
371 /* we have received a response or we have timed out.
372 * remove node from linked list
374 STAILQ_REMOVE(&ordered_sc_list->head,
375 &sc->node, lio_stailq_node, entries);
/* One fewer request outstanding on the device. */
377 &lio_dev->response_list.pending_req_count);
/* Drop the lock before invoking the callback so the callback may
 * post new commands without deadlocking.
 */
378 rte_spinlock_unlock(&ordered_sc_list->lock);
381 sc->callback(status, sc->callback_arg);
385 /* no response yet */
386 request_complete = 0;
387 rte_spinlock_unlock(&ordered_sc_list->lock);
390 /* If we hit the Max Ordered requests to process every loop,
391 * we quit and let this function be invoked the next time
392 * the poll thread runs to process the remaining requests.
393 * This function can take up the entire CPU if there is
394 * no upper limit to the requests processed.
396 if (request_complete >= resp_to_process)
398 } while (request_complete);