* Copyright(c) 2017 Cavium, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <rte_spinlock.h>
41 #include <rte_memory.h>
43 #include "lio_struct.h"
/* Containerof idiom: recover the structure of type @type that holds the
 * first node of the singly-linked tail queue headed at @ptr, where @elem
 * is the name of the stailq-node member inside @type.
 */
45 #define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
46 (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
48 #define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
51 (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
53 /** Descriptor format.
54 * The descriptor ring is made of descriptors which have 2 64-bit values:
55 * -# Physical (bus) address of the data buffer.
56 * -# Physical (bus) address of a lio_droq_info structure.
57 * The device DMA's incoming packets and its information at the address
58 * given by these descriptor fields.
60 struct lio_droq_desc {
61 /** The buffer pointer */
64 /** The Info pointer */
68 #define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
70 /** Information about packet DMA'ed by Octeon.
71 * The format of the information available at Info Pointer after Octeon
72 * has posted a packet. Not all descriptors have valid information. Only
73 * the Info field of the first descriptor for a packet has information
76 struct lio_droq_info {
77 /** The Output Receive Header. */
80 /** The Length of the packet. */
84 #define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))
86 /** Pointer to data buffer.
87 * Driver keeps a pointer to the data buffer that it made available to
88 * the Octeon device. Since the descriptor ring keeps physical (bus)
89 * addresses, this field is required for the driver to keep track of
90 * the virtual address pointers.
92 struct lio_recv_buffer {
93 /** Packet buffer, including meta data. */
96 /** Data in the packet buffer. */
101 #define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))
103 #define LIO_DROQ_SIZE (sizeof(struct lio_droq))
105 #define LIO_IQ_SEND_OK 0
106 #define LIO_IQ_SEND_STOP 1
107 #define LIO_IQ_SEND_FAILED -1
110 #define LIO_REQTYPE_NONE 0
111 #define LIO_REQTYPE_NORESP_NET 1
112 #define LIO_REQTYPE_NORESP_NET_SG 2
113 #define LIO_REQTYPE_SOFT_COMMAND 3
115 struct lio_request_list {
120 /*---------------------- INSTRUCTION FORMAT ----------------------------*/
122 struct lio_instr3_64B {
123 /** Pointer where the input data is available. */
126 /** Instruction Header. */
129 /** Instruction Header. */
132 /** Input Request Header. */
135 /** opcode/subcode specific parameters */
138 /** Return Data Parameters */
141 /** Pointer where the response for a RAW mode packet will be written
148 union lio_instr_64B {
149 struct lio_instr3_64B cmd3;
152 /** The size of each buffer in soft command buffer pool */
153 #define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
155 /** Maximum number of buffers to allocate into soft command buffer pool */
156 #define LIO_MAX_SOFT_COMMAND_BUFFERS 255
158 struct lio_soft_command {
159 /** Soft command buffer info. */
160 struct lio_stailq_node node;
164 /** Command and return status */
165 union lio_instr_64B cmd;
167 #define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
168 uint64_t *status_word;
170 /** Data buffer info */
175 /** Return buffer info */
180 /** Context buffer info */
184 /** Time out and callback */
188 void (*callback)(uint32_t, void *);
190 struct rte_mbuf *mbuf;
193 struct lio_iq_post_status {
219 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
222 uint64_t more : 6; /* How many udd words follow the command */
224 uint64_t reserved : 29;
226 uint64_t param1 : 16;
230 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
234 uint64_t param1 : 16;
236 uint64_t reserved : 29;
246 #define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
248 /* Instruction Header */
249 struct octeon_instr_ih3 {
250 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
253 uint64_t reserved3 : 1;
255 /** Gather indicator 1=gather*/
258 /** Data length OR no. of entries in gather list */
259 uint64_t dlengsz : 14;
261 /** Front Data size */
265 uint64_t reserved2 : 4;
267 /** PKI port kind - PKIND */
271 uint64_t reserved1 : 32;
273 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
275 uint64_t reserved1 : 32;
277 /** PKI port kind - PKIND */
281 uint64_t reserved2 : 4;
283 /** Front Data size */
286 /** Data length OR no. of entries in gather list */
287 uint64_t dlengsz : 14;
289 /** Gather indicator 1=gather*/
293 uint64_t reserved3 : 1;
298 /* PKI Instruction Header(PKI IH) */
299 struct octeon_instr_pki_ih3 {
300 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
305 /** Raw mode indicator 1 = RAW */
315 uint64_t reserved2 : 1;
327 uint64_t tagtype : 2;
330 uint64_t reserved1 : 2;
338 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
347 uint64_t reserved1 : 2;
350 uint64_t tagtype : 2;
362 uint64_t reserved2 : 1;
370 /** Raw mode indicator 1 = RAW */
378 /** Input Request Header */
379 struct octeon_instr_irh {
380 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
383 uint64_t subcode : 7;
385 uint64_t priority : 3;
386 uint64_t reserved : 5;
387 uint64_t ossp : 32; /* opcode/subcode specific parameters */
388 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
389 uint64_t ossp : 32; /* opcode/subcode specific parameters */
390 uint64_t reserved : 5;
391 uint64_t priority : 3;
393 uint64_t subcode : 7;
399 /* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
400 #define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
401 /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
402 #define OCTEON_PCI_CMD_O3 (24 + 8)
404 /** Return Data Parameters */
405 struct octeon_instr_rdp {
406 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
407 uint64_t reserved : 49;
408 uint64_t pcie_port : 3;
410 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
412 uint64_t pcie_port : 3;
413 uint64_t reserved : 49;
417 int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
418 void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
420 struct lio_soft_command *
421 lio_alloc_soft_command(struct lio_device *lio_dev,
422 uint32_t datasize, uint32_t rdatasize,
424 void lio_prepare_soft_command(struct lio_device *lio_dev,
425 struct lio_soft_command *sc,
426 uint8_t opcode, uint8_t subcode,
427 uint32_t irh_ossp, uint64_t ossp0,
429 int lio_send_soft_command(struct lio_device *lio_dev,
430 struct lio_soft_command *sc);
431 void lio_free_soft_command(struct lio_soft_command *sc);
433 /** Maximum ordered requests to process in every invocation of
434 * lio_process_ordered_list(). The function will continue to process requests
435 * as long as it can find one that has finished processing. If it keeps
436 * finding requests that have completed, the function can run for ever. The
437 * value defined here sets an upper limit on the number of requests it can
438 * process before it returns control to the poll thread.
440 #define LIO_MAX_ORD_REQS_TO_PROCESS 4096
442 /** Error codes used in Octeon Host-Core communication.
445 * ----------------------------
447 * ----------------------------
448 * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
449 * are reserved to identify the group to which the error code belongs. The
450 * lower 16-bits, called Minor Error Number, carry the actual code.
452 * So error codes are (MAJOR_NUMBER << 16) | MINOR_NUMBER.
454 /** Status for a request.
455 * If the request is successfully queued, the driver will return
456 * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by
457 * the driver if the response for request failed to arrive before a
458 * time-out period or if the request processing got interrupted due to
459 * a signal respectively.
462 /** A value of 0x00000000 indicates no error i.e. success */
463 LIO_REQUEST_DONE = 0x00000000,
464 /** (Major number: 0x0000; Minor Number: 0x0001) */
465 LIO_REQUEST_PENDING = 0x00000001,
466 LIO_REQUEST_TIMEOUT = 0x00000003,
470 /*------ Error codes used by firmware (bits 15..0 set by firmware */
471 #define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
472 #define LIO_FIRMWARE_STATUS_CODE(status) \
473 ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
475 /** Initialize the response lists. The number of response lists to create is
477 * @param lio_dev - the lio device structure.
479 void lio_setup_response_list(struct lio_device *lio_dev);
481 /** Check the status of first entry in the ordered list. If the instruction at
482 * that entry finished processing or has timed-out, the entry is cleaned.
483 * @param lio_dev - the lio device structure.
484 * @return 1 if the ordered list is empty, 0 otherwise.
486 int lio_process_ordered_list(struct lio_device *lio_dev);
/* Byte-swap 64-bit words at @data to big-endian (the Octeon device's
 * native order) in place.
 * NOTE(review): the loop driven by @blocks is not fully visible in this
 * view — presumably it converts all @blocks consecutive words; confirm
 * against the full source.
 */
489 lio_swap_8B_data(uint64_t *data, uint32_t blocks)
492 *data = rte_cpu_to_be_64(*data);
498 static inline uint64_t
499 lio_map_ring(void *buf)
501 phys_addr_t dma_addr;
503 dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
505 return (uint64_t)dma_addr;
508 static inline uint64_t
509 lio_map_ring_info(struct lio_droq *droq, uint32_t i)
511 phys_addr_t dma_addr;
513 dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
515 return (uint64_t)dma_addr;
519 lio_opcode_slow_path(union octeon_rh *rh)
521 uint16_t subcode1, subcode2;
523 subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
524 subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);
526 return subcode2 != subcode1;
/* Advance a ring @index by @count, wrapping around to the start of the
 * ring when the sum reaches or passes @max. A single subtraction is
 * enough to wrap because callers never advance by more than one full
 * ring (count <= max).
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
	uint32_t next = index + count;

	return (next >= max) ? next - max : next;
}
544 int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
545 int desc_size, struct rte_mempool *mpool,
546 unsigned int socket_id);
547 uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
549 void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);
551 /** Setup instruction queue zero for the device
552 * @param lio_dev which lio device to setup
554 * @return 0 if success. -1 if fails
556 int lio_setup_instr_queue0(struct lio_device *lio_dev);
557 void lio_free_instr_queue0(struct lio_device *lio_dev);
558 #endif /* _LIO_RXTX_H_ */