4 * Copyright(c) 2017 Cavium, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <rte_spinlock.h>
41 #include <rte_memory.h>
43 #include "lio_struct.h"
46 #define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
49 #define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
50 (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
52 #define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
55 (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
57 /** Descriptor format.
58 * The descriptor ring is made of descriptors which have 2 64-bit values:
59 * -# Physical (bus) address of the data buffer.
60 * -# Physical (bus) address of a lio_droq_info structure.
61 * The device DMA's incoming packets and its information at the address
62 * given by these descriptor fields.
64 struct lio_droq_desc {
65 /** The buffer pointer */
68 /** The Info pointer */
72 #define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
74 /** Information about packet DMA'ed by Octeon.
75 * The format of the information available at Info Pointer after Octeon
76 * has posted a packet. Not all descriptors have valid information. Only
77 * the Info field of the first descriptor for a packet has information
80 struct lio_droq_info {
81 /** The Output Receive Header. */
84 /** The Length of the packet. */
88 #define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))
90 /** Pointer to data buffer.
91 * Driver keeps a pointer to the data buffer that it made available to
92 * the Octeon device. Since the descriptor ring keeps physical (bus)
93 * addresses, this field is required for the driver to keep track of
94 * the virtual address pointers.
96 struct lio_recv_buffer {
97 /** Packet buffer, including meta data. */
100 /** Data in the packet buffer. */
105 #define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))
107 #define LIO_DROQ_SIZE (sizeof(struct lio_droq))
109 #define LIO_IQ_SEND_OK 0
110 #define LIO_IQ_SEND_STOP 1
111 #define LIO_IQ_SEND_FAILED -1
114 #define LIO_REQTYPE_NONE 0
115 #define LIO_REQTYPE_NORESP_NET 1
116 #define LIO_REQTYPE_NORESP_NET_SG 2
117 #define LIO_REQTYPE_SOFT_COMMAND 3
119 struct lio_request_list {
124 /*---------------------- INSTRUCTION FORMAT ----------------------------*/
126 struct lio_instr3_64B {
127 /** Pointer where the input data is available. */
130 /** Instruction Header. */
133 /** Instruction Header. */
136 /** Input Request Header. */
139 /** opcode/subcode specific parameters */
142 /** Return Data Parameters */
145 /** Pointer where the response for a RAW mode packet will be written
152 union lio_instr_64B {
153 struct lio_instr3_64B cmd3;
156 /** The size of each buffer in soft command buffer pool */
157 #define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
159 /** Maximum number of buffers to allocate into soft command buffer pool */
160 #define LIO_MAX_SOFT_COMMAND_BUFFERS 255
162 struct lio_soft_command {
163 /** Soft command buffer info. */
164 struct lio_stailq_node node;
168 /** Command and return status */
169 union lio_instr_64B cmd;
171 #define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
172 uint64_t *status_word;
174 /** Data buffer info */
179 /** Return buffer info */
184 /** Context buffer info */
188 /** Time out and callback */
192 void (*callback)(uint32_t, void *);
194 struct rte_mbuf *mbuf;
197 struct lio_iq_post_status {
223 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
226 uint64_t more : 6; /* How many udd words follow the command */
228 uint64_t reserved : 29;
230 uint64_t param1 : 16;
234 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
238 uint64_t param1 : 16;
240 uint64_t reserved : 29;
250 #define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
252 /* Maximum number of 8-byte words can be
253 * sent in a NIC control message.
255 #define LIO_MAX_NCTRL_UDD 32
257 /* Structure of control information passed by driver to the BASE
258 * layer when sending control commands to Octeon device software.
260 struct lio_ctrl_pkt {
261 /** Command to be passed to the Octeon device software. */
262 union octeon_cmd ncmd;
268 /** Response buffer */
272 /** Additional data that may be needed by some commands. */
273 uint64_t udd[LIO_MAX_NCTRL_UDD];
275 /** Input queue to use to send this command. */
278 /** Time to wait for Octeon software to respond to this control command.
279 * If wait_time is 0, BASE assumes no response is expected.
283 struct lio_dev_ctrl_cmd *ctrl_cmd;
286 /** Structure of data information passed by driver to the BASE
287 * layer when forwarding data to Octeon device software.
289 struct lio_data_pkt {
290 /** Pointer to information maintained by NIC module for this packet. The
291 * BASE layer passes this as-is to the driver.
295 /** Type of buffer passed in "buf" above. */
298 /** Total data bytes to be transferred in this command. */
301 /** Command to be passed to the Octeon device software. */
302 union lio_instr_64B cmd;
304 /** Input queue to use to send this command. */
308 /** Structure passed by driver to BASE layer to prepare a command to send
309 * network data to Octeon.
311 union lio_cmd_setup {
315 uint32_t timestamp : 1;
316 uint32_t ip_csum : 1;
317 uint32_t transport_csum : 1;
318 uint32_t tnl_csum : 1;
327 uint64_t cmd_setup64;
330 /* Instruction Header */
331 struct octeon_instr_ih3 {
332 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
335 uint64_t reserved3 : 1;
337 /** Gather indicator 1=gather*/
340 /** Data length OR no. of entries in gather list */
341 uint64_t dlengsz : 14;
343 /** Front Data size */
347 uint64_t reserved2 : 4;
349 /** PKI port kind - PKIND */
353 uint64_t reserved1 : 32;
355 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
357 uint64_t reserved1 : 32;
359 /** PKI port kind - PKIND */
363 uint64_t reserved2 : 4;
365 /** Front Data size */
368 /** Data length OR no. of entries in gather list */
369 uint64_t dlengsz : 14;
371 /** Gather indicator 1=gather*/
375 uint64_t reserved3 : 1;
380 /* PKI Instruction Header(PKI IH) */
381 struct octeon_instr_pki_ih3 {
382 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
387 /** Raw mode indicator 1 = RAW */
397 uint64_t reserved2 : 1;
409 uint64_t tagtype : 2;
412 uint64_t reserved1 : 2;
420 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
429 uint64_t reserved1 : 2;
432 uint64_t tagtype : 2;
444 uint64_t reserved2 : 1;
452 /** Raw mode indicator 1 = RAW */
460 /** Input Request Header */
461 struct octeon_instr_irh {
462 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
465 uint64_t subcode : 7;
467 uint64_t priority : 3;
468 uint64_t reserved : 5;
469 uint64_t ossp : 32; /* opcode/subcode specific parameters */
470 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
471 uint64_t ossp : 32; /* opcode/subcode specific parameters */
472 uint64_t reserved : 5;
473 uint64_t priority : 3;
475 uint64_t subcode : 7;
481 /* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
482 #define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
483 /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
484 #define OCTEON_PCI_CMD_O3 (24 + 8)
486 /** Return Data Parameters */
487 struct octeon_instr_rdp {
488 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
489 uint64_t reserved : 49;
490 uint64_t pcie_port : 3;
492 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
494 uint64_t pcie_port : 3;
495 uint64_t reserved : 49;
499 union octeon_packet_params {
500 uint32_t pkt_params32;
502 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
503 uint32_t reserved : 24;
504 uint32_t ip_csum : 1; /* Perform IP header checksum(s) */
505 /* Perform Outer transport header checksum */
506 uint32_t transport_csum : 1;
507 /* Find tunnel, and perform transport csum. */
508 uint32_t tnl_csum : 1;
509 uint32_t tsflag : 1; /* Timestamp this packet */
510 uint32_t ipsec_ops : 4; /* IPsec operation */
512 uint32_t ipsec_ops : 4;
514 uint32_t tnl_csum : 1;
515 uint32_t transport_csum : 1;
516 uint32_t ip_csum : 1;
517 uint32_t reserved : 7;
522 /** Utility function to prepare a 64B NIC instruction based on a setup command
523 * @param cmd - pointer to instruction to be filled in.
524 * @param setup - pointer to the setup structure
525 * @param q_no - which queue for back pressure
527 * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
530 lio_prepare_pci_cmd(struct lio_device *lio_dev,
531 union lio_instr_64B *cmd,
532 union lio_cmd_setup *setup,
535 union octeon_packet_params packet_params;
536 struct octeon_instr_pki_ih3 *pki_ih3;
537 struct octeon_instr_irh *irh;
538 struct octeon_instr_ih3 *ih3;
541 memset(cmd, 0, sizeof(union lio_instr_64B));
543 ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
544 pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
546 /* assume that rflag is cleared so therefore front data will only have
547 * irh and ossp[1] and ossp[2] for a total of 24 bytes
549 ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
551 ih3->fsz = OCTEON_PCI_CMD_O3;
553 if (!setup->s.gather) {
554 ih3->dlengsz = setup->s.u.datasize;
557 ih3->dlengsz = setup->s.u.gatherptrs;
564 pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
566 port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;
571 pki_ih3->tag = LIO_DATA(port);
573 pki_ih3->tagtype = OCTEON_ORDERED_TAG;
574 pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
575 pki_ih3->pm = 0x0; /* parse from L2 */
576 pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/
578 irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
580 irh->opcode = LIO_OPCODE;
581 irh->subcode = LIO_OPCODE_NW_DATA;
583 packet_params.pkt_params32 = 0;
584 packet_params.s.ip_csum = setup->s.ip_csum;
585 packet_params.s.transport_csum = setup->s.transport_csum;
586 packet_params.s.tnl_csum = setup->s.tnl_csum;
587 packet_params.s.tsflag = setup->s.timestamp;
589 irh->ossp = packet_params.pkt_params32;
592 int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
593 void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
595 struct lio_soft_command *
596 lio_alloc_soft_command(struct lio_device *lio_dev,
597 uint32_t datasize, uint32_t rdatasize,
599 void lio_prepare_soft_command(struct lio_device *lio_dev,
600 struct lio_soft_command *sc,
601 uint8_t opcode, uint8_t subcode,
602 uint32_t irh_ossp, uint64_t ossp0,
604 int lio_send_soft_command(struct lio_device *lio_dev,
605 struct lio_soft_command *sc);
606 void lio_free_soft_command(struct lio_soft_command *sc);
608 /** Send control packet to the device
609 * @param lio_dev - lio device pointer
610 * @param nctrl - control structure with command, timeout, and callback info
612 * @returns LIO_IQ_SEND_FAILED if it failed to add to the input queue. LIO_IQ_SEND_STOP if the
613 * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
615 int lio_send_ctrl_pkt(struct lio_device *lio_dev,
616 struct lio_ctrl_pkt *ctrl_pkt);
618 /** Maximum ordered requests to process in every invocation of
619 * lio_process_ordered_list(). The function will continue to process requests
620 * as long as it can find one that has finished processing. If it keeps
621 * finding requests that have completed, the function can run for ever. The
622 * value defined here sets an upper limit on the number of requests it can
623 * process before it returns control to the poll thread.
625 #define LIO_MAX_ORD_REQS_TO_PROCESS 4096
627 /** Error codes used in Octeon Host-Core communication.
630 * ----------------------------
632 * ----------------------------
633 * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
634 * are reserved to identify the group to which the error code belongs. The
635 * lower 16-bits, called Minor Error Number, carry the actual code.
637 * So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER.
639 /** Status for a request.
640 * If the request is successfully queued, the driver will return
641 * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by
642 * the driver if the response for request failed to arrive before a
643 * time-out period or if the request processing got interrupted due to
644 * a signal respectively.
647 /** A value of 0x00000000 indicates no error i.e. success */
648 LIO_REQUEST_DONE = 0x00000000,
649 /** (Major number: 0x0000; Minor Number: 0x0001) */
650 LIO_REQUEST_PENDING = 0x00000001,
651 LIO_REQUEST_TIMEOUT = 0x00000003,
655 /*------ Error codes used by firmware (bits 15..0 set by firmware */
656 #define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
657 #define LIO_FIRMWARE_STATUS_CODE(status) \
658 ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
660 /** Initialize the response lists. The number of response lists to create is
662 * @param lio_dev - the lio device structure.
664 void lio_setup_response_list(struct lio_device *lio_dev);
666 /** Check the status of first entry in the ordered list. If the instruction at
667 * that entry finished processing or has timed-out, the entry is cleaned.
668 * @param lio_dev - the lio device structure.
669 * @return 1 if the ordered list is empty, 0 otherwise.
671 int lio_process_ordered_list(struct lio_device *lio_dev);
673 #define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \
674 (((lio_dev)->instr_queue[iq_no]->stats.field) += count)
/** Byte-swap 'blocks' consecutive 64-bit words in place to big-endian
 * byte order (a no-op on big-endian hosts).
 * @param data - pointer to the first 64-bit word
 * @param blocks - number of 64-bit words to swap
 *
 * NOTE(review): the loop body was restored from a truncated original in
 * which the 'blocks' parameter went unused — confirm against full source.
 */
static inline void
lio_swap_8B_data(uint64_t *data, uint32_t blocks)
{
	while (blocks) {
		*data = rte_cpu_to_be_64(*data);
		blocks--;
		data++;
	}
}
686 static inline uint64_t
687 lio_map_ring(void *buf)
689 phys_addr_t dma_addr;
691 dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
693 return (uint64_t)dma_addr;
696 static inline uint64_t
697 lio_map_ring_info(struct lio_droq *droq, uint32_t i)
699 phys_addr_t dma_addr;
701 dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
703 return (uint64_t)dma_addr;
707 lio_opcode_slow_path(union octeon_rh *rh)
709 uint16_t subcode1, subcode2;
711 subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
712 subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);
714 return subcode2 != subcode1;
718 lio_add_sg_size(struct lio_sg_entry *sg_entry,
719 uint16_t size, uint32_t pos)
721 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
722 sg_entry->u.size[pos] = size;
723 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
724 sg_entry->u.size[3 - pos] = size;
728 /* Macro to increment index.
729 * Index is incremented by count; if the sum exceeds
730 * max, index is wrapped-around to the start.
/** Increment a ring index by count, wrapping around to the start when the
 * sum reaches or exceeds max (per the comment above: "if the sum exceeds
 * max, index is wrapped-around to the start").
 * @param index - current ring index
 * @param count - amount to advance by (assumed <= max, so one
 *                subtraction suffices for the wrap)
 * @param max - ring size; valid indices are 0..max-1
 * @return the advanced, wrapped index
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}
743 int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
744 int desc_size, struct rte_mempool *mpool,
745 unsigned int socket_id);
746 uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
748 void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);
750 void lio_delete_sglist(struct lio_instr_queue *txq);
751 int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
752 int fw_mapped_iq, int num_descs, unsigned int socket_id);
753 uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
755 int lio_wait_for_instr_fetch(struct lio_device *lio_dev);
756 int lio_setup_iq(struct lio_device *lio_dev, int q_index,
757 union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,
758 unsigned int socket_id);
759 int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);
760 void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);
761 /** Setup instruction queue zero for the device
762 * @param lio_dev which lio device to setup
764 * @return 0 if success. -1 if fails
766 int lio_setup_instr_queue0(struct lio_device *lio_dev);
767 void lio_free_instr_queue0(struct lio_device *lio_dev);
768 void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);
769 #endif /* _LIO_RXTX_H_ */