4 * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _LIO_STRUCT_H_
35 #define _LIO_STRUCT_H_
39 #include <sys/queue.h>
41 #include <rte_spinlock.h>
42 #include <rte_atomic.h>
44 #include "lio_hw_defs.h"
/** Generic node type for the driver's singly-linked tail queues
 * (BSD sys/queue.h STAILQ).
 */
46 struct lio_stailq_node {
47 STAILQ_ENTRY(lio_stailq_node) entries;
/* Declares 'struct lio_stailq_head' as the head type for lists of
 * lio_stailq_node.
 */
50 STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
59 /** The Descriptor Ring Output Queue structure.
60 * This structure has all the information required to implement a
64 /** A spinlock to protect access to this ring. */
/** Back-pointer to the owning LIO device. */
71 struct lio_device *lio_dev;
73 /** The 8B aligned descriptor ring starts at this address. */
74 struct lio_droq_desc *desc_ring;
76 /** Index in the ring where the driver should read the next packet */
79 /** Index in the ring where Octeon will write the next packet */
82 /** Index in the ring where the driver will refill the descriptor's
87 /** Packets pending to be processed */
88 rte_atomic64_t pkts_pending;
90 /** Number of descriptors in this ring. */
93 /** The number of descriptors pending refill. */
94 uint32_t refill_count;
/** Refill the ring once refill_count reaches this many descriptors. */
96 uint32_t refill_threshold;
98 /** The 8B aligned info ptrs begin from this address. */
99 struct lio_droq_info *info_list;
101 /** The receive buffer list. This list has the virtual addresses of the
104 struct lio_recv_buffer *recv_buf_list;
106 /** The size of each buffer pointed by the buffer pointer. */
107 uint32_t buffer_size;
109 /** Pointer to the mapped packet credit register.
110 * Host writes number of info/buffer ptrs available to this register
112 void *pkts_credit_reg;
114 /** Pointer to the mapped packet sent register.
115 * Octeon writes the number of packets DMA'ed to host memory
120 /** DMA mapped address of the DROQ descriptor ring. */
121 size_t desc_ring_dma;
123 /** Info ptr list are allocated at this virtual address. */
124 size_t info_base_addr;
126 /** DMA mapped address of the info list */
127 size_t info_list_dma;
129 /** Allocated size of info list. */
130 uint32_t info_alloc_size;
/** Memzones backing the descriptor ring and the info list. */
133 const struct rte_memzone *desc_ring_mz;
134 const struct rte_memzone *info_mz;
/** Mempool this queue's receive buffers come from -- presumably mbufs;
 * confirm against the queue setup code.
 */
135 struct rte_mempool *mpool;
138 /** Receive Header */
/* The two preprocessor branches below declare the same bitfields in
 * mirrored order so the 64-bit receive-header word is interpreted
 * identically on big- and little-endian hosts.
 */
140 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
144 uint64_t subcode : 8;
145 uint64_t len : 3; /** additional 64-bit words */
146 uint64_t reserved : 17;
147 uint64_t ossp : 32; /** opcode/subcode specific parameters */
151 uint64_t subcode : 8;
152 uint64_t len : 3; /** additional 64-bit words */
155 uint64_t priority : 3;
156 uint64_t csum_verified : 3; /** checksum verified. */
157 uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
158 uint64_t encap_on : 1;
159 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
163 uint64_t subcode : 8;
164 uint64_t len : 3; /** additional 64-bit words */
165 uint64_t reserved : 8;
167 uint64_t gmxport : 16;
172 uint64_t ossp : 32; /** opcode/subcode specific parameters */
173 uint64_t reserved : 17;
174 uint64_t len : 3; /** additional 64-bit words */
175 uint64_t subcode : 8;
179 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
180 uint64_t encap_on : 1;
181 uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
182 uint64_t csum_verified : 3; /** checksum verified. */
183 uint64_t priority : 3;
186 uint64_t len : 3; /** additional 64-bit words */
187 uint64_t subcode : 8;
191 uint64_t gmxport : 16;
193 uint64_t reserved : 8;
194 uint64_t len : 3; /** additional 64-bit words */
195 uint64_t subcode : 8;
/* Size in bytes of the full receive-header union. */
201 #define OCTEON_RH_SIZE (sizeof(union octeon_rh))
203 /** The txpciq info passed to host from the firmware */
204 union octeon_txpciq {
/* Big- and little-endian branches declare the same bitfields in mirrored
 * order; the firmware-defined 64-bit word layout is byte-order independent.
 */
208 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
212 uint64_t use_qpg : 1;
/* aura_num: buffer-pool (aura) number -- presumably used when use_qpg is
 * set; confirm against firmware interface docs.
 */
214 uint64_t aura_num : 10;
215 uint64_t reserved : 20;
217 uint64_t reserved : 20;
218 uint64_t aura_num : 10;
220 uint64_t use_qpg : 1;
228 /** The instruction (input) queue.
229 * The input queue is used to post raw (instruction) mode data or packet
230 * data to Octeon device from the host. Each input queue for
231 * a LIO device has one such structure to represent it.
233 struct lio_instr_queue {
234 /** A spinlock to protect access to the input ring. */
/* post_lock: presumably serializes command posting into the ring --
 * confirm against the IQ post path.
 */
237 rte_spinlock_t post_lock;
/** Back-pointer to the owning LIO device. */
239 struct lio_device *lio_dev;
/* Count of input packets the hardware has completed -- TODO confirm
 * exact semantics from the flush code.
 */
241 uint32_t pkt_in_done;
/** Nonzero while an IQ flush is running (guards concurrent flushes). */
243 rte_atomic64_t iq_flush_running;
245 /** Flag that indicates if the queue uses 64 byte commands. */
246 uint32_t iqcmd_64B:1;
/** txpciq descriptor for this queue as provided by the firmware. */
249 union octeon_txpciq txpciq;
255 /** Maximum no. of instructions in this queue. */
258 /** Index in input ring where the driver should write the next packet */
259 uint32_t host_write_index;
261 /** Index in input ring where Octeon is expected to read the next
264 uint32_t lio_read_index;
266 /** This index aids in finding the window in the queue where Octeon
267 * has read the commands.
269 uint32_t flush_index;
271 /** This field keeps track of the instructions pending in this queue. */
272 rte_atomic64_t instr_pending;
274 /** Pointer to the Virtual Base addr of the input ring. */
277 struct lio_request_list *request_list;
279 /** Octeon doorbell register for the ring. */
282 /** Octeon instruction count register for this ring. */
285 /** Number of instructions pending to be posted to Octeon. */
288 /** DMA mapped base address of the input descriptor ring. */
289 uint64_t base_addr_dma;
291 /** Application context */
294 /* network stack queue index */
/** Memzone backing this instruction queue's ring memory. */
298 const struct rte_memzone *iq_mz;
301 /** This structure is used by driver to store information required
302 * to free the mbuff when the packet has been fetched by Octeon.
303 * Bytes offset below assume worst-case of a 64-bit system.
305 struct lio_buf_free_info {
306 /** Bytes 1-8. Pointer to network device private structure. */
307 struct lio_device *lio_dev;
309 /** Bytes 9-16. Pointer to mbuff. */
310 struct rte_mbuf *mbuf;
312 /** Bytes 17-24. Pointer to gather list. */
313 struct lio_gather *g;
315 /** Bytes 25-32. Physical address of mbuf->data or gather list. */
318 /** Bytes 33-47. Piggybacked soft command, if any */
/* sc is NULL when no soft command rides along -- TODO confirm in the
 * free path.
 */
319 struct lio_soft_command *sc;
321 /** Bytes 48-63. iq no */
325 /* The Scatter-Gather List Entry. The scatter or gather component used with
326 * input instruction has this format.
328 struct lio_sg_entry {
329 /** The first 64 bit gives the size of data in each dptr. */
335 /** The 4 dptr pointers for this entry. */
/* Size in bytes of one scatter-gather entry. */
339 #define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
341 /** Structure of a node in list of gather components maintained by
342 * driver for each network device.
345 /** List manipulation. Next and prev pointers. */
346 struct lio_stailq_node list;
348 /** Size of the gather component at sg in bytes. */
351 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
354 /** Gather component that can accommodate max sized fragment list
355 * received from the IP layer.
357 struct lio_sg_entry *sg;
/* Per-device I/O queue enable masks (members not visible in this chunk). */
360 struct lio_io_enable {
/* NOTE(review): the function pointers below form a per-chip operations
 * table; the enclosing struct declaration is not visible here -- confirm
 * against the full header.
 */
367 void (*setup_iq_regs)(struct lio_device *, uint32_t);
368 void (*setup_oq_regs)(struct lio_device *, uint32_t);
370 int (*setup_mbox)(struct lio_device *);
371 void (*free_mbox)(struct lio_device *);
373 int (*setup_device_regs)(struct lio_device *);
374 int (*enable_io_queues)(struct lio_device *);
375 void (*disable_io_queues)(struct lio_device *);
/** PF/VF handshake word exchanged with firmware; the two endian branches
 * declare the same bitfields in mirrored order.
 */
378 struct lio_pf_vf_hs_word {
379 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
380 /** PKIND value assigned for the DPI interface */
383 /** OCTEON core clock multiplier */
384 uint64_t core_tics_per_us : 16;
386 /** OCTEON coprocessor clock multiplier */
387 uint64_t coproc_tics_per_us : 16;
389 /** app that currently running on OCTEON */
390 uint64_t app_mode : 8;
393 uint64_t reserved : 16;
395 #elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
398 uint64_t reserved : 16;
400 /** app that currently running on OCTEON */
401 uint64_t app_mode : 8;
403 /** OCTEON coprocessor clock multiplier */
404 uint64_t coproc_tics_per_us : 16;
406 /** OCTEON core clock multiplier */
407 uint64_t core_tics_per_us : 16;
409 /** PKIND value assigned for the DPI interface */
/** SR-IOV related bookkeeping for this device. */
414 struct lio_sriov_info {
415 /** Number of rings assigned to VF */
416 uint32_t rings_per_vf;
418 /** Number of VF devices enabled */
422 /* Head of a response list */
423 struct lio_response_list {
424 /** List structure to add delete pending entries to */
425 struct lio_stailq_head head;
427 /** A lock for this response list */
/** Number of requests on this list still awaiting a response. */
430 rte_atomic64_t pending_req_count;
433 /* Structure to define the configuration attributes for each Input queue. */
434 struct lio_iq_config {
435 /* Max number of IQs available */
438 /** Pending list size (usually set to the sum of the size of all Input
441 uint32_t pending_list_size;
443 /** Command size - 32 or 64 bytes */
447 /* Structure to define the configuration attributes for each Output queue. */
448 struct lio_oq_config {
449 /* Max number of OQs available */
452 /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
455 /** The number of buffers that were consumed during packet processing by
456 * the driver on this Output queue before the driver attempts to
457 * replenish the descriptor ring with new buffers.
459 uint32_t refill_threshold;
462 /* Structure to define the configuration. */
/** Printable card model name -- presumably used for logging; confirm. */
465 const char *card_name;
467 /** Input Queue attributes. */
468 struct lio_iq_config iq;
470 /** Output Queue attributes. */
471 struct lio_oq_config oq;
/* Num of desc for tx rings -- assumed by symmetry with rx below;
 * original comment lost in extraction.
 */
475 int num_def_tx_descs;
477 /* Num of desc for rx rings */
478 int num_def_rx_descs;
483 /** Status of a RGMII Link on Octeon as seen by core driver. */
484 union octeon_link_status {
/** Raw 64-bit view of the link status word. */
485 uint64_t link_status64;
/* Endian branches mirror the same bitfields so the firmware-provided
 * 64-bit word decodes identically on either host byte order.
 */
488 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
492 uint64_t link_up : 1;
493 uint64_t autoneg : 1;
494 uint64_t if_mode : 5;
496 uint64_t flashing : 1;
497 uint64_t reserved : 15;
499 uint64_t reserved : 15;
500 uint64_t flashing : 1;
502 uint64_t if_mode : 5;
503 uint64_t autoneg : 1;
504 uint64_t link_up : 1;
512 /** The rxpciq info passed to host from the firmware */
513 union octeon_rxpciq {
517 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
519 uint64_t reserved : 56;
521 uint64_t reserved : 56;
527 /** Information for a OCTEON ethernet interface shared between core & host. */
528 struct octeon_link_info {
/** Current link status word for this interface. */
529 union octeon_link_status link;
/* Endian branches mirror the same bitfields (see octeon_link_status). */
532 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
533 uint64_t gmxport : 16;
534 uint64_t macaddr_is_admin_assigned : 1;
535 uint64_t vlan_is_admin_assigned : 1;
537 uint64_t num_txpciq : 8;
538 uint64_t num_rxpciq : 8;
540 uint64_t num_rxpciq : 8;
541 uint64_t num_txpciq : 8;
543 uint64_t vlan_is_admin_assigned : 1;
544 uint64_t macaddr_is_admin_assigned : 1;
545 uint64_t gmxport : 16;
/** Per-queue tx/rx PCI queue descriptors reported by the firmware. */
548 union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
549 union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
552 /* ----------------------- THE LIO DEVICE --------------------------- */
554 * Each lio device has this structure to represent all its
558 /** PCI device pointer */
559 struct rte_pci_device *pci_dev;
561 /** Octeon Chip type */
566 /** This device's PCIe port used for traffic. */
569 /** The state of this device */
570 rte_atomic64_t status;
/** Link information shared with the firmware for this interface. */
572 struct octeon_link_info linfo;
/** Per-chip function dispatch table. */
576 struct lio_fn_list fn_list;
580 /** Guards each glist */
581 rte_spinlock_t *glist_lock;
582 /** Array of gather component linked lists */
583 struct lio_stailq_head *glist_head;
585 /* The pool containing pre allocated buffers used for soft commands */
586 struct rte_mempool *sc_buf_pool;
588 /** The input instruction queues */
589 struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
591 /** The singly-linked tail queues of instruction response */
592 struct lio_response_list response_list;
596 /** The DROQ output queues */
597 struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
/** Mask of I/O queues currently enabled on this device. */
599 struct lio_io_enable io_qmask;
/** SR-IOV configuration for this device. */
601 struct lio_sriov_info sriov_info;
/** PF/VF handshake word exchanged with the firmware. */
603 struct lio_pf_vf_hs_word pfvf_hsword;
605 /** Mail Box details of each lio queue. */
606 struct lio_mbox **mbox;
608 char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
/** Default configuration selected for this card. */
610 const struct lio_config *default_config;
/** Back-pointer to the DPDK ethdev that owns this device. */
612 struct rte_eth_dev *eth_dev;
/* max_*: hardware capability limits; nb_*: counts currently configured
 * by the application -- confirm against dev_configure path.
 */
615 uint8_t max_rx_queues;
616 uint8_t max_tx_queues;
617 uint8_t nb_rx_queues;
618 uint8_t nb_tx_queues;
/** Nonzero once the port has been configured. */
619 uint8_t port_configured;
622 #endif /* _LIO_STRUCT_H_ */