4 * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _LIO_STRUCT_H_
35 #define _LIO_STRUCT_H_
39 #include <sys/queue.h>
41 #include <rte_spinlock.h>
42 #include <rte_atomic.h>
44 #include "lio_hw_defs.h"
/* Generic node embedded in driver objects that need to sit on a
 * singly-linked tail queue (<sys/queue.h> STAILQ).
 */
46 struct lio_stailq_node {
47 STAILQ_ENTRY(lio_stailq_node) entries;
/* Declares the list-head type (struct lio_stailq_head) for queues of
 * lio_stailq_node entries.
 */
50 STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
59 /** The Descriptor Ring Output Queue structure.
60 * This structure has all the information required to implement a
64 /** A spinlock to protect access to this ring. */
/* Back-pointer to the owning device. */
71 struct lio_device *lio_dev;
73 /** The 8B aligned descriptor ring starts at this address. */
74 struct lio_droq_desc *desc_ring;
76 /** Index in the ring where the driver should read the next packet */
79 /** Index in the ring where Octeon will write the next packet */
82 /** Index in the ring where the driver will refill the descriptor's
87 /** Packets pending to be processed */
88 rte_atomic64_t pkts_pending;
90 /** Number of descriptors in this ring. */
93 /** The number of descriptors pending refill. */
94 uint32_t refill_count;
/* NOTE(review): presumably the refill_count level at which the driver
 * replenishes the ring — confirm against the DROQ refill path.
 */
96 uint32_t refill_threshold;
98 /** The 8B aligned info ptrs begin from this address. */
99 struct lio_droq_info *info_list;
101 /** The receive buffer list. This list has the virtual addresses of the
104 struct lio_recv_buffer *recv_buf_list;
106 /** The size of each buffer pointed by the buffer pointer. */
107 uint32_t buffer_size;
109 /** Pointer to the mapped packet credit register.
110 * Host writes number of info/buffer ptrs available to this register
112 void *pkts_credit_reg;
114 /** Pointer to the mapped packet sent register.
115 * Octeon writes the number of packets DMA'ed to host memory
120 /** DMA mapped address of the DROQ descriptor ring. */
121 size_t desc_ring_dma;
123 /** Info ptr list are allocated at this virtual address. */
124 size_t info_base_addr;
126 /** DMA mapped address of the info list */
127 size_t info_list_dma;
129 /** Allocated size of info list. */
130 uint32_t info_alloc_size;
/* Memzones backing the descriptor ring and info list, and the mbuf
 * mempool used to refill receive buffers.
 */
133 const struct rte_memzone *desc_ring_mz;
134 const struct rte_memzone *info_mz;
135 struct rte_mempool *mpool;
138 /** Receive Header */
/* The bitfield layouts below are declared twice: once for big-endian and
 * once (mirrored, fields in reverse order) for little-endian hosts, so the
 * 64-bit receive header has the same wire layout on both.
 */
140 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
144 uint64_t subcode : 8;
145 uint64_t len : 3; /** additional 64-bit words */
146 uint64_t reserved : 17;
147 uint64_t ossp : 32; /** opcode/subcode specific parameters */
151 uint64_t subcode : 8;
152 uint64_t len : 3; /** additional 64-bit words */
155 uint64_t priority : 3;
156 uint64_t csum_verified : 3; /** checksum verified. */
157 uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
158 uint64_t encap_on : 1;
159 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
163 uint64_t subcode : 8;
164 uint64_t len : 3; /** additional 64-bit words */
165 uint64_t reserved : 8;
167 uint64_t gmxport : 16;
/* Little-endian variants of the same layouts (field order reversed). */
172 uint64_t ossp : 32; /** opcode/subcode specific parameters */
173 uint64_t reserved : 17;
174 uint64_t len : 3; /** additional 64-bit words */
175 uint64_t subcode : 8;
179 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
180 uint64_t encap_on : 1;
181 uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
182 uint64_t csum_verified : 3; /** checksum verified. */
183 uint64_t priority : 3;
186 uint64_t len : 3; /** additional 64-bit words */
187 uint64_t subcode : 8;
191 uint64_t gmxport : 16;
193 uint64_t reserved : 8;
194 uint64_t len : 3; /** additional 64-bit words */
195 uint64_t subcode : 8;
/* Size in bytes of the receive header union (8). */
201 #define OCTEON_RH_SIZE (sizeof(union octeon_rh))
203 /** The txpciq info passed to host from the firmware */
204 union octeon_txpciq {
/* Endian-mirrored bitfield layouts, as for union octeon_rh above. */
208 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
212 uint64_t use_qpg : 1;
214 uint64_t aura_num : 10;
215 uint64_t reserved : 20;
217 uint64_t reserved : 20;
218 uint64_t aura_num : 10;
220 uint64_t use_qpg : 1;
228 /** The instruction (input) queue.
229 * The input queue is used to post raw (instruction) mode data or packet
230 * data to Octeon device from the host. Each input queue for
231 * a LIO device has one such structure to represent it.
233 struct lio_instr_queue {
234 /** A spinlock to protect access to the input ring. */
/* Lock taken while posting commands to the ring. */
237 rte_spinlock_t post_lock;
/* Back-pointer to the owning device. */
239 struct lio_device *lio_dev;
/* Count of packets whose completion has been observed. */
241 uint32_t pkt_in_done;
/* Flag (atomic) indicating an IQ flush is in progress. */
243 rte_atomic64_t iq_flush_running;
245 /** Flag that indicates if the queue uses 64 byte commands. */
246 uint32_t iqcmd_64B:1;
/* Queue identity/configuration as reported by firmware. */
249 union octeon_txpciq txpciq;
255 /** Maximum no. of instructions in this queue. */
258 /** Index in input ring where the driver should write the next packet */
259 uint32_t host_write_index;
261 /** Index in input ring where Octeon is expected to read the next
264 uint32_t lio_read_index;
266 /** This index aids in finding the window in the queue where Octeon
267 * has read the commands.
269 uint32_t flush_index;
271 /** This field keeps track of the instructions pending in this queue. */
272 rte_atomic64_t instr_pending;
274 /** Pointer to the Virtual Base addr of the input ring. */
/* Per-entry bookkeeping used to free buffers once Octeon fetches them. */
277 struct lio_request_list *request_list;
279 /** Octeon doorbell register for the ring. */
282 /** Octeon instruction count register for this ring. */
285 /** Number of instructions pending to be posted to Octeon. */
288 /** DMA mapped base address of the input descriptor ring. */
289 uint64_t base_addr_dma;
291 /** Application context */
294 /* network stack queue index */
/* Memzone backing this instruction queue's ring. */
298 const struct rte_memzone *iq_mz;
301 /** This structure is used by driver to store information required
302 * to free the mbuff when the packet has been fetched by Octeon.
303 * Bytes offset below assume worst-case of a 64-bit system.
305 struct lio_buf_free_info {
306 /** Bytes 1-8. Pointer to network device private structure. */
307 struct lio_device *lio_dev;
309 /** Bytes 9-16. Pointer to mbuff. */
310 struct rte_mbuf *mbuf;
312 /** Bytes 17-24. Pointer to gather list. */
313 struct lio_gather *g;
315 /** Bytes 25-32. Physical address of mbuf->data or gather list. */
318 /** Bytes 33-47. Piggybacked soft command, if any */
319 struct lio_soft_command *sc;
321 /** Bytes 48-63. iq no */
325 /* The Scatter-Gather List Entry. The scatter or gather component used with
326 * input instruction has this format.
328 struct lio_sg_entry {
329 /** The first 64 bit gives the size of data in each dptr. */
335 /** The 4 dptr pointers for this entry. */
/* Size in bytes of one scatter-gather entry. */
339 #define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
341 /** Structure of a node in list of gather components maintained by
342 * driver for each network device.
345 /** List manipulation. Next and prev pointers. */
346 struct lio_stailq_node list;
348 /** Size of the gather component at sg in bytes. */
351 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
354 /** Gather component that can accommodate max sized fragment list
355 * received from the IP layer.
357 struct lio_sg_entry *sg;
/* RSS state: hash key, indirection table and per-protocol hash enables.
 * NOTE(review): the enclosing struct declaration is not visible here.
 */
361 uint16_t hash_key_size;
362 uint8_t hash_key[LIO_RSS_MAX_KEY_SZ];
363 /* Ideally a factor of number of queues */
364 uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
/* Per-traffic-type hash enable flags; hash_disable turns RSS off. */
369 uint8_t ipv6_tcp_hash;
371 uint8_t ipv6_tcp_ex_hash;
372 uint8_t hash_disable;
/* Bitmasks of enabled input/output queues. */
375 struct lio_io_enable {
/* Table of chip-specific operations; each entry takes the device and,
 * where applicable, a queue number.
 */
382 void (*setup_iq_regs)(struct lio_device *, uint32_t);
383 void (*setup_oq_regs)(struct lio_device *, uint32_t);
385 int (*setup_mbox)(struct lio_device *);
386 void (*free_mbox)(struct lio_device *);
388 int (*setup_device_regs)(struct lio_device *);
389 int (*enable_io_queues)(struct lio_device *);
390 void (*disable_io_queues)(struct lio_device *);
/* 64-bit PF/VF handshake word; bitfields are declared in both byte
 * orders so the layout matches on big- and little-endian hosts.
 */
393 struct lio_pf_vf_hs_word {
394 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
395 /** PKIND value assigned for the DPI interface */
398 /** OCTEON core clock multiplier */
399 uint64_t core_tics_per_us : 16;
401 /** OCTEON coprocessor clock multiplier */
402 uint64_t coproc_tics_per_us : 16;
404 /** app that currently running on OCTEON */
405 uint64_t app_mode : 8;
408 uint64_t reserved : 16;
410 #elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
413 uint64_t reserved : 16;
415 /** app that currently running on OCTEON */
416 uint64_t app_mode : 8;
418 /** OCTEON coprocessor clock multiplier */
419 uint64_t coproc_tics_per_us : 16;
421 /** OCTEON core clock multiplier */
422 uint64_t core_tics_per_us : 16;
424 /** PKIND value assigned for the DPI interface */
/* SR-IOV configuration for this device. */
429 struct lio_sriov_info {
430 /** Number of rings assigned to VF */
431 uint32_t rings_per_vf;
433 /** Number of VF devices enabled */
437 /* Head of a response list */
438 struct lio_response_list {
439 /** List structure to add delete pending entries to */
440 struct lio_stailq_head head;
442 /** A lock for this response list */
/* Atomic count of requests still awaiting a response. */
445 rte_atomic64_t pending_req_count;
448 /* Structure to define the configuration attributes for each Input queue. */
449 struct lio_iq_config {
450 /* Max number of IQs available */
453 /** Pending list size (usually set to the sum of the size of all Input
456 uint32_t pending_list_size;
458 /** Command size - 32 or 64 bytes */
462 /* Structure to define the configuration attributes for each Output queue. */
463 struct lio_oq_config {
464 /* Max number of OQs available */
467 /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
470 /** The number of buffers that were consumed during packet processing by
471 * the driver on this Output queue before the driver attempts to
472 * replenish the descriptor ring with new buffers.
474 uint32_t refill_threshold;
477 /* Structure to define the configuration. */
/* Printable name of the card this configuration applies to. */
480 const char *card_name;
482 /** Input Queue attributes. */
483 struct lio_iq_config iq;
485 /** Output Queue attributes. */
486 struct lio_oq_config oq;
/* Default descriptor counts for tx/rx rings. */
490 int num_def_tx_descs;
492 /* Num of desc for rx rings */
493 int num_def_rx_descs;
498 /** Status of a RGMII Link on Octeon as seen by core driver. */
499 union octeon_link_status {
/* Raw 64-bit view of the link status word. */
500 uint64_t link_status64;
/* Endian-mirrored bitfield layouts of the same word. */
503 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
507 uint64_t link_up : 1;
508 uint64_t autoneg : 1;
509 uint64_t if_mode : 5;
511 uint64_t flashing : 1;
512 uint64_t reserved : 15;
514 uint64_t reserved : 15;
515 uint64_t flashing : 1;
517 uint64_t if_mode : 5;
518 uint64_t autoneg : 1;
519 uint64_t link_up : 1;
527 /** The rxpciq info passed to host from the firmware */
528 union octeon_rxpciq {
/* Endian-mirrored bitfield layouts, as for union octeon_txpciq. */
532 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
534 uint64_t reserved : 56;
536 uint64_t reserved : 56;
542 /** Information for a OCTEON ethernet interface shared between core & host. */
543 struct octeon_link_info {
/* Current link status word (see union octeon_link_status). */
544 union octeon_link_status link;
/* Endian-mirrored bitfield layouts of the interface descriptor word. */
547 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
548 uint64_t gmxport : 16;
549 uint64_t macaddr_is_admin_assigned : 1;
550 uint64_t vlan_is_admin_assigned : 1;
552 uint64_t num_txpciq : 8;
553 uint64_t num_rxpciq : 8;
555 uint64_t num_rxpciq : 8;
556 uint64_t num_txpciq : 8;
558 uint64_t vlan_is_admin_assigned : 1;
559 uint64_t macaddr_is_admin_assigned : 1;
560 uint64_t gmxport : 16;
/* Per-interface tx/rx PCI queue descriptors reported by firmware. */
563 union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
564 union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
567 /* ----------------------- THE LIO DEVICE --------------------------- */
569 * Each lio device has this structure to represent all its
573 /** PCI device pointer */
574 struct rte_pci_device *pci_dev;
576 /** Octeon Chip type */
581 /** This device's PCIe port used for traffic. */
584 /** The state of this device */
585 rte_atomic64_t status;
/* Link/interface info shared with firmware. */
589 struct octeon_link_info linfo;
/* Chip-specific function table (see struct lio_fn_list). */
593 struct lio_fn_list fn_list;
597 /** Guards each glist */
598 rte_spinlock_t *glist_lock;
599 /** Array of gather component linked lists */
600 struct lio_stailq_head *glist_head;
602 /* The pool containing pre allocated buffers used for soft commands */
603 struct rte_mempool *sc_buf_pool;
605 /** The input instruction queues */
606 struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
608 /** The singly-linked tail queues of instruction response */
609 struct lio_response_list response_list;
613 /** The DROQ output queues */
614 struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
/* Bitmasks of enabled IQs/OQs. */
616 struct lio_io_enable io_qmask;
/* SR-IOV state. */
618 struct lio_sriov_info sriov_info;
/* PF/VF handshake word (see struct lio_pf_vf_hs_word). */
620 struct lio_pf_vf_hs_word pfvf_hsword;
622 /** Mail Box details of each lio queue. */
623 struct lio_mbox **mbox;
625 char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
/* Default queue configuration for this card. */
627 const struct lio_config *default_config;
/* Back-pointer to the ethdev that owns this device. */
629 struct rte_eth_dev *eth_dev;
/* Queue limits and currently configured queue counts. */
632 uint8_t max_rx_queues;
633 uint8_t max_tx_queues;
634 uint8_t nb_rx_queues;
635 uint8_t nb_tx_queues;
/* Nonzero once the port has been configured. */
636 uint8_t port_configured;
/* RSS state (key, indirection table, hash enables). */
637 struct lio_rss_ctx rss_state;
640 #endif /* _LIO_STRUCT_H_ */