4 * Copyright (c) 2017 Cavium, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _LIO_STRUCT_H_
35 #define _LIO_STRUCT_H_
39 #include <sys/queue.h>
41 #include <rte_spinlock.h>
42 #include <rte_atomic.h>
44 #include "lio_hw_defs.h"
46 struct lio_stailq_node {
47 STAILQ_ENTRY(lio_stailq_node) entries;
/* Head type for a singly-linked tail queue of lio_stailq_node entries. */
50 STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
59 /** Input Queue statistics. Each input queue has a set of these stats fields. */
61 uint64_t instr_posted; /**< Instructions posted to this queue. */
62 uint64_t instr_processed; /**< Instructions processed in this queue. */
63 uint64_t instr_dropped; /**< Instructions that could not be processed */
64 uint64_t bytes_sent; /**< Bytes sent through this queue. */
65 uint64_t tx_done; /**< Num of packets sent to network. */
66 uint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */
67 uint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */
68 uint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */
71 /** Output Queue statistics. Each output queue has a set of these stats fields. */
72 struct lio_droq_stats {
73 /** Number of packets received in this queue. */
74 uint64_t pkts_received;
76 /** Bytes received by this queue. */
77 uint64_t bytes_received;
79 /** Packets dropped due to no memory available. */
80 uint64_t dropped_nomem;
82 /** Packets dropped due to large number of pkts to process. */
83 uint64_t dropped_toomany;
85 /** Number of packets sent to stack from this queue. */
86 uint64_t rx_pkts_received;
88 /** Number of Bytes sent to stack from this queue. */
89 uint64_t rx_bytes_received;
91 /** Num of Packets dropped due to receive path failures. */
94 /** Num of vxlan packets received. */
97 /** Num of failures of rte_pktmbuf_alloc() */
98 uint64_t rx_alloc_failure;
102 /** The Descriptor Ring Output Queue structure.
103 * This structure has all the information required to implement a
107 /** A spinlock to protect access to this ring. */
114 struct lio_device *lio_dev;
116 /** The 8B aligned descriptor ring starts at this address. */
117 struct lio_droq_desc *desc_ring;
119 /** Index in the ring where the driver should read the next packet */
122 /** Index in the ring where Octeon will write the next packet */
125 /** Index in the ring where the driver will refill the descriptor's
130 /** Packets pending to be processed */
131 rte_atomic64_t pkts_pending;
133 /** Number of descriptors in this ring. */
136 /** The number of descriptors pending refill. */
137 uint32_t refill_count;
139 uint32_t refill_threshold;
141 /** The 8B aligned info ptrs begin from this address. */
142 struct lio_droq_info *info_list;
144 /** The receive buffer list. This list has the virtual addresses of the
147 struct lio_recv_buffer *recv_buf_list;
149 /** The size of each buffer pointed by the buffer pointer. */
150 uint32_t buffer_size;
152 /** Pointer to the mapped packet credit register.
153 * Host writes number of info/buffer ptrs available to this register
155 void *pkts_credit_reg;
157 /** Pointer to the mapped packet sent register.
158 * Octeon writes the number of packets DMA'ed to host memory
163 /** Statistics for this DROQ. */
164 struct lio_droq_stats stats;
166 /** DMA mapped address of the DROQ descriptor ring. */
167 size_t desc_ring_dma;
169 /** Info ptr list are allocated at this virtual address. */
170 size_t info_base_addr;
172 /** DMA mapped address of the info list */
173 size_t info_list_dma;
175 /** Allocated size of info list. */
176 uint32_t info_alloc_size;
179 const struct rte_memzone *desc_ring_mz;
180 const struct rte_memzone *info_mz;
181 struct rte_mempool *mpool;
184 /** Receive Header */
186 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
190 uint64_t subcode : 8;
191 uint64_t len : 3; /** additional 64-bit words */
192 uint64_t reserved : 17;
193 uint64_t ossp : 32; /** opcode/subcode specific parameters */
197 uint64_t subcode : 8;
198 uint64_t len : 3; /** additional 64-bit words */
201 uint64_t priority : 3;
202 uint64_t csum_verified : 3; /** checksum verified. */
203 uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
204 uint64_t encap_on : 1;
205 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
209 uint64_t subcode : 8;
210 uint64_t len : 3; /** additional 64-bit words */
211 uint64_t reserved : 8;
213 uint64_t gmxport : 16;
218 uint64_t ossp : 32; /** opcode/subcode specific parameters */
219 uint64_t reserved : 17;
220 uint64_t len : 3; /** additional 64-bit words */
221 uint64_t subcode : 8;
225 uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
226 uint64_t encap_on : 1;
227 uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
228 uint64_t csum_verified : 3; /** checksum verified. */
229 uint64_t priority : 3;
232 uint64_t len : 3; /** additional 64-bit words */
233 uint64_t subcode : 8;
237 uint64_t gmxport : 16;
239 uint64_t reserved : 8;
240 uint64_t len : 3; /** additional 64-bit words */
241 uint64_t subcode : 8;
/* Size in bytes of the receive header (union octeon_rh) that precedes packet data. */
247 #define OCTEON_RH_SIZE (sizeof(union octeon_rh))
249 /** The txpciq info passed to host from the firmware */
250 union octeon_txpciq {
254 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
258 uint64_t use_qpg : 1;
260 uint64_t aura_num : 10;
261 uint64_t reserved : 20;
263 uint64_t reserved : 20;
264 uint64_t aura_num : 10;
266 uint64_t use_qpg : 1;
274 /** The instruction (input) queue.
275 * The input queue is used to post raw (instruction) mode data or packet
276 * data to Octeon device from the host. Each input queue for
277 * a LIO device has one such structure to represent it.
279 struct lio_instr_queue {
280 /** A spinlock to protect access to the input ring. */
283 rte_spinlock_t post_lock;
285 struct lio_device *lio_dev;
287 uint32_t pkt_in_done;
289 rte_atomic64_t iq_flush_running;
291 /** Flag that indicates if the queue uses 64 byte commands. */
292 uint32_t iqcmd_64B:1;
295 union octeon_txpciq txpciq;
301 /** Maximum no. of instructions in this queue. */
304 /** Index in input ring where the driver should write the next packet */
305 uint32_t host_write_index;
307 /** Index in input ring where Octeon is expected to read the next
310 uint32_t lio_read_index;
312 /** This index aids in finding the window in the queue where Octeon
313 * has read the commands.
315 uint32_t flush_index;
317 /** This field keeps track of the instructions pending in this queue. */
318 rte_atomic64_t instr_pending;
320 /** Pointer to the Virtual Base addr of the input ring. */
323 struct lio_request_list *request_list;
325 /** Octeon doorbell register for the ring. */
328 /** Octeon instruction count register for this ring. */
331 /** Number of instructions pending to be posted to Octeon. */
334 /** Statistics for this input queue. */
335 struct lio_iq_stats stats;
337 /** DMA mapped base address of the input descriptor ring. */
338 uint64_t base_addr_dma;
340 /** Application context */
343 /* network stack queue index */
347 const struct rte_memzone *iq_mz;
350 /** This structure is used by driver to store information required
351 * to free the mbuf when the packet has been fetched by Octeon.
352 * Bytes offset below assume worst-case of a 64-bit system.
354 struct lio_buf_free_info {
355 /** Bytes 1-8. Pointer to network device private structure. */
356 struct lio_device *lio_dev;
358 /** Bytes 9-16. Pointer to mbuff. */
359 struct rte_mbuf *mbuf;
361 /** Bytes 17-24. Pointer to gather list. */
362 struct lio_gather *g;
364 /** Bytes 25-32. Physical address of mbuf->data or gather list. */
367 /** Bytes 33-47. Piggybacked soft command, if any */
368 struct lio_soft_command *sc;
370 /** Bytes 48-63. iq no */
374 /* The Scatter-Gather List Entry. The scatter or gather component used with
375 * input instruction has this format.
377 struct lio_sg_entry {
378 /** The first 64 bit gives the size of data in each dptr. */
384 /** The 4 dptr pointers for this entry. */
/* Size in bytes of one scatter/gather list entry (struct lio_sg_entry). */
388 #define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
390 /** Structure of a node in list of gather components maintained by
391 * driver for each network device.
394 /** List manipulation. Next and prev pointers. */
395 struct lio_stailq_node list;
397 /** Size of the gather component at sg in bytes. */
400 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
403 /** Gather component that can accommodate max sized fragment list
404 * received from the IP layer.
406 struct lio_sg_entry *sg;
410 uint16_t hash_key_size;
411 uint8_t hash_key[LIO_RSS_MAX_KEY_SZ];
412 /* Ideally a factor of number of queues */
413 uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
418 uint8_t ipv6_tcp_hash;
420 uint8_t ipv6_tcp_ex_hash;
421 uint8_t hash_disable;
424 struct lio_io_enable {
431 void (*setup_iq_regs)(struct lio_device *, uint32_t);
432 void (*setup_oq_regs)(struct lio_device *, uint32_t);
434 int (*setup_mbox)(struct lio_device *);
435 void (*free_mbox)(struct lio_device *);
437 int (*setup_device_regs)(struct lio_device *);
438 int (*enable_io_queues)(struct lio_device *);
439 void (*disable_io_queues)(struct lio_device *);
442 struct lio_pf_vf_hs_word {
443 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
444 /** PKIND value assigned for the DPI interface */
447 /** OCTEON core clock multiplier */
448 uint64_t core_tics_per_us : 16;
450 /** OCTEON coprocessor clock multiplier */
451 uint64_t coproc_tics_per_us : 16;
453 /** app that is currently running on OCTEON */
454 uint64_t app_mode : 8;
457 uint64_t reserved : 16;
459 #elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
462 uint64_t reserved : 16;
464 /** app that is currently running on OCTEON */
465 uint64_t app_mode : 8;
467 /** OCTEON coprocessor clock multiplier */
468 uint64_t coproc_tics_per_us : 16;
470 /** OCTEON core clock multiplier */
471 uint64_t core_tics_per_us : 16;
473 /** PKIND value assigned for the DPI interface */
478 struct lio_sriov_info {
479 /** Number of rings assigned to VF */
480 uint32_t rings_per_vf;
482 /** Number of VF devices enabled */
486 /* Head of a response list */
487 struct lio_response_list {
488 /** List structure to add delete pending entries to */
489 struct lio_stailq_head head;
491 /** A lock for this response list */
494 rte_atomic64_t pending_req_count;
497 /* Structure to define the configuration attributes for each Input queue. */
498 struct lio_iq_config {
499 /* Max number of IQs available */
502 /** Pending list size (usually set to the sum of the size of all Input
505 uint32_t pending_list_size;
507 /** Command size - 32 or 64 bytes */
511 /* Structure to define the configuration attributes for each Output queue. */
512 struct lio_oq_config {
513 /* Max number of OQs available */
516 /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
519 /** The number of buffers that were consumed during packet processing by
520 * the driver on this Output queue before the driver attempts to
521 * replenish the descriptor ring with new buffers.
523 uint32_t refill_threshold;
526 /* Structure to define the configuration. */
529 const char *card_name;
531 /** Input Queue attributes. */
532 struct lio_iq_config iq;
534 /** Output Queue attributes. */
535 struct lio_oq_config oq;
539 int num_def_tx_descs;
541 /* Num of desc for rx rings */
542 int num_def_rx_descs;
547 /** Status of a RGMII Link on Octeon as seen by core driver. */
548 union octeon_link_status {
549 uint64_t link_status64;
552 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
556 uint64_t link_up : 1;
557 uint64_t autoneg : 1;
558 uint64_t if_mode : 5;
560 uint64_t flashing : 1;
561 uint64_t reserved : 15;
563 uint64_t reserved : 15;
564 uint64_t flashing : 1;
566 uint64_t if_mode : 5;
567 uint64_t autoneg : 1;
568 uint64_t link_up : 1;
576 /** The rxpciq info passed to host from the firmware */
577 union octeon_rxpciq {
581 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
583 uint64_t reserved : 56;
585 uint64_t reserved : 56;
591 /** Information for an OCTEON ethernet interface shared between core & host. */
592 struct octeon_link_info {
593 union octeon_link_status link;
596 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
597 uint64_t gmxport : 16;
598 uint64_t macaddr_is_admin_assigned : 1;
599 uint64_t vlan_is_admin_assigned : 1;
601 uint64_t num_txpciq : 8;
602 uint64_t num_rxpciq : 8;
604 uint64_t num_rxpciq : 8;
605 uint64_t num_txpciq : 8;
607 uint64_t vlan_is_admin_assigned : 1;
608 uint64_t macaddr_is_admin_assigned : 1;
609 uint64_t gmxport : 16;
612 union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
613 union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
616 /* ----------------------- THE LIO DEVICE --------------------------- */
618 * Each lio device has this structure to represent all its
622 /** PCI device pointer */
623 struct rte_pci_device *pci_dev;
625 /** Octeon Chip type */
630 /** This device's PCIe port used for traffic. */
633 /** The state of this device */
634 rte_atomic64_t status;
638 struct octeon_link_info linfo;
642 struct lio_fn_list fn_list;
646 /** Guards each glist */
647 rte_spinlock_t *glist_lock;
648 /** Array of gather component linked lists */
649 struct lio_stailq_head *glist_head;
651 /* The pool containing pre allocated buffers used for soft commands */
652 struct rte_mempool *sc_buf_pool;
654 /** The input instruction queues */
655 struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
657 /** The singly-linked tail queues of instruction response */
658 struct lio_response_list response_list;
662 /** The DROQ output queues */
663 struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
665 struct lio_io_enable io_qmask;
667 struct lio_sriov_info sriov_info;
669 struct lio_pf_vf_hs_word pfvf_hsword;
671 /** Mail Box details of each lio queue. */
672 struct lio_mbox **mbox;
674 char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
676 const struct lio_config *default_config;
678 struct rte_eth_dev *eth_dev;
681 uint8_t max_rx_queues;
682 uint8_t max_tx_queues;
683 uint8_t nb_rx_queues;
684 uint8_t nb_tx_queues;
685 uint8_t port_configured;
686 struct lio_rss_ctx rss_state;
689 #endif /* _LIO_STRUCT_H_ */