1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation.
/* Log type for this driver; registered in the corresponding .c file. */
10 extern int ntb_logtype;
/* Driver-wide logging macro wrapping rte_log().
 * NOTE(review): the macro's trailing continuation line (function name /
 * ##args arguments) is not visible in this chunk — confirm against the
 * full header before editing.
 */
12 #define NTB_LOG(level, fmt, args...) \
13 rte_log(RTE_LOG_ ## level, ntb_logtype, "%s(): " fmt "\n", \
/* PCI vendor ID for Intel devices. */
17 #define NTB_INTEL_VENDOR_ID 0x8086
/* PCI device ID: Intel Skylake back-to-back NTB. */
20 #define NTB_INTEL_DEV_ID_B2B_SKX 0x201C
22 /* Reserved to app to use. */
23 #define NTB_SPAD_USER "spad_user_"
/* Length of the "spad_user_" name prefix, excluding the terminating NUL. */
24 #define NTB_SPAD_USER_LEN (sizeof(NTB_SPAD_USER) - 1)
/* Number of scratchpad registers reserved for application use
 * (sizes spad_user_list in struct ntb_hw below). */
25 #define NTB_SPAD_USER_MAX_NUM 4
/* Maximum length of an attribute name string. */
26 #define NTB_ATTR_NAME_LEN 30
/* Default value for tx_free_thresh (see struct ntb_tx_queue). */
28 #define NTB_DFLT_TX_FREE_THRESH 256
60 /* Define spad registers usage. 0 is reserved. */
77 * NTB device operations
78 * @ntb_dev_init: Init ntb dev.
79 * @get_peer_mw_addr: To get the addr of peer mw[mw_idx].
80 * @mw_set_trans: Set translation of internal memory that remote can access.
81 * @get_link_status: Get link status, link speed and link width.
82 * @set_link: Set local side up/down.
83 * @spad_read: Read local/peer spad register val.
84 * @spad_write: Write val to local/peer spad register.
85 * @db_read: Read doorbells status.
86 * @db_clear: Clear local doorbells.
87 * @db_set_mask: Set bits in db mask, preventing db interrupts generated
89 * @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
90 * @vector_bind: Bind vector source [intr] to msix vector [msix].
/* Function-pointer members of struct ntb_dev_ops (the "struct ntb_dev_ops {"
 * opener and closing brace are elided in this chunk). Semantics of each op
 * are described in the doc comment block preceding this struct.
 */
93 int (*ntb_dev_init)(const struct rte_rawdev *dev);
94 void *(*get_peer_mw_addr)(const struct rte_rawdev *dev, int mw_idx);
95 int (*mw_set_trans)(const struct rte_rawdev *dev, int mw_idx,
96 uint64_t addr, uint64_t size);
97 int (*get_link_status)(const struct rte_rawdev *dev);
98 int (*set_link)(const struct rte_rawdev *dev, bool up);
/* NOTE(review): spad_read's continuation line (presumably "bool peer);")
 * is elided in this chunk. */
99 uint32_t (*spad_read)(const struct rte_rawdev *dev, int spad,
101 int (*spad_write)(const struct rte_rawdev *dev, int spad,
102 bool peer, uint32_t spad_v);
103 uint64_t (*db_read)(const struct rte_rawdev *dev);
104 int (*db_clear)(const struct rte_rawdev *dev, uint64_t db_bits);
105 int (*db_set_mask)(const struct rte_rawdev *dev, uint64_t db_mask);
106 int (*peer_db_set)(const struct rte_rawdev *dev, uint8_t db_bit);
/* NOTE(review): vector_bind's continuation line (the msix parameter) is
 * elided in this chunk. */
107 int (*vector_bind)(const struct rte_rawdev *dev, uint8_t intr,
/* Members of the descriptor-ring entry (struct ntb_desc; opener elided). */
112 uint64_t addr; /* buffer addr */
113 uint16_t len; /* buffer length */
/* Flag bit carried in the used-entry flags marking the last buffer of a
 * packet. */
118 #define NTB_FLAG_EOP 1 /* end of packet */
/* Members of the used-ring entry (struct ntb_used; opener elided). */
120 uint16_t len; /* buffer length */
121 uint16_t flags; /* flags */
/* Per-descriptor RX software context: the mbuf posted for that slot.
 * NOTE(review): the closing brace of this struct is elided in this chunk.
 */
124 struct ntb_rx_entry {
125 struct rte_mbuf *mbuf;
/* RX queue state. NOTE(review): several members and the closing brace of
 * the original definition are elided in this chunk. */
128 struct ntb_rx_queue {
129 struct ntb_desc *rx_desc_ring;
/* volatile: updated outside local CPU control — presumably written by the
 * peer side; confirm against the RX path. */
130 volatile struct ntb_used *rx_used_ring;
132 volatile uint16_t *used_cnt;
/* Presumably the descriptor refill threshold — confirm against the RX
 * burst code. */
137 uint16_t rx_free_thresh;
139 struct rte_mempool *mpool; /* mempool for mbuf allocation */
/* Software shadow ring tracking the mbuf posted per descriptor. */
140 struct ntb_rx_entry *sw_ring;
142 uint16_t queue_id; /* DPDK queue index. */
143 uint16_t port_id; /* Device port identifier. */
/* Per-descriptor TX software context: the mbuf queued on that slot.
 * NOTE(review): the closing brace of this struct is elided in this chunk.
 */
148 struct ntb_tx_entry {
149 struct rte_mbuf *mbuf;
/* TX queue state. NOTE(review): several members and the closing brace of
 * the original definition are elided in this chunk. */
154 struct ntb_tx_queue {
/* volatile: descriptor ring visible to the remote side — confirm. */
155 volatile struct ntb_desc *tx_desc_ring;
156 struct ntb_used *tx_used_ring;
157 volatile uint16_t *avail_cnt;
159 uint16_t last_avail; /* Next need to be free. */
160 uint16_t last_used; /* Next need to be sent. */
/* NOTE(review): the member this next comment documents (original line 164)
 * is elided in this chunk. */
163 /* Total number of TX descriptors ready to be allocated. */
165 uint16_t tx_free_thresh;
/* Software shadow ring tracking the mbuf queued per descriptor. */
167 struct ntb_tx_entry *sw_ring;
169 uint16_t queue_id; /* DPDK queue index. */
170 uint16_t port_id; /* Device port identifier. */
/* Shared ring-header counters; each is cache-line aligned — presumably to
 * avoid false sharing between the two sides' updates (confirm).
 * NOTE(review): the enclosing struct's opener and closer are elided. */
176 uint16_t avail_cnt __rte_cache_aligned;
177 uint16_t used_cnt __rte_cache_aligned;
/* Flexible array member: the descriptor ring follows the header in memory. */
178 struct ntb_desc desc_ring[] __rte_cache_aligned;
181 /* ntb private data. */
/* Bitmask of doorbell bits valid for this device. */
187 uint64_t db_valid_mask;
/* Link state fields — presumably cached from the last hardware read via
 * ntb_ops->get_link_status; confirm against the driver code. */
192 enum ntb_link link_status;
193 enum ntb_speed link_speed;
194 enum ntb_width link_width;
/* Hardware-specific op table (see struct ntb_dev_ops above). */
196 const struct ntb_dev_ops *ntb_ops;
/* Underlying PCI device handle. */
198 struct rte_pci_device *pci_dev;
203 /* remote mem base addr */
204 uint64_t *peer_mw_base;
/* Number of configured queue pairs. */
206 uint16_t queue_pairs;
/* Presumably bytes of ring header reserved per queue — TODO confirm. */
208 uint32_t hdr_size_per_queue;
/* Per-queue state arrays, sized by queue_pairs — confirm at alloc site. */
210 struct ntb_rx_queue **rx_queues;
211 struct ntb_tx_queue **tx_queues;
213 /* memzone to populate RX ring. */
214 const struct rte_memzone **mz;
/* Number of memory windows the peer is using. */
217 uint8_t peer_used_mws;
219 /* Reserve several spad for app to use. */
220 int spad_user_list[NTB_SPAD_USER_MAX_NUM];