/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"

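/*
 * Memory window index to BAR mapping: the two Xeon NTB memory windows are
 * assumed to sit behind PCI BAR2 and BAR4 (values 2 and 4 below).
 */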
enum xeon_ntb_bar {
	XEON_NTB_BAR23 = 2,
	XEON_NTB_BAR45 = 4,
};

static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR23,
	XEON_NTB_BAR45,
};

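/*
 * Probe-time initialization: read the PPD (PCIe port definition) register,
 * accept only the back-to-back (B2B) topology with a non-split BAR4, record
 * whether this side is the upstream (USD) or downstream (DSD) device, and
 * cache the memory window, doorbell and scratchpad counts.
 */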
static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_val, bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
		return -EIO;
	}

	/* Check connection topo type. Only B2B is supported. */
	switch (reg_val & XEON_PPD_CONN_MASK) {
	case XEON_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is in use.");
		break;
	case XEON_PPD_CONN_TRANSPARENT:
	case XEON_PPD_CONN_RP:
	default:
		NTB_LOG(ERR, "Unsupported connection topo. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, downstream device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, upstream device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	/* Check if BAR4 is split. Split BAR is not supported. */
	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
		NTB_LOG(ERR, "Split BAR is not supported.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

	hw->mw_size = rte_zmalloc("ntb_mw_size",
				  hw->mw_cnt * sizeof(uint64_t), 0);
	for (i = 0; i < hw->mw_cnt; i++) {
		bar = intel_ntb_bar[i];
		hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
	}

	/* Reserve the last 2 spad registers for users. */
	for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
		hw->spad_user_list[i] = hw->spad_cnt;
	hw->spad_user_list[0] = hw->spad_cnt - 2;
	hw->spad_user_list[1] = hw->spad_cnt - 1;

	return 0;
}

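/*
 * Return the local virtual address of the BAR that backs memory window
 * mw_idx, i.e. the region through which the peer's memory is reached.
 */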
static void *
intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return NULL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return NULL;
	}

	bar = intel_ntb_bar[mw_idx];

	return hw->pci_dev->mem_resource[bar].addr;
}

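/*
 * Program the translation for memory window mw_idx: point the inbound
 * IMBAR translation at the local buffer described by addr/size, then raise
 * the matching EMBAR limit so the remote side can access the whole window.
 */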
static int
intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
		       uint64_t addr, uint64_t size)
{
	struct ntb_hw *hw = dev->dev_private;
	void *xlat_addr, *limit_addr;
	uint64_t xlat_off, limit_off;
	uint64_t base, limit;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return -EINVAL;
	}

	bar = intel_ntb_bar[mw_idx];

	xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_addr = hw->hw_addr + limit_off;

	/* Limit reg val should be EMBAR base address plus MW size. */
	base = addr;
	limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	/* Set up the external point so that the remote side can access. */
	xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_off = XEON_EMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_addr = hw->hw_addr + limit_off;
	base = rte_read64(xlat_addr);
	/* Clear the flag bits in the low nibble before computing the limit. */
	base &= ~0xf;
	limit = base + size;
	rte_write64(limit, limit_addr);

	return 0;
}

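/*
 * Translate a peer bus address into a local virtual address by locating the
 * memory window that covers it; returns NULL when no window matches.
 */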
static void *
intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
{
	struct ntb_hw *hw = dev->dev_private;
	void *mapped = NULL;
	void *base;
	int i;

	for (i = 0; i < hw->peer_used_mws; i++) {
		if (addr >= hw->peer_mw_base[i] &&
		    addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
			base = intel_ntb_get_peer_mw_addr(dev, i);
			mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
				 (size_t)base);
			break;
		}
	}

	return mapped;
}

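/*
 * Read the PCIe link status register and cache whether the NTB link is up,
 * plus its negotiated speed and width.
 */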
static int
intel_ntb_get_link_status(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val;
	int ret;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_LINK_STATUS_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Unable to get link status.");
		return -EIO;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

	if (hw->link_status) {
		hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
		hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
	} else {
		hw->link_speed = NTB_SPEED_NONE;
		hw->link_width = NTB_WIDTH_NONE;
	}

	return 0;
}

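/*
 * Bring the NTB link up or down by toggling the link-disable, config-lock
 * and BAR snoop bits in the NTB control register.
 */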
static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

	reg_off = XEON_NTBCNTL_OFFSET;
	reg_addr = hw->hw_addr + reg_off;
	ntb_ctrl = rte_read32(reg_addr);

	if (up) {
		ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
		ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	} else {
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	}

	rte_write32(ntb_ctrl, reg_addr);

	return 0;
}

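/*
 * Scratchpad access: with peer == true the B2B scratchpad window of the
 * remote side is used, otherwise the local (inbound) scratchpad registers.
 */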
static uint32_t
intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t spad_v, reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return 0;
	}

	/* When peer is true, read the peer spad reg. */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

	return spad_v;
}

static int
intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
		     bool peer, uint32_t spad_v)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return -EINVAL;
	}

	/* When peer is true, write the peer spad reg. */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	rte_write32(spad_v, reg_addr);

	return 0;
}

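/*
 * Doorbell handling: db_read returns the pending doorbell bits, db_clear
 * writes the given bits back to the status register to acknowledge them,
 * and db_set_mask disables the corresponding doorbell interrupts.
 */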
static uint64_t
intel_ntb_db_read(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off, db_bits;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;
	db_bits = rte_read64(db_addr);

	return db_bits;
}

static int
intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;
	rte_write64(db_bits, db_addr);

	return 0;
}

static int
intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_m_off;
	void *db_m_addr;

	db_m_off = XEON_IM_INT_DISABLE_OFFSET;
	db_m_addr = hw->hw_addr + db_m_off;

	/* Merge with the bits that are already masked. */
	db_mask |= hw->db_mask;
	rte_write64(db_mask, db_m_addr);

	hw->db_mask = db_mask;

	return 0;
}

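/*
 * Ring doorbell db_idx on the peer by writing to the corresponding
 * doorbell register; db_valid_mask guards against out-of-range indexes.
 */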
static int
intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t db_off;
	void *db_addr;

	if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
		NTB_LOG(ERR, "Invalid doorbell.");
		return -EINVAL;
	}

	db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
	db_addr = hw->hw_addr + db_off;
	rte_write32(1, db_addr);

	return 0;
}

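/*
 * Bind doorbell/interrupt source intr to MSI-X vector msix via the
 * interrupt vector table.
 */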
static int
intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_off;
	void *reg_addr;

	if (intr >= hw->db_cnt) {
		NTB_LOG(ERR, "Invalid intr source.");
		return -EINVAL;
	}

	/* Bind intr source to msix vector. */
	reg_off = XEON_INTVEC_OFFSET;
	reg_addr = hw->hw_addr + reg_off + intr;
	rte_write8(msix, reg_addr);

	return 0;
}

/* Operations for the primary side of the local NTB. */
const struct ntb_dev_ops intel_ntb_ops = {
	.ntb_dev_init = intel_ntb_dev_init,
	.get_peer_mw_addr = intel_ntb_get_peer_mw_addr,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.ioremap = intel_ntb_ioremap,
	.get_link_status = intel_ntb_get_link_status,
	.set_link = intel_ntb_set_link,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.peer_db_set = intel_ntb_peer_db_set,
	.vector_bind = intel_ntb_vector_bind,
};
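
/*
 * Usage sketch (assumption, not part of this file): the generic ntb rawdev
 * layer is expected to pick this table at probe time and dispatch through
 * it, roughly as follows:
 *
 *	const struct ntb_dev_ops *ops = &intel_ntb_ops;
 *
 *	ops->ntb_dev_init(dev);
 *	ops->set_link(dev, true);
 *	ops->spad_write(dev, 0, true, 0x1);      // write peer scratchpad 0
 *	ops->db_clear(dev, ops->db_read(dev));   // ack pending doorbells
 */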