/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_bus_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"
23 static enum xeon_ntb_bar intel_ntb_bar[] = {
29 is_gen3_ntb(const struct ntb_hw *hw)
31 if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SKX)
38 is_gen4_ntb(const struct ntb_hw *hw)
40 if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_ICX)
47 intel_ntb3_check_ppd(struct ntb_hw *hw)
52 ret = rte_pci_read_config(hw->pci_dev, ®_val,
53 sizeof(reg_val), XEON_PPD_OFFSET);
55 NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
59 /* Check connection topo type. Only support B2B. */
60 switch (reg_val & XEON_PPD_CONN_MASK) {
61 case XEON_PPD_CONN_B2B:
62 NTB_LOG(INFO, "Topo B2B (back to back) is using.");
64 case XEON_PPD_CONN_TRANSPARENT:
65 case XEON_PPD_CONN_RP:
67 NTB_LOG(ERR, "Not supported conn topo. Please use B2B.");
71 /* Check device type. */
72 if (reg_val & XEON_PPD_DEV_DSD) {
73 NTB_LOG(INFO, "DSD, Downstream Device.");
74 hw->topo = NTB_TOPO_B2B_DSD;
76 NTB_LOG(INFO, "USD, Upstream device.");
77 hw->topo = NTB_TOPO_B2B_USD;
80 /* Check if bar4 is split. Do not support split bar. */
81 if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
82 NTB_LOG(ERR, "Do not support split bar.");
90 intel_ntb4_check_ppd(struct ntb_hw *hw)
94 reg_val = rte_read32(hw->hw_addr + XEON_GEN4_PPD1_OFFSET);
96 /* Check connection topo type. Only support B2B. */
97 switch (reg_val & XEON_GEN4_PPD_CONN_MASK) {
98 case XEON_GEN4_PPD_CONN_B2B:
99 NTB_LOG(INFO, "Topo B2B (back to back) is using.");
102 NTB_LOG(ERR, "Not supported conn topo. Please use B2B.");
106 /* Check device type. */
107 if (reg_val & XEON_GEN4_PPD_DEV_DSD) {
108 NTB_LOG(INFO, "DSD, Downstream Device.");
109 hw->topo = NTB_TOPO_B2B_DSD;
111 NTB_LOG(INFO, "USD, Upstream device.");
112 hw->topo = NTB_TOPO_B2B_USD;
119 intel_ntb_dev_init(const struct rte_rawdev *dev)
121 struct ntb_hw *hw = dev->dev_private;
126 NTB_LOG(ERR, "Invalid device.");
130 hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;
133 ret = intel_ntb3_check_ppd(hw);
134 else if (is_gen4_ntb(hw))
135 /* PPD is in MMIO but not config space for NTB Gen4 */
136 ret = intel_ntb4_check_ppd(hw);
138 NTB_LOG(ERR, "Cannot init device for unsupported device.");
145 hw->mw_cnt = XEON_MW_COUNT;
146 hw->db_cnt = XEON_DB_COUNT;
147 hw->spad_cnt = XEON_SPAD_COUNT;
149 hw->mw_size = rte_zmalloc("ntb_mw_size",
150 hw->mw_cnt * sizeof(uint64_t), 0);
151 if (hw->mw_size == NULL) {
152 NTB_LOG(ERR, "Cannot allocate memory for mw size.");
156 for (i = 0; i < hw->mw_cnt; i++) {
157 bar = intel_ntb_bar[i];
158 hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
161 /* Reserve the last 2 spad registers for users. */
162 for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
163 hw->spad_user_list[i] = hw->spad_cnt;
164 hw->spad_user_list[0] = hw->spad_cnt - 2;
165 hw->spad_user_list[1] = hw->spad_cnt - 1;
171 intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
173 struct ntb_hw *hw = dev->dev_private;
177 NTB_LOG(ERR, "Invalid device.");
181 if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
182 NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
187 bar = intel_ntb_bar[mw_idx];
189 return hw->pci_dev->mem_resource[bar].addr;
193 intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
194 uint64_t addr, uint64_t size)
196 struct ntb_hw *hw = dev->dev_private;
197 void *xlat_addr, *limit_addr;
198 uint64_t xlat_off, limit_off;
199 uint64_t base, limit;
203 NTB_LOG(ERR, "Invalid device.");
207 if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
208 NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
213 bar = intel_ntb_bar[mw_idx];
215 xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
216 limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
217 xlat_addr = hw->hw_addr + xlat_off;
218 limit_addr = hw->hw_addr + limit_off;
220 /* Limit reg val should be EMBAR base address plus MW size. */
222 limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
223 rte_write64(base, xlat_addr);
224 rte_write64(limit, limit_addr);
226 if (is_gen3_ntb(hw)) {
227 /* Setup the external point so that remote can access. */
228 xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
229 xlat_addr = hw->hw_addr + xlat_off;
230 limit_off = XEON_EMBAR1XLMT_OFFSET +
231 mw_idx * XEON_BAR_INTERVAL_OFFSET;
232 limit_addr = hw->hw_addr + limit_off;
233 base = rte_read64(xlat_addr);
236 rte_write64(limit, limit_addr);
237 } else if (is_gen4_ntb(hw)) {
238 /* Set translate base address index register */
239 xlat_off = XEON_GEN4_IM1XBASEIDX_OFFSET +
240 mw_idx * XEON_GEN4_XBASEIDX_INTERVAL;
241 xlat_addr = hw->hw_addr + xlat_off;
242 rte_write16(rte_log2_u64(size), xlat_addr);
244 NTB_LOG(ERR, "Cannot set translation of memory windows for unsupported device.");
245 rte_write64(base, limit_addr);
246 rte_write64(0, xlat_addr);
254 intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
256 struct ntb_hw *hw = dev->dev_private;
261 for (i = 0; i < hw->peer_used_mws; i++) {
262 if (addr >= hw->peer_mw_base[i] &&
263 addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
264 base = intel_ntb_get_peer_mw_addr(dev, i);
265 mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
275 intel_ntb_get_link_status(const struct rte_rawdev *dev)
277 struct ntb_hw *hw = dev->dev_private;
278 uint16_t reg_val, reg_off;
282 NTB_LOG(ERR, "Invalid device.");
286 if (is_gen3_ntb(hw)) {
287 reg_off = XEON_GEN3_LINK_STATUS_OFFSET;
288 ret = rte_pci_read_config(hw->pci_dev, ®_val,
289 sizeof(reg_val), reg_off);
291 NTB_LOG(ERR, "Unable to get link status.");
294 } else if (is_gen4_ntb(hw)) {
295 reg_off = XEON_GEN4_LINK_STATUS_OFFSET;
296 reg_val = rte_read16(hw->hw_addr + reg_off);
298 NTB_LOG(ERR, "Cannot get link status for unsupported device.");
302 hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);
304 if (hw->link_status) {
305 hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
306 hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
308 hw->link_speed = NTB_SPEED_NONE;
309 hw->link_width = NTB_WIDTH_NONE;
316 intel_ntb_gen3_set_link(const struct ntb_hw *hw, bool up)
318 uint32_t ntb_ctrl, reg_off;
321 reg_off = XEON_NTBCNTL_OFFSET;
322 reg_addr = hw->hw_addr + reg_off;
323 ntb_ctrl = rte_read32(reg_addr);
326 ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
327 ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
328 ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
330 ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
331 ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
332 ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
335 rte_write32(ntb_ctrl, reg_addr);
341 intel_ntb_gen4_set_link(const struct ntb_hw *hw, bool up)
343 uint32_t ntb_ctrl, ppd0;
348 reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
349 ntb_ctrl = NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
350 ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
351 rte_write32(ntb_ctrl, reg_addr);
353 reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
354 link_ctrl = rte_read16(reg_addr);
355 link_ctrl &= ~XEON_GEN4_LINK_CTRL_LINK_DIS;
356 rte_write16(link_ctrl, reg_addr);
358 /* start link training */
359 reg_addr = hw->hw_addr + XEON_GEN4_PPD0_OFFSET;
360 ppd0 = rte_read32(reg_addr);
361 ppd0 |= XEON_GEN4_PPD_LINKTRN;
362 rte_write32(ppd0, reg_addr);
364 /* make sure link training has started */
365 ppd0 = rte_read32(reg_addr);
366 if (!(ppd0 & XEON_GEN4_PPD_LINKTRN)) {
367 NTB_LOG(ERR, "Link is not training.");
371 reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
372 ntb_ctrl = rte_read32(reg_addr);
373 ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
374 ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
375 rte_write32(ntb_ctrl, reg_addr);
377 reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
378 link_ctrl = rte_read16(reg_addr);
379 link_ctrl |= XEON_GEN4_LINK_CTRL_LINK_DIS;
380 rte_write16(link_ctrl, reg_addr);
387 intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
389 struct ntb_hw *hw = dev->dev_private;
393 ret = intel_ntb_gen3_set_link(hw, up);
394 else if (is_gen4_ntb(hw))
395 ret = intel_ntb_gen4_set_link(hw, up);
397 NTB_LOG(ERR, "Cannot set link for unsupported device.");
405 intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
407 struct ntb_hw *hw = dev->dev_private;
408 uint32_t spad_v, reg_off;
411 if (spad < 0 || spad >= hw->spad_cnt) {
412 NTB_LOG(ERR, "Invalid spad reg index.");
416 /* When peer is true, read peer spad reg */
418 reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
420 else if (is_gen4_ntb(hw))
421 reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
424 NTB_LOG(ERR, "Cannot read spad for unsupported device.");
427 reg_addr = hw->hw_addr + reg_off + (spad << 2);
428 spad_v = rte_read32(reg_addr);
434 intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
435 bool peer, uint32_t spad_v)
437 struct ntb_hw *hw = dev->dev_private;
441 if (spad < 0 || spad >= hw->spad_cnt) {
442 NTB_LOG(ERR, "Invalid spad reg index.");
446 /* When peer is true, write peer spad reg */
448 reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
450 else if (is_gen4_ntb(hw))
451 reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
454 NTB_LOG(ERR, "Cannot write spad for unsupported device.");
457 reg_addr = hw->hw_addr + reg_off + (spad << 2);
459 rte_write32(spad_v, reg_addr);
465 intel_ntb_db_read(const struct rte_rawdev *dev)
467 struct ntb_hw *hw = dev->dev_private;
468 uint64_t db_off, db_bits;
471 db_off = XEON_IM_INT_STATUS_OFFSET;
472 db_addr = hw->hw_addr + db_off;
474 db_bits = rte_read64(db_addr);
480 intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
482 struct ntb_hw *hw = dev->dev_private;
486 db_off = XEON_IM_INT_STATUS_OFFSET;
487 db_addr = hw->hw_addr + db_off;
490 rte_write16(XEON_GEN4_SLOTSTS_DLLSCS,
491 hw->hw_addr + XEON_GEN4_SLOTSTS);
492 rte_write64(db_bits, db_addr);
498 intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
500 struct ntb_hw *hw = dev->dev_private;
504 db_m_off = XEON_IM_INT_DISABLE_OFFSET;
505 db_m_addr = hw->hw_addr + db_m_off;
507 db_mask |= hw->db_mask;
509 rte_write64(db_mask, db_m_addr);
511 hw->db_mask = db_mask;
517 intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
519 struct ntb_hw *hw = dev->dev_private;
523 if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
524 NTB_LOG(ERR, "Invalid doorbell.");
528 db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
529 db_addr = hw->hw_addr + db_off;
531 rte_write32(1, db_addr);
537 intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
539 struct ntb_hw *hw = dev->dev_private;
543 if (intr >= hw->db_cnt) {
544 NTB_LOG(ERR, "Invalid intr source.");
548 /* Bind intr source to msix vector */
550 reg_off = XEON_GEN3_INTVEC_OFFSET;
551 else if (is_gen4_ntb(hw))
552 reg_off = XEON_GEN4_INTVEC_OFFSET;
554 NTB_LOG(ERR, "Cannot bind vectors for unsupported device.");
557 reg_addr = hw->hw_addr + reg_off + intr;
559 rte_write8(msix, reg_addr);
564 /* operations for primary side of local ntb */
565 const struct ntb_dev_ops intel_ntb_ops = {
566 .ntb_dev_init = intel_ntb_dev_init,
567 .get_peer_mw_addr = intel_ntb_get_peer_mw_addr,
568 .mw_set_trans = intel_ntb_mw_set_trans,
569 .ioremap = intel_ntb_ioremap,
570 .get_link_status = intel_ntb_get_link_status,
571 .set_link = intel_ntb_set_link,
572 .spad_read = intel_ntb_spad_read,
573 .spad_write = intel_ntb_spad_write,
574 .db_read = intel_ntb_db_read,
575 .db_clear = intel_ntb_db_clear,
576 .db_set_mask = intel_ntb_db_set_mask,
577 .peer_db_set = intel_ntb_peer_db_set,
578 .vector_bind = intel_ntb_vector_bind,