1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
17 #include <rte_rawdev.h>
18 #include <rte_rawdev_pmd.h>
19 #include <rte_bus_ifpga.h>
20 #include <ifpga_common.h>
21 #include <ifpga_logs.h>
22 #include <ifpga_rawdev.h>
24 #include "ipn3ke_rawdev_api.h"
25 #include "ipn3ke_flow.h"
26 #include "ipn3ke_logs.h"
27 #include "ipn3ke_ethdev.h"
/* Log type id for this PMD; registered in ipn3ke_afu_init_log() below. */
29 int ipn3ke_afu_logtype;

/*
 * AFU UUIDs this driver binds to: the 10G MAP image, the plain 10G and
 * 25G images, and the vBNG image (the vBNG UUID also gates the extra HW
 * init/TM/flow setup in ipn3ke_hw_init()).
 * NOTE(review): the table's closing "};" is not visible in this extract.
 */
31 static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
32 { MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
33 { IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
34 { IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH},
35 { IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
36 { 0, 0 /* sentinel */ },

/*
 * Bridge into the ifpga rawdev PMD; ipn3ke_vswitch_probe() calls
 * ipn3ke_bridge_func.get_ifpga_rawdev() to locate the backing FPGA device.
 * Presumably filled in by the ifpga rawdev driver at init -- not visible here.
 */
39 struct ipn3ke_pub_func ipn3ke_bridge_func;
/*
 * Read a 32-bit register through the indirect-access window of an
 * Ethernet-group BAR.
 *
 * @rd_data        receives the low 32 bits of the readback
 * @addr           register address inside the selected device
 * @dev_sel        device select, shifted into bits 17+ of the target address
 * @eth_group_sel  which of the two eth group BARs (0 = line side,
 *                 1 = NIC side per ipn3ke_hw_init); anything else is rejected
 *
 * Writes an RCMD at BAR offset 0x10, then polls the readback register
 * until bit 32 is set, signalling the data in the low 32 bits is valid.
 * NOTE(review): this extract is missing several lines (return type, the
 * "do {" loop header, the retry counter/delay, and the return statements)
 * -- comments below describe only what is visible.
 */
42 ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
43 	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
46 uint64_t indirect_value;
47 volatile void *indirect_addrs;
49 uint64_t read_data = 0;
/* Only eth groups 0 and 1 exist; reject anything else. */
51 if (eth_group_sel != 0 && eth_group_sel != 1)
/* Compose the indirect target: register address | device select. */
54 target_addr = addr | dev_sel << 17;
/* Read command plus target address in the high 32 bits. */
56 indirect_value = RCMD | target_addr << 32;
/* The indirect command register lives at BAR offset 0x10. */
57 indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
61 rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
/* Poll the readback register (offset not visible in this extract). */
65 indirect_addrs = hw->eth_group_bar[eth_group_sel] +
68 read_data = rte_read64(indirect_addrs);
/* Bit 32 of the readback set => read completed, low 32 bits valid. */
69 if ((read_data >> 32) == 1)
72 } while (i <= try_cnt);
/* Hand back the 32-bit payload, converted from little endian. */
76 *rd_data = rte_le_to_cpu_32(read_data);
/*
 * Write a 32-bit register through the indirect-access window of an
 * Ethernet-group BAR (write-side counterpart of ipn3ke_indirect_read).
 *
 * @wr_data        32-bit value to write (packed into the command's low bits)
 * @addr           register address inside the selected device
 * @dev_sel        device select, shifted into bits 17+ of the target address
 * @eth_group_sel  eth group BAR index; only 0 and 1 are accepted
 *
 * NOTE(review): the return statements are not visible in this extract.
 */
81 ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
82 	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
84 volatile void *indirect_addrs;
85 uint64_t indirect_value;
/* Only eth groups 0 and 1 exist; reject anything else. */
88 if (eth_group_sel != 0 && eth_group_sel != 1)
91 target_addr = addr | dev_sel << 17;
/* WCMD + target address in the high half, data in the low half. */
93 indirect_value = WCMD | target_addr << 32 | wr_data;
/* Same indirect command register (BAR offset 0x10) as the read path. */
94 indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
96 rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
/*
 * Read a MAC register for a given port: maps the MAC number to the
 * indirect device-select (mac_num * 2 + 3) and delegates to
 * ipn3ke_indirect_read().  Installed as hw->f_mac_read in
 * ipn3ke_hw_init().
 * NOTE(review): the out-of-range early return is not visible here.
 */
101 ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
102 	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
/* Reject MAC numbers beyond the probed port count. */
106 if (mac_num >= hw->port_num)
/* Device-select encoding for MACs: 3, 5, 7, ... per port. */
110 dev_sel = mac_num * 2 + 3;
112 return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
/*
 * Write a MAC register for a given port: mirror of
 * ipn3ke_indirect_mac_read(), same mac_num -> dev_sel encoding.
 * Installed as hw->f_mac_write in ipn3ke_hw_init().
 */
116 ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
117 	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
/* Reject MAC numbers beyond the probed port count. */
121 if (mac_num >= hw->port_num)
/* Device-select encoding for MACs: 3, 5, 7, ... per port. */
125 dev_sel = mac_num * 2 + 3;
127 return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
/*
 * Populate hw->hw_cap by reading the fixed capability layout that starts
 * at IPN3KE_HW_BASE: version, the three register-block offsets, then
 * (offset, size) pairs for each functional block (classifier, policer,
 * RSS key/indirection tables, DMAC map, QM, CCB, QoS).  Offsets are full
 * 32-bit reads (mask 0xFFFFFFFF); sizes/version are 16-bit (mask 0xFFFF).
 * The trailing reads pull per-feature counts and link speed out of the
 * capability/status blocks discovered above.
 * NOTE(review): the mask/shift arguments of the final six reads are not
 * visible in this extract.
 */
131 ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
133 hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
134 	(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
135 hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
136 	(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
137 hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
138 	(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
139 hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
140 	(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
/* Per-block (offset, entry size) pairs follow at 8-byte stride. */
141 hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
142 	(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
143 hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
144 	(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
145 hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
146 	(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
147 hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
148 	(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
149 hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
150 	(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
151 hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
152 	(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
153 hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
154 	(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
155 hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
156 	(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
157 hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
158 	(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
159 hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
160 	(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
161 hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
162 	(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
163 hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
164 	(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
165 hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
166 	(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
167 hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
168 	(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
169 hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
170 	(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
171 hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
172 	(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);
/* Feature counts live in the capability registers block found above. */
174 hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
175 	IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
177 hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
178 	IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
180 hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
181 	IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
183 hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
184 	IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
186 hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
187 	IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
/* Link speed is reported in the status registers block. */
190 hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
191 	IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
/*
 * Poll the vBNG init-status register until it reports
 * IPN3KE_VBNG_INIT_DONE, bounded by a countdown of 10000 iterations;
 * log an error on timeout.  Called from ipn3ke_hw_init() both after
 * power-on and after the soft reset.
 * NOTE(review): the loop body's decrement/delay and the return
 * statements are not visible in this extract -- presumably returns 0 on
 * done and an error code on timeout; confirm against the full source.
 */
196 ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
198 uint32_t timeout = 10000;
199 while (timeout > 0) {
200 	if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
201 		== IPN3KE_VBNG_INIT_DONE)
208 IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
/*
 * Derive an MTU from the TX and RX maximum frame sizes: take the
 * smaller of the two, then clamp it into
 * [RTE_ETHER_MIN_MTU, IPN3KE_MAC_FRAME_SIZE_MAX - IPN3KE_ETH_OVERHEAD].
 * NOTE(review): the "return tmp;" is not visible in this extract.
 */
216 ipn3ke_mtu_cal(uint32_t tx, uint32_t rx)
219 tmp = RTE_MIN(tx, rx);
/* Clamp below by the Ethernet minimum MTU ... */
220 tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU);
/* ... and above by the MAC's max frame size minus L2 overhead. */
221 tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX -
222 	IPN3KE_ETH_OVERHEAD));
/*
 * Harmonize the TX and RX maximum-frame-size registers of one MAC:
 * read both current values through hw->f_mac_read, compute a common
 * value with ipn3ke_mtu_cal(), and write it back to both registers
 * through hw->f_mac_write.
 *
 * @txaddr / @rxaddr  register addresses of the TX/RX max-size registers
 *                    (10G and 25G images use different addresses; see
 *                    the two *_mtu_setup wrappers below)
 * NOTE(review): the argument lists of the f_mac_read/f_mac_write calls
 * are truncated in this extract.
 */
227 ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num,
228 	uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr)
/* Both accessors must have been installed by ipn3ke_hw_init(). */
234 if (!(*hw->f_mac_read) || !(*hw->f_mac_write))
237 (*hw->f_mac_read)(hw,
243 (*hw->f_mac_read)(hw,
/* One value for both directions, clamped to legal MTU bounds. */
249 tmp = ipn3ke_mtu_cal(tx, rx);
251 (*hw->f_mac_write)(hw,
257 (*hw->f_mac_write)(hw,
/*
 * 10G variant: set up the MTU for one MAC using the 10G image's TX/RX
 * frame-max-length register addresses.
 */
265 ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
266 	uint32_t eth_group_sel)
268 ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
269 	IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH);
/*
 * 25G variant: set up the MTU for one MAC using the 25G image's
 * max-TX/RX-size config register addresses.
 */
273 ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
274 	uint32_t eth_group_sel)
276 ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
277 	IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG);
/*
 * Apply the MTU setup to every port on both eth groups (0 and 1),
 * choosing the 10G or 25G register set based on the retimer MAC type
 * read during ipn3ke_hw_init().  Other MAC types are left untouched.
 */
281 ipn3ke_mtu_setup(struct ipn3ke_hw *hw)
284 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
285 	for (i = 0; i < hw->port_num; i++) {
/* Each port exists in both eth groups; configure both. */
286 		ipn3ke_10G_mtu_setup(hw, i, 0);
287 		ipn3ke_10G_mtu_setup(hw, i, 1);
289 } else if (hw->retimer.mac_type ==
290 	IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
291 	for (i = 0; i < hw->port_num; i++) {
292 		ipn3ke_25G_mtu_setup(hw, i, 0);
293 		ipn3ke_25G_mtu_setup(hw, i, 1);
/*
 * One-time hardware bring-up for an IPN3KE AFU device.  Copies the AFU
 * identity into @hw, maps the two eth-group BARs via rawdev attributes,
 * installs the indirect MAC accessors, runs the vBNG-specific init/reset
 * sequence when the vBNG UUID matches, enables/clears the MACs per
 * retimer type, applies MTU setup, allocates a switch domain, and
 * initializes TM and flow engines for vBNG images.
 * NOTE(review): error-handling lines (returns after failed init, the
 * reset delay, loop/if closers) are missing from this extract.
 */
299 ipn3ke_hw_init(struct rte_afu_device *afu_dev,
300 	struct ipn3ke_hw *hw)
302 struct rte_rawdev *rawdev;
305 uint64_t port_num, mac_type, index;
307 rawdev = afu_dev->rawdev;
/* Cache the AFU identity and BAR0 base address. */
309 hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
310 hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
311 hw->afu_id.port = afu_dev->id.port;
312 hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
/* All MAC register access goes through the indirect window helpers. */
313 hw->f_mac_read = ipn3ke_indirect_mac_read;
314 hw->f_mac_write = ipn3ke_indirect_mac_write;
/* Discover BAR indices and link topology from the ifpga rawdev. */
316 rawdev->dev_ops->attr_get(rawdev,
317 	"LineSideBARIndex", &index);
318 hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
319 rawdev->dev_ops->attr_get(rawdev,
320 	"NICSideBARIndex", &index);
321 hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
322 rawdev->dev_ops->attr_get(rawdev,
323 	"LineSideLinkPortNum", &port_num);
324 hw->retimer.port_num = (int)port_num;
325 hw->port_num = hw->retimer.port_num;
326 rawdev->dev_ops->attr_get(rawdev,
327 	"LineSideMACType", &mac_type);
328 hw->retimer.mac_type = (int)mac_type;
/* vBNG image: wait for init, read capabilities, then soft-reset. */
333 if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
334 	afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
335 	/* After power on, wait until init done */
336 	if (ipn3ke_vbng_init_done(hw))
339 	ipn3ke_hw_cap_init(hw);
/* Pulse the reset bit: assert then deassert. */
342 	IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
344 	IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
346 	/* After reset, wait until init done */
347 	if (ipn3ke_vbng_init_done(hw))
353 IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
354 	IPN3KE_READ_REG(hw, 0));
/* 10G retimer: enable TX/RX paths and clear stats on both sides. */
357 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
358 	/* Enable inter connect channel */
359 	for (i = 0; i < hw->port_num; i++) {
360 		/* Enable the TX path */
361 		ipn3ke_xmac_tx_enable(hw, i, 1);
363 		/* Disables source address override */
364 		ipn3ke_xmac_smac_ovd_dis(hw, i, 1);
366 		/* Enable the RX path */
367 		ipn3ke_xmac_rx_enable(hw, i, 1);
369 		/* Clear NIC side TX statistics counters */
370 		ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
372 		/* Clear NIC side RX statistics counters */
373 		ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
375 		/* Clear line side TX statistics counters */
376 		ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
378 		/* Clear line RX statistics counters */
379 		ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
/* 25G retimer: only statistics clearing is needed here. */
381 } else if (hw->retimer.mac_type ==
382 	IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
383 	/* Enable inter connect channel */
384 	for (i = 0; i < hw->port_num; i++) {
385 		/* Clear NIC side TX statistics counters */
386 		ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
388 		/* Clear NIC side RX statistics counters */
389 		ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
391 		/* Clear line side TX statistics counters */
392 		ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
394 		/* Clear line side RX statistics counters */
395 		ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
400 ipn3ke_mtu_setup(hw);
/* One switch domain groups this device's representor ports. */
402 ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
404 	IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
/* TM and flow engines only exist in the vBNG image. */
407 hw->tm_hw_enable = 0;
408 hw->flow_hw_enable = 0;
409 if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
410 	afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
411 	ret = ipn3ke_hw_tm_init(hw);
414 	hw->tm_hw_enable = 1;
416 	ret = ipn3ke_flow_init(hw);
419 	hw->flow_hw_enable = 1;
/*
 * Quiesce the hardware on teardown: for 10G retimers disable the TX/RX
 * paths and clear statistics on both NIC side (group 1) and line side
 * (group 0) for every port; for 25G retimers only clear statistics.
 * Mirrors the enable/clear sequence done in ipn3ke_hw_init().
 */
426 ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
430 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
431 	for (i = 0; i < hw->port_num; i++) {
432 		/* Disable the TX path */
433 		ipn3ke_xmac_tx_disable(hw, i, 1);
435 		/* Disable the RX path */
436 		ipn3ke_xmac_rx_disable(hw, i, 1);
438 		/* Clear NIC side TX statistics counters */
439 		ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
441 		/* Clear NIC side RX statistics counters */
442 		ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
444 		/* Clear line side TX statistics counters */
445 		ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
447 		/* Clear line side RX statistics counters */
448 		ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
450 } else if (hw->retimer.mac_type ==
451 	IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
452 	for (i = 0; i < hw->port_num; i++) {
453 		/* Clear NIC side TX statistics counters */
454 		ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
456 		/* Clear NIC side RX statistics counters */
457 		ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
459 		/* Clear line side TX statistics counters */
460 		ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
462 		/* Clear line side RX statistics counters */
463 		ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
/*
 * AFU probe callback: allocate (or reuse) the per-device ipn3ke_hw in
 * afu_dev->shared.data, run ipn3ke_hw_init(), locate the backing ifpga
 * rawdev through ipn3ke_bridge_func, then create one representor ethdev
 * ("net_<dev>_representor_<i>") per port, binding each to the i40e PF
 * ethdev found by BDF name.
 * NOTE(review): error-handling branches, the inner BDF-search loop
 * header, and the function's returns are missing from this extract.
 */
468 static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
470 char name[RTE_ETH_NAME_MAX_LEN];
471 struct ipn3ke_hw *hw;
472 struct rte_eth_dev *i40e_eth;
473 struct ifpga_rawdev *ifpga_dev;
478 /* check if the AFU device has been probed already */
479 /* allocate shared mcp_vswitch structure */
480 if (!afu_dev->shared.data) {
481 	snprintf(name, sizeof(name), "net_%s_hw",
482 		afu_dev->device.name);
483 	hw = rte_zmalloc_socket(name,
484 		sizeof(struct ipn3ke_hw),
486 		afu_dev->device.numa_node);
/* (sic) "hardwart" typo is in the runtime string; not altered here. */
488 		IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
492 	afu_dev->shared.data = hw;
494 	rte_spinlock_init(&afu_dev->shared.lock);
/* Already probed: reuse the previously allocated hw state. */
496 	hw = afu_dev->shared.data;
499 retval = ipn3ke_hw_init(afu_dev, hw);
/* The ifpga rawdev bridge must have been registered by the ifpga PMD. */
503 if (ipn3ke_bridge_func.get_ifpga_rawdev == NULL)
505 ifpga_dev = ipn3ke_bridge_func.get_ifpga_rawdev(hw->rawdev);
507 	IPN3KE_AFU_PMD_ERR("failed to find ifpga_device.");
509 /* probe representor ports */
511 for (i = 0; i < hw->port_num; i++) {
512 	struct ipn3ke_rpst rpst = {
514 		.switch_domain_id = hw->switch_domain_id,
518 	/* representor port net_bdf_port */
519 	snprintf(name, sizeof(name), "net_%s_representor_%d",
520 		afu_dev->device.name, i);
/* Resolve the i40e PF port backing this representor by its BDF. */
523 		fvl_bdf = ifpga_dev->fvl_bdf[j];
524 		retval = rte_eth_dev_get_port_by_name(fvl_bdf,
529 		i40e_eth = &rte_eth_devices[port_id];
530 		rpst.i40e_pf_eth = i40e_eth;
531 		rpst.i40e_pf_eth_port_id = port_id;
/* Create the representor ethdev; ipn3ke_rpst_init fills it from rpst. */
538 	retval = rte_eth_dev_create(&afu_dev->device, name,
539 		sizeof(struct ipn3ke_rpst), NULL, NULL,
540 		ipn3ke_rpst_init, &rpst);
543 		IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
/*
 * AFU remove callback: destroy each representor ethdev, free the switch
 * domain allocated in ipn3ke_hw_init(), and quiesce the hardware.
 * NOTE(review): the formatted representor name is built into `name` but
 * the visible lookup passes afu_dev->device.name to
 * rte_eth_dev_allocated() -- lines are missing here; confirm against the
 * full source which name is actually used.
 */
551 static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
553 char name[RTE_ETH_NAME_MAX_LEN];
554 struct ipn3ke_hw *hw;
555 struct rte_eth_dev *ethdev;
558 hw = afu_dev->shared.data;
560 /* remove representor ports */
561 for (i = 0; i < hw->port_num; i++) {
562 	/* representor port net_bdf_port */
563 	snprintf(name, sizeof(name), "net_%s_representor_%d",
564 		afu_dev->device.name, i);
566 	ethdev = rte_eth_dev_allocated(afu_dev->device.name);
/* Tear down the representor; ipn3ke_rpst_uninit releases its state. */
570 	rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
573 ret = rte_eth_switch_domain_free(hw->switch_domain_id);
575 	IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
578 ipn3ke_hw_uninit(hw);
/*
 * AFU driver definition: matches the UUID table above and wires the
 * probe/remove callbacks; registered with the ifpga AFU bus below.
 */
583 static struct rte_afu_driver afu_ipn3ke_driver = {
584 	.id_table = afu_uuid_ipn3ke_map,
585 	.probe = ipn3ke_vswitch_probe,
586 	.remove = ipn3ke_vswitch_remove,
589 RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
/*
 * Constructor: register the PMD's log type and default its level to
 * NOTICE.  rte_log_register() returns a negative value on failure,
 * hence the >= 0 guard before setting the level.
 */
591 RTE_INIT(ipn3ke_afu_init_log)
593 ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
594 if (ipn3ke_afu_logtype >= 0)
595 	rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);