/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>
#include <ifpga_rawdev.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};

struct ipn3ke_pub_func ipn3ke_bridge_func;
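
/* Indirect CSR access, as used by the helpers below: a 64-bit command
 * word (RCMD/WCMD plus device select and register address in the upper
 * half, write data in the lower half) is written to the command register
 * at BAR + 0x10; for reads, the result is polled from BAR + 0x18, where
 * bit 32 acts as the "data valid" flag. Offsets are inferred from the
 * code itself.
 */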
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i = 0, try_cnt = 10;
	uint64_t indirect_value, target_addr;
	volatile void *indirect_addrs;
	uint64_t read_data = 0;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;

	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
	rte_write64(rte_cpu_to_le_64(indirect_value), indirect_addrs);

	/* Poll the read-back register until the valid bit (bit 32) is set */
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}

static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	volatile void *indirect_addrs;
	uint64_t indirect_value, target_addr;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;

	indirect_value = WCMD | target_addr << 32 | wr_data;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
	rte_write64(rte_cpu_to_le_64(indirect_value), indirect_addrs);
	return 0;
}

static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;
	return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}

static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;
	return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}
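
/* The wrappers above appear to map each MAC onto two device-select
 * slots starting at 3, hence dev_sel = mac_num * 2 + 3; eth_group_sel
 * chooses between the line-side (0) and NIC-side (1) Ethernet group
 * BARs set up in ipn3ke_hw_init().
 */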

static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/* The offsets below assume five consecutive 32-bit capability
	 * words; verify against the IPN3KE register specification.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0x4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0x8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0x10, 0xFFFF);

	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFF);
}

static int
ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
{
	uint32_t timeout = 10000;

	while (timeout > 0) {
		if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
			== IPN3KE_VBNG_INIT_DONE)
			break;
		rte_delay_us(1000);
		timeout--;
	}

	if (!timeout) {
		IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
		return -1;
	}

	return 0;
}

static uint32_t
ipn3ke_mtu_cal(uint32_t tx, uint32_t rx)
{
	uint32_t tmp;

	tmp = RTE_MIN(tx, rx);
	tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU);
	tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX -
		IPN3KE_ETH_OVERHEAD));
	return tmp;
}
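
/* Harmonize the TX and RX maximum frame size of one MAC: read both
 * current limits, clamp the smaller of the two into the valid MTU
 * range via ipn3ke_mtu_cal(), and program the result back to both
 * directions.
 */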
static void
ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num,
	uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr)
{
	uint32_t tx;
	uint32_t rx;
	uint32_t tmp;

	if (!(*hw->f_mac_read) || !(*hw->f_mac_write))
		return;

	(*hw->f_mac_read)(hw, &tx, txaddr, mac_num, eth_group_sel);
	(*hw->f_mac_read)(hw, &rx, rxaddr, mac_num, eth_group_sel);

	tmp = ipn3ke_mtu_cal(tx, rx);

	(*hw->f_mac_write)(hw, tmp, txaddr, mac_num, eth_group_sel);
	(*hw->f_mac_write)(hw, tmp, rxaddr, mac_num, eth_group_sel);
}

static void
ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
	uint32_t eth_group_sel)
{
	ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
		IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH);
}

static void
ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
	uint32_t eth_group_sel)
{
	ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
		IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG);
}

static void
ipn3ke_mtu_setup(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			ipn3ke_10G_mtu_setup(hw, i, 0);
			ipn3ke_10G_mtu_setup(hw, i, 1);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		for (i = 0; i < hw->port_num; i++) {
			ipn3ke_25G_mtu_setup(hw, i, 0);
			ipn3ke_25G_mtu_setup(hw, i, 1);
		}
	}
}
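
/* One-time AFU hardware init: fetch BAR indexes, port count and MAC
 * type from the underlying ifpga rawdev, reset and wait for the vBNG
 * IP when that AFU image is present, bring up the 10G/25G MACs, then
 * allocate a switch domain and optionally init the TM and flow engines.
 */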
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;
	hw->rawdev = rawdev;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		/* After power on, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		ipn3ke_hw_cap_init(hw);

		/* Reset the vBNG IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		rte_delay_us(10);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

		/* After reset, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;
	}

	IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
		IPN3KE_READ_REG(hw, 0));

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disable source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}

	ipn3ke_mtu_setup(hw);

	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain: %d",
			ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	return 0;
}

static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Disable the TX path */
			ipn3ke_xmac_tx_disable(hw, i, 1);

			/* Disable the RX path */
			ipn3ke_xmac_rx_disable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}
}
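
/* AFU probe: set up the shared hw data, init the hardware, then create
 * one representor ethdev per retimer port, pairing each with an i40e PF
 * port discovered through the ifpga rawdev's fvl_bdf table.
 */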
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *i40e_eth;
	struct ifpga_rawdev *ifpga_dev;
	uint16_t port_id;
	int i, j, retval;
	char *fvl_bdf;

	/* check if the AFU device has been probed already */
	/* allocate shared mcp_vswitch structure */
	if (!afu_dev->shared.data) {
		snprintf(name, sizeof(name), "net_%s_hw",
			afu_dev->device.name);
		hw = rte_zmalloc_socket(name,
			sizeof(struct ipn3ke_hw),
			RTE_CACHE_LINE_SIZE,
			afu_dev->device.numa_node);
		if (!hw) {
			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
			return -ENOMEM;
		}
		afu_dev->shared.data = hw;

		rte_spinlock_init(&afu_dev->shared.lock);
	} else {
		hw = afu_dev->shared.data;
	}

	retval = ipn3ke_hw_init(afu_dev, hw);
	if (retval)
		return retval;

	if (ipn3ke_bridge_func.get_ifpga_rawdev == NULL)
		return -ENOMEM;
	ifpga_dev = ipn3ke_bridge_func.get_ifpga_rawdev(hw->rawdev);
	if (!ifpga_dev) {
		IPN3KE_AFU_PMD_ERR("failed to find ifpga_device.");
		return -ENODEV;
	}

	/* probe representor ports */
	j = 0;
	for (i = 0; i < hw->port_num; i++) {
		struct ipn3ke_rpst rpst = {
			.port_id = i,
			.switch_domain_id = hw->switch_domain_id,
			.hw = hw
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		/* Pair this representor with the next usable i40e PF
		 * port; the bound of 8 assumes the size of the
		 * fvl_bdf[] table in the ifpga rawdev.
		 */
		for (; j < 8; j++) {
			fvl_bdf = ifpga_dev->fvl_bdf[j];
			retval = rte_eth_dev_get_port_by_name(fvl_bdf,
				&port_id);
			if (retval)
				continue;
			i40e_eth = &rte_eth_devices[port_id];
			rpst.i40e_pf_eth = i40e_eth;
			rpst.i40e_pf_eth_port_id = port_id;
			j++;
			break;
		}

		retval = rte_eth_dev_create(&afu_dev->device, name,
			sizeof(struct ipn3ke_rpst), NULL, NULL,
			ipn3ke_rpst_init, &rpst);
		if (retval)
			IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
				name);
	}

	return 0;
}

static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *ethdev;
	int i, ret;

	hw = afu_dev->shared.data;

	/* remove representor ports */
	for (i = 0; i < hw->port_num; i++) {
		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		ethdev = rte_eth_dev_allocated(name);
		if (ethdev != NULL)
			rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
	}

	ret = rte_eth_switch_domain_free(hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

	/* hw uninit */
	ipn3ke_hw_uninit(hw);

	return 0;
}

static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
RTE_LOG_REGISTER(ipn3ke_afu_logtype, pmd.afu.ipn3ke, NOTICE);