/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_kvargs.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_bus_vdev.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"
int ipn3ke_afu_logtype;
static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};
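
/* Registers in the Ethernet groups are not memory-mapped directly; they are
 * reached through an indirect-access mailbox in the eth group BAR. A command
 * word is written at BAR + 0x10 and, for reads, the result is polled at
 * BAR + 0x18 until bit 32 reports valid data. Command word layout, as
 * inferred from the two helpers below: the upper 32 bits carry the RCMD/WCMD
 * command flag and the target address (addr | dev_sel << 17); the lower
 * 32 bits carry the write data (writes only).
 */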
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i = 0, try_cnt = 10;
	uint64_t indirect_value, target_addr;
	volatile void *indirect_addrs;
	uint64_t read_data = 0;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;
	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);

	/* Poll the result register until bit 32 reports valid data. */
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}
static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	volatile void *indirect_addrs;
	uint64_t indirect_value, target_addr;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;
	indirect_value = WCMD | target_addr << 32 | wr_data;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
	return 0;
}
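
/* Each MAC instance inside an Ethernet group is addressed on the indirect
 * bus through dev_sel; the mapping dev_sel = mac_num * 2 + 3 used below
 * reflects how the MAC devices are interleaved behind the mailbox.
 */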
static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}
static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}
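
/* The AFU exposes a small self-description area at IPN3KE_HW_BASE: the first
 * words hold the version number and the offsets/entry sizes of each
 * functional block (classifier, policer, RSS, DMAC map, QM, CCB, QoS).
 * They are cached here so later code can address the blocks without
 * hard-coded offsets.
 */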
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);
	/* Counts of flows, RSS blocks and map entries live in the capability
	 * block; the per-field offsets and masks below are assumed for
	 * illustration.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0x10, 0xFFFF);

	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFF);
}
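
/* Block until the vBNG IP core reports init-done, or fail once the retry
 * budget in the loop below is exhausted.
 */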
static int
ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
{
	uint32_t timeout = 10000;

	while (timeout > 0) {
		if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
			== IPN3KE_VBNG_INIT_DONE)
			return 0;
		rte_delay_us(5);
		timeout--;
	}

	IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
	return -1;
}
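
/* One-time AFU bring-up: cache the BAR pointers and retimer attributes from
 * the underlying ifpga rawdev, reset the vBNG IP when that AFU image is
 * loaded, bring the XMAC TX/RX paths up, and clear both the NIC-side and
 * line-side statistics counters.
 */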
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;
	rawdev->dev_ops->attr_get(rawdev,
		"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
		"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
		"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
		"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", IPN3KE_READ_REG(hw, 0));
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		/* After power on, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		ipn3ke_hw_cap_init(hw);

		/* Reset the vBNG IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		rte_delay_us(10);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

		/* After reset, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;
	}
	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disable source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}
	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
			ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	return 0;
}
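
/* Undo ipn3ke_hw_init: disable the XMAC datapaths (10G) and clear the
 * statistics counters on both the NIC side and the line side.
 */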
static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Disable the TX path */
			ipn3ke_xmac_tx_disable(hw, i, 1);

			/* Disable the RX path */
			ipn3ke_xmac_rx_disable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}
}
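
/* AFU probe: allocate (or reuse) the shared ipn3ke_hw, initialize the
 * hardware, and create one representor ethdev per line-side port.
 */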
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	int i, retval;

	/* check if the AFU device has been probed already */
	/* allocate shared mcp_vswitch structure */
	if (!afu_dev->shared.data) {
		snprintf(name, sizeof(name), "net_%s_hw",
			afu_dev->device.name);
		hw = rte_zmalloc_socket(name,
			sizeof(struct ipn3ke_hw),
			RTE_CACHE_LINE_SIZE,
			afu_dev->device.numa_node);
		if (!hw) {
			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
			return -ENOMEM;
		}
		afu_dev->shared.data = hw;

		rte_spinlock_init(&afu_dev->shared.lock);
	} else {
		hw = afu_dev->shared.data;
	}

	retval = ipn3ke_hw_init(afu_dev, hw);
	if (retval)
		return retval;

	/* probe representor ports */
	for (i = 0; i < hw->port_num; i++) {
		struct ipn3ke_rpst rpst = {
			.port_id = i,
			.switch_domain_id = hw->switch_domain_id,
			.hw = hw
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		retval = rte_eth_dev_create(&afu_dev->device, name,
			sizeof(struct ipn3ke_rpst), NULL, NULL,
			ipn3ke_rpst_init, &rpst);
		if (retval)
			IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
				name);
	}

	return 0;
}
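
/* AFU remove: destroy the representor ethdevs, free the switch domain and
 * uninitialize the hardware.
 */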
static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *ethdev;
	int i, ret;

	hw = afu_dev->shared.data;

	/* remove representor ports */
	for (i = 0; i < hw->port_num; i++) {
		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		/* Look up the representor by the name just built,
		 * not by the parent device name.
		 */
		ethdev = rte_eth_dev_allocated(name);
		if (!ethdev)
			return -ENODEV;

		rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
	}

	ret = rte_eth_switch_domain_free(hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

	ipn3ke_hw_uninit(hw);

	return 0;
}
static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
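
/* The ipn3ke_cfg vdev glues an ipn3ke AFU to its i40e PF ports via devargs.
 * An illustrative invocation (PCI addresses and instance suffix depend on
 * the actual system) looks like:
 *   --vdev 'ipn3ke_cfg0,afu=0|b3:00.0,fpga_acc={tm|flow},
 *           i40e_pf={0000:b1:00.0|0000:b1:00.1}'
 */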
static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME "afu"
	IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST "fpga_acc"
	IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST "i40e_pf"
	IPN3KE_I40E_PF_LIST,
	NULL
};
static int
ipn3ke_cfg_parse_acc_list(const char *afu_name,
		const char *acc_list_name)
{
	struct rte_afu_device *afu_dev;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = acc_list_name;
	while (*p_source) {
		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}') &&
			(*p_source != '\0'))
			*p_start++ = *p_source++;
		*p_start = '\0';

		if (!strcmp(name, "tm") && hw->tm_hw_enable)
			hw->acc_tm = 1;

		if (!strcmp(name, "flow") && hw->flow_hw_enable)
			hw->acc_flow = 1;

		if (*p_source == '}')
			return 0;
	}

	return 0;
}
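
/* Walk the "{pf0|pf1|...}" list in lockstep with the representor ports and
 * bind representor i to the i-th i40e PF ethdev found by name.
 */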
static int
ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
		const char *pf_name)
{
	struct rte_eth_dev *i40e_eth, *rpst_eth;
	struct rte_afu_device *afu_dev;
	struct ipn3ke_rpst *rpst;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int i, ret = -1;

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = pf_name;
	for (i = 0; i < hw->port_num; i++) {
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_name, i);
		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		rpst_eth = &rte_eth_devices[port_id];
		rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);

		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}') &&
			(*p_source != '\0'))
			*p_start++ = *p_source++;
		*p_start = '\0';

		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		i40e_eth = &rte_eth_devices[port_id];

		rpst->i40e_pf_eth = i40e_eth;
		rpst->i40e_pf_eth_port_id = port_id;

		if ((*p_source == '}') || !(*p_source))
			break;
	}

	return 0;
}
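
/* vdev probe: parse the afu/fpga_acc/i40e_pf devargs ("afu" and "i40e_pf"
 * are mandatory) and wire the AFU representors to the i40e PFs.
 */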
static int
ipn3ke_cfg_probe(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	char *acc_name = NULL;
	char *pf_name = NULL;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
				&rte_ifpga_get_string_arg,
				&acc_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
				&rte_ifpga_get_string_arg,
				&pf_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_I40E_PF_LIST);
			goto end;
		}
	}

	if (!afu_name) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_AFU_NAME);
		goto end;
	}

	if (!pf_name) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_I40E_PF_LIST);
		goto end;
	}

	if (acc_name) {
		ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
		if (ret) {
			IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
	} else {
		IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
			IPN3KE_FPGA_ACCELERATION_LIST);
	}

	ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
end:
	rte_kvargs_free(kvlist);
	free(afu_name);
	free(acc_name);
	free(pf_name);
	return ret;
}
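
/* vdev remove: look up the AFU named in devargs and tear it down. */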
static int
ipn3ke_cfg_remove(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	struct rte_afu_device *afu_dev;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
		afu_dev = rte_ifpga_find_afu_by_name(afu_name);
		if (!afu_dev)
			goto end;
		ret = ipn3ke_vswitch_remove(afu_dev);
	} else {
		IPN3KE_AFU_PMD_ERR("Remove ipn3ke_cfg %p error", dev);
	}

end:
	rte_kvargs_free(kvlist);
	free(afu_name);
	return ret;
}
static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string> "
	"i40e_pf=<string>");
RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}