/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_io.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

int ipn3ke_afu_logtype;
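
/*
 * AFU IDs this PMD claims; the ifpga bus matches probed AFU devices
 * against this table. The { 0, 0 } entry terminates the list.
 */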
static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};
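
/*
 * Read a register through the indirect-access window of the selected
 * ETH group BAR: post a read command at BAR offset 0x10, then poll the
 * result register at offset 0x18 until the valid bit (bit 32) is set
 * or the retry budget is exhausted.
 */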
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i, try_cnt;
	uint64_t indirect_value;
	volatile void *indirect_addrs;
	uint64_t target_addr;
	uint64_t read_data = 0;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	addr &= 0x3FF;
	target_addr = addr | dev_sel << 17;

	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_delay_us(10);

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);

	i = 0;
	try_cnt = 10;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] +
		0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}
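
/*
 * Write a register through the same indirect-access window. Writes are
 * posted: the command is issued at offset 0x10 and no completion
 * status is polled.
 */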
static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	volatile void *indirect_addrs;
	uint64_t indirect_value;
	uint64_t target_addr;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	addr &= 0x3FF;
	target_addr = addr | dev_sel << 17;

	indirect_value = WCMD | target_addr << 32 | wr_data;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
	return 0;
}
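
/*
 * MAC-level read helper: translate a MAC index into the device-select
 * field of the indirect interface (two device slots per MAC, starting
 * at slot 3) and delegate to ipn3ke_indirect_read().
 */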
static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	mac_num &= 0x7;
	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}
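
/* MAC-level write helper, mirroring ipn3ke_indirect_mac_read(). */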
static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	mac_num &= 0x7;
	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}
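
/*
 * Snapshot the capability words the VBNG AFU image exposes at
 * IPN3KE_HW_BASE: block offsets first, then per-feature entry counts
 * and the link speed from the status block.
 */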
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/* Entry counts in the capability block; the sequential word
	 * offsets (0x0..0x10) and 16-bit count fields are assumed from
	 * the block layout.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0x10, 0xFFFF);

	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFFF);
}
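
/*
 * One-time HW setup shared by all representor ports: cache the AFU
 * identity and BARs, query the rawdev for the line/NIC side BAR
 * indexes, port count and MAC type, reset the VBNG image when present,
 * bring up the XFI MACs, and allocate a switch domain for the
 * representors.
 */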
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;

	rawdev->dev_ops->attr_get(rawdev,
			"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
			"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
			"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
			"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ipn3ke_hw_cap_init(hw);
		IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
			IPN3KE_READ_REG(hw, 0));

		/* Reset the FPGA IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
	}

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable the interconnect channel on each line-side port */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disable source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}

	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
			ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		hw->tm_hw_enable = 1;
		hw->flow_hw_enable = 1;
	}

	hw->acc_tm = 0;
	hw->acc_flow = 0;

	return 0;
}
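
/* Quiesce the XFI MACs and clear their statistics on teardown. */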
static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Disable the TX path */
			ipn3ke_xmac_tx_disable(hw, i, 1);

			/* Disable the RX path */
			ipn3ke_xmac_rx_disable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}
}
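
/*
 * AFU probe: allocate (or reuse) the shared ipn3ke_hw, initialize the
 * hardware, then create one representor ethdev per line-side port.
 */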
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	int i, retval;

	/* check if the AFU device has been probed already */
	/* allocate shared mcp_vswitch structure */
	if (!afu_dev->shared.data) {
		snprintf(name, sizeof(name), "net_%s_hw",
			afu_dev->device.name);
		hw = rte_zmalloc_socket(name,
				sizeof(struct ipn3ke_hw),
				RTE_CACHE_LINE_SIZE,
				afu_dev->device.numa_node);
		if (!hw) {
			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
			return -ENOMEM;
		}
		afu_dev->shared.data = hw;

		rte_spinlock_init(&afu_dev->shared.lock);
	} else {
		hw = afu_dev->shared.data;
	}

	retval = ipn3ke_hw_init(afu_dev, hw);
	if (retval)
		return retval;

	/* probe representor ports */
	for (i = 0; i < hw->port_num; i++) {
		struct ipn3ke_rpst rpst = {
			.port_id = i,
			.switch_domain_id = hw->switch_domain_id,
			.hw = hw
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		retval = rte_eth_dev_create(&afu_dev->device, name,
			sizeof(struct ipn3ke_rpst), NULL, NULL,
			ipn3ke_rpst_init, &rpst);
		if (retval)
			IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
				name);
	}

	return 0;
}
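
/*
 * AFU remove: destroy the representor ports, free the switch domain
 * and quiesce the hardware.
 */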
static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *ethdev;
	int i, ret;

	hw = afu_dev->shared.data;

	/* remove representor ports */
	for (i = 0; i < hw->port_num; i++) {
		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		ethdev = rte_eth_dev_allocated(name);
		if (!ethdev)
			return -ENODEV;

		rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
	}

	ret = rte_eth_switch_domain_free(hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

	ipn3ke_hw_uninit(hw);

	return 0;
}

static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
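
/*
 * Devargs accepted by the ipn3ke_cfg vdev, e.g. (illustrative):
 *   --vdev 'ipn3ke_cfg,afu=0|b3:00.0,fpga_acc={tm|flow},i40e_pf={0000:b1:00.0|0000:b1:00.1}'
 */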
static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME		"afu"
	IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST	"fpga_acc"
	IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST	"i40e_pf"
	IPN3KE_I40E_PF_LIST,
	NULL
};
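
/*
 * Parse the "fpga_acc" list, e.g. "{tm|flow}", and latch the requested
 * accelerators into hw->acc_tm / hw->acc_flow, but only when the AFU
 * image supports them (tm_hw_enable / flow_hw_enable).
 */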
static int
ipn3ke_cfg_parse_acc_list(const char *afu_name,
	const char *acc_list_name)
{
	struct rte_afu_device *afu_dev;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = acc_list_name;
	while (*p_source) {
		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}'))
			*p_start++ = *p_source++;
		*p_start = 0;
		if (!strcmp(name, "tm") && hw->tm_hw_enable)
			hw->acc_tm = 1;

		if (!strcmp(name, "flow") && hw->flow_hw_enable)
			hw->acc_flow = 1;

		if (*p_source == '}')
			return 0;
	}

	return -1;
}
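
/*
 * Parse the "i40e_pf" list and bind each representor to its i40e PF
 * ethdev: the N-th name inside "{...|...}" is looked up by name and
 * attached to representor N.
 */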
static int
ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
	const char *pf_name)
{
	struct rte_eth_dev *i40e_eth, *rpst_eth;
	struct rte_afu_device *afu_dev;
	struct ipn3ke_rpst *rpst;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int i;
	int ret = -1;

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = pf_name;
	for (i = 0; i < hw->port_num; i++) {
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_name, i);
		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		rpst_eth = &rte_eth_devices[port_id];
		rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);

		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}'))
			*p_start++ = *p_source++;
		*p_start = 0;

		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		i40e_eth = &rte_eth_devices[port_id];

		rpst->i40e_pf_eth = i40e_eth;
		rpst->i40e_pf_eth_port_id = port_id;

		if ((*p_source == '}') || !(*p_source))
			break;
	}

	return 0;
}
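
/*
 * Vdev probe: parse the "afu", "fpga_acc" and "i40e_pf" kvargs ("afu"
 * and "i40e_pf" are mandatory) and apply the configuration to the AFU
 * found by name.
 */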
static int
ipn3ke_cfg_probe(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	char *acc_name = NULL;
	char *pf_name = NULL;
	int afu_name_en = 0;
	int acc_list_en = 0;
	int pf_list_en = 0;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
		afu_name_en = 1;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
				&rte_ifpga_get_string_arg,
				&acc_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
		acc_list_en = 1;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
				&rte_ifpga_get_string_arg,
				&pf_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_I40E_PF_LIST);
			goto end;
		}
		pf_list_en = 1;
	}

	if (!afu_name_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_AFU_NAME);
		goto end;
	}

	if (!pf_list_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_I40E_PF_LIST);
		goto end;
	}

	if (acc_list_en) {
		ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
		if (ret) {
			IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
	} else {
		IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
			IPN3KE_FPGA_ACCELERATION_LIST);
	}

	ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (afu_name)
		free(afu_name);
	if (acc_name)
		free(acc_name);
	if (pf_name)
		free(pf_name);

	return ret;
}
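
/* Vdev remove: find the AFU named by the "afu" kvarg and tear it down. */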
static int
ipn3ke_cfg_remove(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	struct rte_afu_device *afu_dev;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("error to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
		afu_dev = rte_ifpga_find_afu_by_name(afu_name);
		if (!afu_dev)
			goto end;
		ret = ipn3ke_vswitch_remove(afu_dev);
	} else {
		IPN3KE_AFU_PMD_ERR("Remove ipn3ke_cfg %p error", dev);
	}

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (afu_name)
		free(afu_name);

	return ret;
}
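
/* Register the configuration vdev and document its accepted kvargs. */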
static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string> "
	"i40e_pf=<string>");

RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}