/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_io.h>
#include <rte_cycles.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

int ipn3ke_afu_logtype;

static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};

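/*
 * Indirect register access: a 64-bit command (read/write opcode, device
 * select and register address in the high word, write data in the low word)
 * is written to the command register at offset 0x10 of the Ethernet group
 * BAR. For reads, completion is polled at offset 0x18, where bit 32 set
 * means the low 32 bits hold valid read data.
 */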
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i, try_cnt;
	uint64_t indirect_value;
	volatile void *indirect_addrs;
	uint64_t target_addr;
	uint64_t read_data = 0;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;

	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_delay_us(10);

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);

	/* Poll the response register until the valid bit (bit 32) is set. */
	i = 0;
	try_cnt = 10;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}

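/* Indirect write: issue the command word; no completion handshake. */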
static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	volatile void *indirect_addrs;
	uint64_t indirect_value;
	uint64_t target_addr;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	target_addr = addr | dev_sel << 17;

	indirect_value = WCMD | target_addr << 32 | wr_data;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
	return 0;
}

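/*
 * Per-MAC wrappers: MAC instance "mac_num" of an Ethernet group is
 * addressed with an indirect device-select value of mac_num * 2 + 3.
 */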
static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}

static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}

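/*
 * Read the AFU capability block: a version word, the offsets of the
 * capability/status/control register blocks, then the offset and entry
 * size of each functional table (classifier, policer, RSS, DMAC map,
 * queue manager, CCB, QoS).
 */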
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/*
	 * NOTE: the shift/mask pairs below are assumed placeholders
	 * (whole-register reads); the exact field layout of the capability
	 * and status blocks must come from the IPN3KE register spec.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);

	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFFFFF);
}

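/*
 * One-time AFU initialization: cache the AFU identity and BAR pointers,
 * query the line-side configuration from the underlying rawdev, reset the
 * vBNG IP when that image is loaded, bring up the 10G MAC paths, and
 * allocate a switch domain shared by all representor ports.
 */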
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;

	rawdev->dev_ops->attr_get(rawdev,
		"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
		"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
		"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
		"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ipn3ke_hw_cap_init(hw);
		IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
			IPN3KE_READ_REG(hw, 0));

		/* Reset the FPGA IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
	}

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disable source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}

	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain: %d",
			ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	return 0;
}

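/*
 * Tear-down counterpart of ipn3ke_hw_init(): disable the 10G MAC TX/RX
 * paths and clear the statistics counters.
 */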
static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Disable the TX path */
			ipn3ke_xmac_tx_disable(hw, i, 1);

			/* Disable the RX path */
			ipn3ke_xmac_rx_disable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}
}

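/*
 * AFU probe: allocate (or reuse) the shared hardware state, initialize the
 * AFU, then create one ethdev representor per line-side port.
 */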
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	int i, retval;

	/* check if the AFU device has been probed already */
	/* allocate shared mcp_vswitch structure */
	if (!afu_dev->shared.data) {
		snprintf(name, sizeof(name), "net_%s_hw",
			afu_dev->device.name);
		hw = rte_zmalloc_socket(name,
			sizeof(struct ipn3ke_hw),
			RTE_CACHE_LINE_SIZE,
			afu_dev->device.numa_node);
		if (!hw) {
			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
			return -ENOMEM;
		}
		afu_dev->shared.data = hw;

		rte_spinlock_init(&afu_dev->shared.lock);
	} else {
		hw = afu_dev->shared.data;
	}

	retval = ipn3ke_hw_init(afu_dev, hw);
	if (retval)
		return retval;

	/* probe representor ports */
	for (i = 0; i < hw->port_num; i++) {
		struct ipn3ke_rpst rpst = {
			.port_id = i,
			.switch_domain_id = hw->switch_domain_id,
			.hw = hw
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		retval = rte_eth_dev_create(&afu_dev->device, name,
			sizeof(struct ipn3ke_rpst), NULL, NULL,
			ipn3ke_rpst_init, &rpst);
		if (retval)
			IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
				name);
	}

	return 0;
}

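/*
 * AFU remove: destroy every representor ethdev, release the switch domain
 * and shut the hardware down.
 */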
static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *ethdev;
	int i, ret;

	hw = afu_dev->shared.data;

	/* remove representor ports */
	for (i = 0; i < hw->port_num; i++) {
		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		/* look each representor up by the name built above */
		ethdev = rte_eth_dev_allocated(name);
		if (!ethdev)
			return -ENODEV;

		rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
	}

	ret = rte_eth_switch_domain_free(hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

	ipn3ke_hw_uninit(hw);

	return 0;
}

static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);

static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME			"afu"
	IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST	"fpga_acc"
	IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST		"i40e_pf"
	IPN3KE_I40E_PF_LIST,
	NULL
};

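/*
 * Parse the "fpga_acc" list, e.g. "{tm|flow}": for each accelerator name
 * found, turn the matching feature on, provided the hardware reported
 * support for it during init.
 */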
static int
ipn3ke_cfg_parse_acc_list(const char *afu_name,
		const char *acc_list_name)
{
	struct rte_afu_device *afu_dev;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = acc_list_name;
	while (*p_source) {
		/* skip list delimiters, then copy out the next token */
		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}'))
			*p_start++ = *p_source++;
		*p_start = '\0';

		if (!strcmp(name, "tm") && hw->tm_hw_enable)
			hw->acc_tm = 1;

		if (!strcmp(name, "flow") && hw->flow_hw_enable)
			hw->acc_flow = 1;

		if (*p_source == '}')
			return 0;
	}

	return 0;
}

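/*
 * Parse the "i40e_pf" list, e.g. "{0000:b1:00.0|0000:b1:00.1|...}", and
 * bind the n-th i40e PF ethdev to the n-th ipn3ke representor port.
 */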
static int
ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
		const char *pf_name)
{
	struct rte_eth_dev *i40e_eth, *rpst_eth;
	struct rte_afu_device *afu_dev;
	struct ipn3ke_rpst *rpst;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int i;
	int ret;

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = pf_name;
	for (i = 0; i < hw->port_num; i++) {
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_name, i);
		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		rpst_eth = &rte_eth_devices[port_id];
		rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);

		/* copy the next PF name out of the list */
		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}'))
			*p_start++ = *p_source++;
		*p_start = '\0';

		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		i40e_eth = &rte_eth_devices[port_id];

		rpst->i40e_pf_eth = i40e_eth;
		rpst->i40e_pf_eth_port_id = port_id;

		if ((*p_source == '}') || !(*p_source))
			break;
	}

	return 0;
}

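/*
 * Probe of the "ipn3ke_cfg" vdev: parse the devargs, locate the AFU by
 * name ("afu" is mandatory), optionally enable FPGA accelerators
 * ("fpga_acc"), and wire the representors to their i40e PFs ("i40e_pf",
 * mandatory).
 */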
static int
ipn3ke_cfg_probe(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	char *acc_name = NULL;
	char *pf_name = NULL;
	int afu_name_en = 0;
	int acc_list_en = 0;
	int pf_list_en = 0;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("failed to parse parameters");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
		afu_name_en = 1;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
				&rte_ifpga_get_string_arg,
				&acc_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
		acc_list_en = 1;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
				&rte_ifpga_get_string_arg,
				&pf_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_I40E_PF_LIST);
			goto end;
		}
		pf_list_en = 1;
	}

	if (!afu_name_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_AFU_NAME);
		goto end;
	}

	if (!pf_list_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_I40E_PF_LIST);
		goto end;
	}

	if (acc_list_en) {
		ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
		if (ret) {
			IPN3KE_AFU_PMD_ERR("failed to parse arg %s for ipn3ke",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
	} else {
		IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
			IPN3KE_FPGA_ACCELERATION_LIST);
	}

	ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);

end:
	rte_kvargs_free(kvlist);
	free(afu_name);
	free(acc_name);
	free(pf_name);

	return ret;
}

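/*
 * Remove of the "ipn3ke_cfg" vdev: find the AFU named in the devargs and
 * tear its vswitch down.
 */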
static int
ipn3ke_cfg_remove(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	struct rte_afu_device *afu_dev;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("failed to parse parameters");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				&rte_ifpga_get_string_arg,
				&afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		}
		afu_dev = rte_ifpga_find_afu_by_name(afu_name);
		if (!afu_dev)
			goto end;
		ret = ipn3ke_vswitch_remove(afu_dev);
		if (ret)
			IPN3KE_AFU_PMD_ERR("failed to remove ipn3ke_cfg %p",
				dev);
	}

end:
	rte_kvargs_free(kvlist);
	free(afu_name);

	return ret;
}

static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string> "
	"i40e_pf=<string>");

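/*
 * Illustrative usage (the device names below are examples, not defaults):
 *   --vdev 'ipn3ke_cfg0,afu=0|b3:00.0,fpga_acc={tm|flow},
 *           i40e_pf={0000:b1:00.0|0000:b1:00.1}'
 */
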
RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}