net/ipn3ke: add new driver
[dpdk.git] / drivers/net/ipn3ke/ipn3ke_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <stdint.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
9 #include <rte_pci.h>
10 #include <rte_malloc.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15
16 #include <rte_io.h>
17 #include <rte_rawdev.h>
18 #include <rte_rawdev_pmd.h>
19 #include <rte_bus_ifpga.h>
20 #include <ifpga_common.h>
21 #include <ifpga_logs.h>
22
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_logs.h"
25 #include "ipn3ke_ethdev.h"
26
27 int ipn3ke_afu_logtype;
28
29 static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
30         { MAP_UUID_10G_LOW,  MAP_UUID_10G_HIGH },
31         { IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
32         { IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
33         { IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
34         { 0, 0 /* sentinel */ },
35 };
36
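/*
 * Read a 32-bit register through the indirect access window of the selected
 * Ethernet group BAR: post an RCMD with the target address at offset 0x10,
 * then poll the result register at offset 0x18 until its valid bit (bit 32)
 * is set, giving up after a bounded number of tries.
 */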
37 static int
38 ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
39         uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
40 {
41         uint32_t i, try_cnt;
42         uint64_t indirect_value;
43         volatile void *indirect_addrs;
44         uint64_t target_addr;
45         uint64_t read_data = 0;
46
47         if (eth_group_sel != 0 && eth_group_sel != 1)
48                 return -1;
49
50         addr &= 0x3FF;
51         target_addr = addr | dev_sel << 17;
52
53         indirect_value = RCMD | target_addr << 32;
54         indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
55
56         rte_delay_us(10);
57
58         rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
59
60         i = 0;
61         try_cnt = 10;
62         indirect_addrs = hw->eth_group_bar[eth_group_sel] +
63                 0x18;
64         do {
65                 read_data = rte_read64(indirect_addrs);
66                 if ((read_data >> 32) == 1)
67                         break;
68                 i++;
69         } while (i <= try_cnt);
70         if (i > try_cnt)
71                 return -1;
72
73         *rd_data = rte_le_to_cpu_32(read_data);
74         return 0;
75 }
76
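/*
 * Write a 32-bit register through the same indirect access window: WCMD,
 * target address and data are packed into one 64-bit value and posted at
 * offset 0x10 of the selected Ethernet group BAR.
 */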
77 static int
78 ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
79         uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
80 {
81         volatile void *indirect_addrs;
82         uint64_t indirect_value;
83         uint64_t target_addr;
84
85         if (eth_group_sel != 0 && eth_group_sel != 1)
86                 return -1;
87
88         addr &= 0x3FF;
89         target_addr = addr | dev_sel << 17;
90
91         indirect_value = WCMD | target_addr << 32 | wr_data;
92         indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
93
94         rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
95         return 0;
96 }
97
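/*
 * MAC register accessors: each MAC block occupies two device-select slots
 * starting at 3, hence dev_sel = mac_num * 2 + 3.
 */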
98 static int
99 ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
100         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
101 {
102         uint32_t dev_sel;
103
104         if (mac_num >= hw->port_num)
105                 return -1;
106
107         mac_num &= 0x7;
108         dev_sel = mac_num * 2 + 3;
109
110         return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
111 }
112
113 static int
114 ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
115         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
116 {
117         uint32_t dev_sel;
118
119         if (mac_num >= hw->port_num)
120                 return -1;
121
122         mac_num &= 0x7;
123         dev_sel = mac_num * 2 + 3;
124
125         return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
126 }
127
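/*
 * Read the vBNG capability layout (register block offsets, entry sizes,
 * flow counts and link speed) from the region starting at IPN3KE_HW_BASE.
 */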
128 static void
129 ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
130 {
131         hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
132                         (IPN3KE_HW_BASE + 0), 0, 0xFFFF);
133         hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
134                         (IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
135         hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
136                         (IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
137         hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
138                         (IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
139         hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
140                         (IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
141         hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
142                         (IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
143         hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
144                         (IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
145         hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
146                         (IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
147         hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
148                         (IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
149         hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
150                         (IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
151         hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
152                         (IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
153         hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
154                         (IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
155         hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
156                         (IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
157         hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
158                         (IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
159         hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
160                         (IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
161         hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
162                         (IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
163         hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
164                         (IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
165         hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
166                         (IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
167         hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
168                         (IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
169         hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
170                         (IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);
171
172         hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
173                         IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
174                         0, 0xFFFF);
175         hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
176                         IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
177                         4, 0xFFFF);
178         hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
179                         IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
180                         8, 0xFFFF);
181         hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
182                         IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
183                         0xC, 0xFFFF);
184         hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
185                         IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
186                         0x10, 0xFFFF);
187
188         hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
189                         IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
190                         0, 0xFFFFF);
191 }
192
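/*
 * One-time hardware setup shared by all representors: resolve the Ethernet
 * group BARs, port count and retimer MAC type from the ifpga rawdev, reset
 * the vBNG IP when the AFU UUID matches, bring up the 10G XFI MAC paths and
 * allocate a switch domain for the representor ports.
 */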
193 static int
194 ipn3ke_hw_init(struct rte_afu_device *afu_dev,
195         struct ipn3ke_hw *hw)
196 {
197         struct rte_rawdev *rawdev;
198         int ret;
199         int i;
200         uint64_t port_num, mac_type, index;
201
202         rawdev  = afu_dev->rawdev;
203
204         hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
205         hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
206         hw->afu_id.port = afu_dev->id.port;
207         hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
208         hw->f_mac_read = ipn3ke_indirect_mac_read;
209         hw->f_mac_write = ipn3ke_indirect_mac_write;
210         hw->rawdev = rawdev;
211         rawdev->dev_ops->attr_get(rawdev,
212                                 "LineSideBARIndex", &index);
213         hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
214         rawdev->dev_ops->attr_get(rawdev,
215                                 "NICSideBARIndex", &index);
216         hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
217         rawdev->dev_ops->attr_get(rawdev,
218                                 "LineSideLinkPortNum", &port_num);
219         hw->retimer.port_num = (int)port_num;
220         hw->port_num = hw->retimer.port_num;
221         rawdev->dev_ops->attr_get(rawdev,
222                                 "LineSideMACType", &mac_type);
223         hw->retimer.mac_type = (int)mac_type;
224
225         if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
226                 afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
227                 ipn3ke_hw_cap_init(hw);
228                 IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
229                         IPN3KE_READ_REG(hw, 0));
230
231                 /* Reset FPGA IP */
232                 IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
233                 IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
234         }
235
236         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
237                 /* Enable inter connect channel */
238                 for (i = 0; i < hw->port_num; i++) {
239                         /* Enable the TX path */
240                         ipn3ke_xmac_tx_enable(hw, i, 1);
241
242                         /* Disable source address override */
243                         ipn3ke_xmac_smac_ovd_dis(hw, i, 1);
244
245                         /* Enable the RX path */
246                         ipn3ke_xmac_rx_enable(hw, i, 1);
247
248                         /* Clear all TX statistics counters */
249                         ipn3ke_xmac_tx_clr_stcs(hw, i, 1);
250
251                         /* Clear all RX statistics counters */
252                         ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
253                 }
254         }
255
256         ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
257         if (ret)
258                 IPN3KE_AFU_PMD_WARN("failed to allocate switch domain: %d",
259                         ret);
260
261         hw->tm_hw_enable = 0;
262         hw->flow_hw_enable = 0;
263         if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
264                 afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
265                 hw->tm_hw_enable = 1;
266                 hw->flow_hw_enable = 1;
267         }
268
269         hw->acc_tm = 0;
270         hw->acc_flow = 0;
271
272         return 0;
273 }
274
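/* Quiesce the 10G XFI MAC paths and clear their statistics on teardown. */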
275 static void
276 ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
277 {
278         int i;
279
280         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
281                 for (i = 0; i < hw->port_num; i++) {
282                         /* Disable the TX path */
283                         ipn3ke_xmac_tx_disable(hw, i, 1);
284
285                         /* Disable the RX path */
286                         ipn3ke_xmac_rx_disable(hw, i, 1);
287
288                         /* Clear all TX statistics counters */
289                         ipn3ke_xmac_tx_clr_stcs(hw, i, 1);
290
291                         /* Clear all RX statistics counters */
292                         ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
293                 }
294         }
295 }
296
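/*
 * AFU probe: allocate (or reuse) the shared ipn3ke_hw data, initialize the
 * hardware and create one representor ethdev per line-side port.
 */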
297 static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
298 {
299         char name[RTE_ETH_NAME_MAX_LEN];
300         struct ipn3ke_hw *hw;
301         int i, retval;
302
303         /* check if the AFU device has been probed already */
304         /* allocate the shared ipn3ke_hw structure */
305         if (!afu_dev->shared.data) {
306                 snprintf(name, sizeof(name), "net_%s_hw",
307                         afu_dev->device.name);
308                 hw = rte_zmalloc_socket(name,
309                                         sizeof(struct ipn3ke_hw),
310                                         RTE_CACHE_LINE_SIZE,
311                                         afu_dev->device.numa_node);
312                 if (!hw) {
313                         IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
314                         retval = -ENOMEM;
315                         return retval;
316                 }
317                 afu_dev->shared.data = hw;
318
319                 rte_spinlock_init(&afu_dev->shared.lock);
320         } else {
321                 hw = afu_dev->shared.data;
322         }
323
324         retval = ipn3ke_hw_init(afu_dev, hw);
325         if (retval)
326                 return retval;
327
328         /* probe representor ports */
329         for (i = 0; i < hw->port_num; i++) {
330                 struct ipn3ke_rpst rpst = {
331                         .port_id = i,
332                         .switch_domain_id = hw->switch_domain_id,
333                         .hw = hw
334                 };
335
336                 /* representor port net_bdf_port */
337                 snprintf(name, sizeof(name), "net_%s_representor_%d",
338                         afu_dev->device.name, i);
339
340                 retval = rte_eth_dev_create(&afu_dev->device, name,
341                         sizeof(struct ipn3ke_rpst), NULL, NULL,
342                         NULL, &rpst);
343
344                 if (retval)
345                         IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
346                                 name);
347         }
348
349         return 0;
350 }
351
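/*
 * AFU remove: destroy the representor ethdevs, free the switch domain and
 * shut the MAC paths down.
 */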
352 static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
353 {
354         char name[RTE_ETH_NAME_MAX_LEN];
355         struct ipn3ke_hw *hw;
356         struct rte_eth_dev *ethdev;
357         int i, ret;
358
359         hw = afu_dev->shared.data;
360
361         /* remove representor ports */
362         for (i = 0; i < hw->port_num; i++) {
363                 /* representor port net_bdf_port */
364                 snprintf(name, sizeof(name), "net_%s_representor_%d",
365                         afu_dev->device.name, i);
366
367                 ethdev = rte_eth_dev_allocated(name);
368                 if (!ethdev)
369                         return -ENODEV;
370
371                 rte_eth_dev_destroy(ethdev, NULL);
372         }
373
374         ret = rte_eth_switch_domain_free(hw->switch_domain_id);
375         if (ret)
376                 IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
377
378         /* hw uninit */
379         ipn3ke_hw_uninit(hw);
380
381         return 0;
382 }
383
384 static struct rte_afu_driver afu_ipn3ke_driver = {
385         .id_table = afu_uuid_ipn3ke_map,
386         .probe = ipn3ke_vswitch_probe,
387         .remove = ipn3ke_vswitch_remove,
388 };
389
390 RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
391
392 static const char * const valid_args[] = {
393 #define IPN3KE_AFU_NAME         "afu"
394                 IPN3KE_AFU_NAME,
395 #define IPN3KE_FPGA_ACCELERATION_LIST     "fpga_acc"
396                 IPN3KE_FPGA_ACCELERATION_LIST,
397 #define IPN3KE_I40E_PF_LIST     "i40e_pf"
398                 IPN3KE_I40E_PF_LIST,
399                 NULL
400 };
401
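/*
 * Parse the "fpga_acc" list, e.g. "{tm|flow}", and record which accelerations
 * the FPGA should own; assumes a well-formed list terminated by '}'.
 */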
402 static int
403 ipn3ke_cfg_parse_acc_list(const char *afu_name,
404         const char *acc_list_name)
405 {
406         struct rte_afu_device *afu_dev;
407         struct ipn3ke_hw *hw;
408         const char *p_source;
409         char *p_start;
410         char name[RTE_ETH_NAME_MAX_LEN];
411
412         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
413         if (!afu_dev)
414                 return -1;
415         hw = afu_dev->shared.data;
416         if (!hw)
417                 return -1;
418
419         p_source = acc_list_name;
420         while (*p_source) {
421                 while ((*p_source == '{') || (*p_source == '|'))
422                         p_source++;
423                 p_start = name;
424                 while ((*p_source != '|') && (*p_source != '}'))
425                         *p_start++ = *p_source++;
426                 *p_start = 0;
427                 if (!strcmp(name, "tm") && hw->tm_hw_enable)
428                         hw->acc_tm = 1;
429
430                 if (!strcmp(name, "flow") && hw->flow_hw_enable)
431                         hw->acc_flow = 1;
432
433                 if (*p_source == '}')
434                         return 0;
435         }
436
437         return 0;
438 }
439
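/*
 * Parse the "i40e_pf" list and bind each representor to the i40e PF ethdev
 * named at the same position, i.e. the first PF name maps to representor 0.
 */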
440 static int
441 ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
442         const char *pf_name)
443 {
444         struct rte_eth_dev *i40e_eth, *rpst_eth;
445         struct rte_afu_device *afu_dev;
446         struct ipn3ke_rpst *rpst;
447         struct ipn3ke_hw *hw;
448         const char *p_source;
449         char *p_start;
450         char name[RTE_ETH_NAME_MAX_LEN];
451         uint16_t port_id;
452         int i;
453         int ret = -1;
454
455         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
456         if (!afu_dev)
457                 return -1;
458         hw = afu_dev->shared.data;
459         if (!hw)
460                 return -1;
461
462         p_source = pf_name;
463         for (i = 0; i < hw->port_num; i++) {
464                 snprintf(name, sizeof(name), "net_%s_representor_%d",
465                         afu_name, i);
466                 ret = rte_eth_dev_get_port_by_name(name, &port_id);
467                 if (ret)
468                         return -1;
469                 rpst_eth = &rte_eth_devices[port_id];
470                 rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);
471
472                 while ((*p_source == '{') || (*p_source == '|'))
473                         p_source++;
474                 p_start = name;
475                 while ((*p_source != '|') && (*p_source != '}'))
476                         *p_start++ = *p_source++;
477                 *p_start = 0;
478
479                 ret = rte_eth_dev_get_port_by_name(name, &port_id);
480                 if (ret)
481                         return -1;
482                 i40e_eth = &rte_eth_devices[port_id];
483
484                 rpst->i40e_pf_eth = i40e_eth;
485                 rpst->i40e_pf_eth_port_id = port_id;
486
487                 if ((*p_source == '}') || !(*p_source))
488                         break;
489         }
490
491         return 0;
492 }
493
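/*
 * Vdev probe for ipn3ke_cfg: "afu" and "i40e_pf" are mandatory, "fpga_acc"
 * is optional. Illustrative devargs (AFU name and PF names are placeholders):
 *   --vdev 'ipn3ke_cfg,afu=<AFU name>,fpga_acc={tm|flow},i40e_pf={<pf0>|<pf1>}'
 */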
494 static int
495 ipn3ke_cfg_probe(struct rte_vdev_device *dev)
496 {
497         struct rte_devargs *devargs;
498         struct rte_kvargs *kvlist = NULL;
499         char *afu_name = NULL;
500         char *acc_name = NULL;
501         char *pf_name = NULL;
502         int afu_name_en = 0;
503         int acc_list_en = 0;
504         int pf_list_en = 0;
505         int ret = -1;
506
507         devargs = dev->device.devargs;
508
509         kvlist = rte_kvargs_parse(devargs->args, valid_args);
510         if (!kvlist) {
511                 IPN3KE_AFU_PMD_ERR("error when parsing param");
512                 goto end;
513         }
514
515         if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
516                 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
517                                        &rte_ifpga_get_string_arg,
518                                        &afu_name) < 0) {
519                         IPN3KE_AFU_PMD_ERR("failed to parse %s",
520                                      IPN3KE_AFU_NAME);
521                         goto end;
522                 } else {
523                         afu_name_en = 1;
524                 }
525         }
526
527         if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
528                 if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
529                                        &rte_ifpga_get_string_arg,
530                                        &acc_name) < 0) {
531                         IPN3KE_AFU_PMD_ERR("failed to parse %s",
532                                      IPN3KE_FPGA_ACCELERATION_LIST);
533                         goto end;
534                 } else {
535                         acc_list_en = 1;
536                 }
537         }
538
539         if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
540                 if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
541                                        &rte_ifpga_get_string_arg,
542                                        &pf_name) < 0) {
543                         IPN3KE_AFU_PMD_ERR("failed to parse %s",
544                                      IPN3KE_I40E_PF_LIST);
545                         goto end;
546                 } else {
547                         pf_list_en = 1;
548                 }
549         }
550
551         if (!afu_name_en) {
552                 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
553                           IPN3KE_AFU_NAME);
554                 goto end;
555         }
556
557         if (!pf_list_en) {
558                 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
559                           IPN3KE_I40E_PF_LIST);
560                 goto end;
561         }
562
563         if (acc_list_en) {
564                 ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
565                 if (ret) {
566                         IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
567                           IPN3KE_FPGA_ACCELERATION_LIST);
568                         goto end;
569                 }
570         } else {
571                 IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
572                           IPN3KE_FPGA_ACCELERATION_LIST);
573         }
574
575         ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
576         if (ret)
577                 goto end;
578 end:
579         if (kvlist)
580                 rte_kvargs_free(kvlist);
581         if (afu_name)
582                 free(afu_name);
583         if (acc_name)
584                 free(acc_name);
585
586         return ret;
587 }
588
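/*
 * Vdev remove: look the AFU up again from the "afu" devarg and tear down the
 * corresponding vswitch.
 */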
589 static int
590 ipn3ke_cfg_remove(struct rte_vdev_device *dev)
591 {
592         struct rte_devargs *devargs;
593         struct rte_kvargs *kvlist = NULL;
594         char *afu_name = NULL;
595         struct rte_afu_device *afu_dev;
596         int ret = -1;
597
598         devargs = dev->device.devargs;
599
600         kvlist = rte_kvargs_parse(devargs->args, valid_args);
601         if (!kvlist) {
602                 IPN3KE_AFU_PMD_ERR("error when parsing param");
603                 goto end;
604         }
605
606         if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
607                 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
608                                        &rte_ifpga_get_string_arg,
609                                        &afu_name) < 0) {
610                         IPN3KE_AFU_PMD_ERR("failed to parse %s",
611                                      IPN3KE_AFU_NAME);
612                 } else {
613                         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
614                         if (!afu_dev)
615                                 goto end;
616                         ret = ipn3ke_vswitch_remove(afu_dev);
617                 }
618         } else {
619                 IPN3KE_AFU_PMD_ERR("failed to remove ipn3ke_cfg %p: no afu argument", dev);
620         }
621
622 end:
623         if (kvlist)
624                 rte_kvargs_free(kvlist);
625
626         return ret;
627 }
628
629 static struct rte_vdev_driver ipn3ke_cfg_driver = {
630         .probe = ipn3ke_cfg_probe,
631         .remove = ipn3ke_cfg_remove,
632 };
633
634 RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
635 RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
636         "afu=<string> "
637         "fpga_acc=<string> "
638         "i40e_pf=<string>");
639
640 RTE_INIT(ipn3ke_afu_init_log)
641 {
642         ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
643         if (ipn3ke_afu_logtype >= 0)
644                 rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
645 }