drivers/net/ipn3ke/ipn3ke_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_io.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

int ipn3ke_afu_logtype;

static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};

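/*
 * MAC registers in an Ethernet group are not memory-mapped directly; they are
 * reached through an indirect command/status register pair in the group BAR
 * (command at offset 0x10, status at offset 0x18). A command encodes the
 * 10-bit register address and the device-select field; completion is signalled
 * when the upper 32 bits of the 64-bit status value read back as 1.
 */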
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i, try_cnt;
	uint64_t indirect_value;
	volatile void *indirect_addrs;
	uint64_t target_addr;
	uint64_t read_data = 0;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	addr &= 0x3FF;
	target_addr = addr | dev_sel << 17;

	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_delay_us(10);

	rte_write64(rte_cpu_to_le_64(indirect_value), indirect_addrs);

	i = 0;
	try_cnt = 10;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}

static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	volatile void *indirect_addrs;
	uint64_t indirect_value;
	uint64_t target_addr;

	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	addr &= 0x3FF;
	target_addr = addr | dev_sel << 17;

	indirect_value = WCMD | target_addr << 32 | wr_data;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	rte_write64(rte_cpu_to_le_64(indirect_value), indirect_addrs);
	return 0;
}

static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	mac_num &= 0x7;
	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}

static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
	uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
	uint32_t dev_sel;

	if (mac_num >= hw->port_num)
		return -1;

	mac_num &= 0x7;
	dev_sel = mac_num * 2 + 3;

	return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}

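/*
 * Read the vBNG capability layout from the AFU CSR space: the block at
 * IPN3KE_HW_BASE describes where the classify, policer, RSS, DMAC map, QM,
 * CCB and QoS blocks live, how large their entries are, and the supported
 * flow/RSS/MAC-map counts and link speed.
 */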
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0x10, 0xFFFF);

	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFFF);
}

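/*
 * Poll the vBNG init status register until it reports IPN3KE_VBNG_INIT_DONE,
 * waiting 1 ms between polls for at most 10000 iterations (~10 s).
 */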
static int
ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
{
	uint32_t timeout = 10000;

	while (timeout > 0) {
		if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
			== IPN3KE_VBNG_INIT_DONE)
			break;
		rte_delay_us(1000);
		timeout--;
	}

	if (!timeout) {
		IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
		return -1;
	}

	return 0;
}

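/*
 * One-time hardware setup for the AFU: cache the AFU identity and BAR
 * pointers, query the rawdev for the line-side/NIC-side BAR indexes, port
 * count and MAC type, reset and re-initialize the vBNG IP when the vBNG image
 * is loaded, bring up the 10G XMAC paths, allocate a switch domain for the
 * representors, and initialize the TM and flow engines.
 */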
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;
	hw->rawdev = rawdev;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", IPN3KE_READ_REG(hw, 0));

	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		/* After power on, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		ipn3ke_hw_cap_init(hw);

		/* Reset vBNG IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		rte_delay_us(10);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

		/* After reset, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;
	}

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable interconnect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disable source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}

	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain: %d",
			ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	hw->acc_tm = 0;
	hw->acc_flow = 0;

	return 0;
}

static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
	int i;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		for (i = 0; i < hw->port_num; i++) {
			/* Disable the TX path */
			ipn3ke_xmac_tx_disable(hw, i, 1);

			/* Disable the RX path */
			ipn3ke_xmac_rx_disable(hw, i, 1);

			/* Clear all TX statistics counters */
			ipn3ke_xmac_tx_clr_stcs(hw, i, 1);

			/* Clear all RX statistics counters */
			ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
		}
	}
}

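/*
 * AFU probe: allocate (or reuse) the ipn3ke_hw structure shared through the
 * AFU device, run the hardware init, then create one representor ethdev
 * named "net_<afu>_representor_<i>" per line-side port.
 */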
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	int i, retval;

	/* check if the AFU device has been probed already */
	/* allocate shared mcp_vswitch structure */
	if (!afu_dev->shared.data) {
		snprintf(name, sizeof(name), "net_%s_hw",
			afu_dev->device.name);
		hw = rte_zmalloc_socket(name,
					sizeof(struct ipn3ke_hw),
					RTE_CACHE_LINE_SIZE,
					afu_dev->device.numa_node);
		if (!hw) {
			IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
			return -ENOMEM;
		}
		afu_dev->shared.data = hw;

		rte_spinlock_init(&afu_dev->shared.lock);
	} else {
		hw = afu_dev->shared.data;
	}

	retval = ipn3ke_hw_init(afu_dev, hw);
	if (retval)
		return retval;

	/* probe representor ports */
	for (i = 0; i < hw->port_num; i++) {
		struct ipn3ke_rpst rpst = {
			.port_id = i,
			.switch_domain_id = hw->switch_domain_id,
			.hw = hw
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		retval = rte_eth_dev_create(&afu_dev->device, name,
			sizeof(struct ipn3ke_rpst), NULL, NULL,
			ipn3ke_rpst_init, &rpst);

		if (retval)
			IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
				name);
	}

	return 0;
}

static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct ipn3ke_hw *hw;
	struct rte_eth_dev *ethdev;
	int i, ret;

	hw = afu_dev->shared.data;

	/* remove representor ports */
	for (i = 0; i < hw->port_num; i++) {
		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_dev->device.name, i);

		/* look up the representor by its own name, not the AFU name */
		ethdev = rte_eth_dev_allocated(name);
		if (!ethdev)
			return -ENODEV;

		rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
	}

	ret = rte_eth_switch_domain_free(hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

	/* HW uninit */
	ipn3ke_hw_uninit(hw);

	return 0;
}

static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);

static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME         "afu"
		IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST     "fpga_acc"
		IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST     "i40e_pf"
		IPN3KE_I40E_PF_LIST,
		NULL
};

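/*
 * Parse the "fpga_acc" devargs list, e.g. fpga_acc={tm|flow}. Each recognized
 * token enables the corresponding accelerator on the AFU, provided the
 * hardware reported it as available during init (tm_hw_enable/flow_hw_enable).
 */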
static int
ipn3ke_cfg_parse_acc_list(const char *afu_name,
	const char *acc_list_name)
{
	struct rte_afu_device *afu_dev;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = acc_list_name;
	while (*p_source) {
		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		/* stop at end of string in case the list has no closing '}' */
		while ((*p_source != '|') && (*p_source != '}') &&
		       (*p_source != '\0'))
			*p_start++ = *p_source++;
		*p_start = 0;
		if (!strcmp(name, "tm") && hw->tm_hw_enable)
			hw->acc_tm = 1;

		if (!strcmp(name, "flow") && hw->flow_hw_enable)
			hw->acc_flow = 1;

		if (*p_source == '}')
			return 0;
	}

	return 0;
}

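/*
 * Parse the "i40e_pf" devargs list, e.g. i40e_pf={pf0|pf1|...}, and bind the
 * i-th i40e PF ethdev named in the list to the i-th ipn3ke representor port
 * (rpst->i40e_pf_eth / rpst->i40e_pf_eth_port_id).
 */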
static int
ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
	const char *pf_name)
{
	struct rte_eth_dev *i40e_eth, *rpst_eth;
	struct rte_afu_device *afu_dev;
	struct ipn3ke_rpst *rpst;
	struct ipn3ke_hw *hw;
	const char *p_source;
	char *p_start;
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int i;
	int ret = -1;

	afu_dev = rte_ifpga_find_afu_by_name(afu_name);
	if (!afu_dev)
		return -1;
	hw = afu_dev->shared.data;
	if (!hw)
		return -1;

	p_source = pf_name;
	for (i = 0; i < hw->port_num; i++) {
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			afu_name, i);
		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		rpst_eth = &rte_eth_devices[port_id];
		rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);

		while ((*p_source == '{') || (*p_source == '|'))
			p_source++;
		p_start = name;
		while ((*p_source != '|') && (*p_source != '}') &&
		       (*p_source != '\0'))
			*p_start++ = *p_source++;
		*p_start = 0;

		ret = rte_eth_dev_get_port_by_name(name, &port_id);
		if (ret)
			return -1;
		i40e_eth = &rte_eth_devices[port_id];

		rpst->i40e_pf_eth = i40e_eth;
		rpst->i40e_pf_eth_port_id = port_id;

		if ((*p_source == '}') || !(*p_source))
			break;
	}

	return 0;
}

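/*
 * Probe entry for the "ipn3ke_cfg" vdev. An illustrative devargs string (the
 * AFU name and PCI addresses below are placeholders) looks like:
 *   --vdev 'ipn3ke_cfg,afu=0|b3:00.0,fpga_acc={tm|flow},i40e_pf={0000:b1:00.0|0000:b1:00.1}'
 * "afu" and "i40e_pf" are mandatory, "fpga_acc" is optional.
 */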
static int
ipn3ke_cfg_probe(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	char *acc_name = NULL;
	char *pf_name = NULL;
	int afu_name_en = 0;
	int acc_list_en = 0;
	int pf_list_en = 0;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing parameters");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				       &rte_ifpga_get_string_arg,
				       &afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_AFU_NAME);
			goto end;
		} else {
			afu_name_en = 1;
		}
	}

	if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
				       &rte_ifpga_get_string_arg,
				       &acc_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		} else {
			acc_list_en = 1;
		}
	}

	if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
				       &rte_ifpga_get_string_arg,
				       &pf_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_I40E_PF_LIST);
			goto end;
		} else {
			pf_list_en = 1;
		}
	}

	if (!afu_name_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_AFU_NAME);
		goto end;
	}

	if (!pf_list_en) {
		IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
			IPN3KE_I40E_PF_LIST);
		goto end;
	}

	if (acc_list_en) {
		ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
		if (ret) {
			IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
				IPN3KE_FPGA_ACCELERATION_LIST);
			goto end;
		}
	} else {
		IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
			IPN3KE_FPGA_ACCELERATION_LIST);
	}

	ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
	if (ret)
		goto end;
end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (afu_name)
		free(afu_name);
	if (acc_name)
		free(acc_name);
	if (pf_name)
		free(pf_name);

	return ret;
}

static int
ipn3ke_cfg_remove(struct rte_vdev_device *dev)
{
	struct rte_devargs *devargs;
	struct rte_kvargs *kvlist = NULL;
	char *afu_name = NULL;
	struct rte_afu_device *afu_dev;
	int ret = -1;

	devargs = dev->device.devargs;

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (!kvlist) {
		IPN3KE_AFU_PMD_ERR("error when parsing parameters");
		goto end;
	}

	if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
		if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
				       &rte_ifpga_get_string_arg,
				       &afu_name) < 0) {
			IPN3KE_AFU_PMD_ERR("failed to parse %s",
				IPN3KE_AFU_NAME);
		} else {
			afu_dev = rte_ifpga_find_afu_by_name(afu_name);
			if (!afu_dev)
				goto end;
			ret = ipn3ke_vswitch_remove(afu_dev);
		}
	} else {
		IPN3KE_AFU_PMD_ERR("cannot remove ipn3ke_cfg %p: invalid afu argument",
			dev);
	}

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (afu_name)
		free(afu_name);

	return ret;
}

static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string> "
	"i40e_pf=<string>");

RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}