net/ipn3ke: add stats register and clearing
drivers/net/ipn3ke/ipn3ke_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>

#include <rte_io.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

int ipn3ke_afu_logtype;

static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
        { MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
        { IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
        { IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH },
        { IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
        { 0, 0 /* sentinel */ },
};

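/*
 * Indirect register read through the selected Ethernet group
 * (0 = line side BAR, 1 = NIC side BAR): a read command is written to
 * the command register at BAR offset 0x10, then the result register at
 * offset 0x18 is polled; the upper 32 bits of the readback signal
 * completion.
 */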
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
        uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
        uint32_t i, try_cnt;
        uint64_t indirect_value;
        volatile void *indirect_addrs;
        uint64_t target_addr;
        uint64_t read_data = 0;

        if (eth_group_sel != 0 && eth_group_sel != 1)
                return -1;

        target_addr = addr | dev_sel << 17;

        indirect_value = RCMD | target_addr << 32;
        indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

        rte_delay_us(10);

        rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);

        i = 0;
        try_cnt = 10;
        indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x18;
        do {
                read_data = rte_read64(indirect_addrs);
                if ((read_data >> 32) == 1)
                        break;
                i++;
        } while (i <= try_cnt);
        if (i > try_cnt)
                return -1;

        *rd_data = rte_le_to_cpu_32(read_data);
        return 0;
}

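/*
 * Indirect register write through the selected Ethernet group: the write
 * command, target address and data are packed into one 64-bit value and
 * written to the command register at BAR offset 0x10.
 */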
static int
ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
        uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
        volatile void *indirect_addrs;
        uint64_t indirect_value;
        uint64_t target_addr;

        if (eth_group_sel != 0 && eth_group_sel != 1)
                return -1;

        target_addr = addr | dev_sel << 17;

        indirect_value = WCMD | target_addr << 32 | wr_data;
        indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

        rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
        return 0;
}

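/*
 * MAC register accessors: map a MAC index to its device select value,
 * dev_sel = (mac_num & 0x7) * 2 + 3, and go through the indirect
 * read/write helpers above.
 */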
static int
ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
        uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
        uint32_t dev_sel;

        if (mac_num >= hw->port_num)
                return -1;

        mac_num &= 0x7;
        dev_sel = mac_num * 2 + 3;

        return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
}

static int
ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
        uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
{
        uint32_t dev_sel;

        if (mac_num >= hw->port_num)
                return -1;

        mac_num &= 0x7;
        dev_sel = mac_num * 2 + 3;

        return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
}

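/*
 * Read the vBNG hardware capability layout from the AFU: block offsets and
 * entry sizes for classify, policer, RSS key and indirection tables,
 * DMAC map, QM, CCB and QoS, the flow/table counts from the capability
 * block, and the link speed from the status block.
 */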
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
        hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0), 0, 0xFFFF);
        hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
        hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
        hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
        hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
        hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
        hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
        hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
        hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
        hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
        hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
        hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
        hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
        hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
        hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
        hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
        hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
        hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
        hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
        hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
                        (IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

        hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
                        0, 0xFFFF);
        hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
                        4, 0xFFFF);
        hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
                        8, 0xFFFF);
        hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
                        0xC, 0xFFFF);
        hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
                        0x10, 0xFFFF);

        hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
                        IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
                        0, 0xFFFFF);
}

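/* Poll the vBNG init status register until init completes (10 s timeout). */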
static int
ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
{
        uint32_t timeout = 10000;

        while (timeout > 0) {
                if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
                        == IPN3KE_VBNG_INIT_DONE)
                        break;
                rte_delay_us(1000);
                timeout--;
        }

        if (!timeout) {
                IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
                return -1;
        }

        return 0;
}

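/*
 * One-time AFU initialization: resolve the line/NIC side BARs and retimer
 * attributes from the rawdev, reset the vBNG IP and wait for init when
 * running the vBNG AFU, enable the MAC TX/RX paths (10G) and clear the
 * NIC and line side statistics counters, allocate a switch domain, and
 * set up the TM and flow engines for the vBNG AFU.
 */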
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
        struct ipn3ke_hw *hw)
{
        struct rte_rawdev *rawdev;
        int ret;
        int i;
        uint64_t port_num, mac_type, index;

        rawdev = afu_dev->rawdev;

        hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
        hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
        hw->afu_id.port = afu_dev->id.port;
        hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
        hw->f_mac_read = ipn3ke_indirect_mac_read;
        hw->f_mac_write = ipn3ke_indirect_mac_write;
        hw->rawdev = rawdev;
        rawdev->dev_ops->attr_get(rawdev,
                                "LineSideBARIndex", &index);
        hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
        rawdev->dev_ops->attr_get(rawdev,
                                "NICSideBARIndex", &index);
        hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
        rawdev->dev_ops->attr_get(rawdev,
                                "LineSideLinkPortNum", &port_num);
        hw->retimer.port_num = (int)port_num;
        hw->port_num = hw->retimer.port_num;
        rawdev->dev_ops->attr_get(rawdev,
                                "LineSideMACType", &mac_type);
        hw->retimer.mac_type = (int)mac_type;

        IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", IPN3KE_READ_REG(hw, 0));

        if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
                afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
                /* After power on, wait until init done */
                if (ipn3ke_vbng_init_done(hw))
                        return -1;

                ipn3ke_hw_cap_init(hw);

                /* Reset vBNG IP */
                IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
                rte_delay_us(10);
                IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

                /* After reset, wait until init done */
                if (ipn3ke_vbng_init_done(hw))
                        return -1;
        }

        if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
                /* Enable inter connect channel */
                for (i = 0; i < hw->port_num; i++) {
                        /* Enable the TX path */
                        ipn3ke_xmac_tx_enable(hw, i, 1);

                        /* Disable source address override */
                        ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

                        /* Enable the RX path */
                        ipn3ke_xmac_rx_enable(hw, i, 1);

                        /* Clear NIC side TX statistics counters */
                        ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

                        /* Clear NIC side RX statistics counters */
                        ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

                        /* Clear line side TX statistics counters */
                        ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

                        /* Clear line side RX statistics counters */
                        ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
                }
        } else if (hw->retimer.mac_type ==
                        IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
                /* Enable inter connect channel */
                for (i = 0; i < hw->port_num; i++) {
                        /* Clear NIC side TX statistics counters */
                        ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

                        /* Clear NIC side RX statistics counters */
                        ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

                        /* Clear line side TX statistics counters */
                        ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

                        /* Clear line side RX statistics counters */
                        ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
                }
        }

        ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
        if (ret)
                IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
                        ret);

        hw->tm_hw_enable = 0;
        hw->flow_hw_enable = 0;
        if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
                afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
                ret = ipn3ke_hw_tm_init(hw);
                if (ret)
                        return ret;
                hw->tm_hw_enable = 1;

                ret = ipn3ke_flow_init(hw);
                if (ret)
                        return ret;
                hw->flow_hw_enable = 1;
        }

        hw->acc_tm = 0;
        hw->acc_flow = 0;

        return 0;
}

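/*
 * Undo ipn3ke_hw_init for the MACs: disable the 10G TX/RX paths and clear
 * the NIC and line side statistics counters.
 */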
static void
ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
{
        int i;

        if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
                for (i = 0; i < hw->port_num; i++) {
                        /* Disable the TX path */
                        ipn3ke_xmac_tx_disable(hw, i, 1);

                        /* Disable the RX path */
                        ipn3ke_xmac_rx_disable(hw, i, 1);

                        /* Clear NIC side TX statistics counters */
                        ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

                        /* Clear NIC side RX statistics counters */
                        ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

                        /* Clear line side TX statistics counters */
                        ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

                        /* Clear line side RX statistics counters */
                        ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
                }
        } else if (hw->retimer.mac_type ==
                        IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
                for (i = 0; i < hw->port_num; i++) {
                        /* Clear NIC side TX statistics counters */
                        ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

                        /* Clear NIC side RX statistics counters */
                        ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

                        /* Clear line side TX statistics counters */
                        ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

                        /* Clear line side RX statistics counters */
                        ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
                }
        }
}

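/*
 * AFU probe: allocate (or reuse) the shared ipn3ke_hw data, initialize the
 * hardware and create one representor ethdev per retimer port, named
 * net_<afu>_representor_<port>.
 */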
static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct ipn3ke_hw *hw;
        int i, retval;

        /* check if the AFU device has been probed already */
        /* allocate shared mcp_vswitch structure */
        if (!afu_dev->shared.data) {
                snprintf(name, sizeof(name), "net_%s_hw",
                        afu_dev->device.name);
                hw = rte_zmalloc_socket(name,
                                        sizeof(struct ipn3ke_hw),
                                        RTE_CACHE_LINE_SIZE,
                                        afu_dev->device.numa_node);
                if (!hw) {
                        IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
                        return -ENOMEM;
                }
                afu_dev->shared.data = hw;

                rte_spinlock_init(&afu_dev->shared.lock);
        } else {
                hw = afu_dev->shared.data;
        }

        retval = ipn3ke_hw_init(afu_dev, hw);
        if (retval)
                return retval;

        /* probe representor ports */
        for (i = 0; i < hw->port_num; i++) {
                struct ipn3ke_rpst rpst = {
                        .port_id = i,
                        .switch_domain_id = hw->switch_domain_id,
                        .hw = hw
                };

                /* representor port net_bdf_port */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        afu_dev->device.name, i);

                retval = rte_eth_dev_create(&afu_dev->device, name,
                        sizeof(struct ipn3ke_rpst), NULL, NULL,
                        ipn3ke_rpst_init, &rpst);

                if (retval)
                        IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
                                name);
        }

        return 0;
}

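/*
 * AFU remove: destroy the representor ethdevs, free the switch domain and
 * release the MAC resources.
 */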
static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct ipn3ke_hw *hw;
        struct rte_eth_dev *ethdev;
        int i, ret;

        hw = afu_dev->shared.data;

        /* remove representor ports */
        for (i = 0; i < hw->port_num; i++) {
                /* representor port net_bdf_port */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        afu_dev->device.name, i);

                /* look up the representor by the name built above */
                ethdev = rte_eth_dev_allocated(name);
                if (!ethdev)
                        return -ENODEV;

                rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
        }

        ret = rte_eth_switch_domain_free(hw->switch_domain_id);
        if (ret)
                IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);

        /* hw uninit */
        ipn3ke_hw_uninit(hw);

        return 0;
}

static struct rte_afu_driver afu_ipn3ke_driver = {
        .id_table = afu_uuid_ipn3ke_map,
        .probe = ipn3ke_vswitch_probe,
        .remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);

static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME         "afu"
                IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST     "fpga_acc"
                IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST     "i40e_pf"
                IPN3KE_I40E_PF_LIST,
                NULL
};

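/*
 * Parse the fpga_acc devargs value, e.g. "{tm|flow}", and enable the
 * corresponding accelerators on the AFU when the hardware supports them.
 */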
static int
ipn3ke_cfg_parse_acc_list(const char *afu_name,
        const char *acc_list_name)
{
        struct rte_afu_device *afu_dev;
        struct ipn3ke_hw *hw;
        const char *p_source;
        char *p_start;
        char name[RTE_ETH_NAME_MAX_LEN];

        afu_dev = rte_ifpga_find_afu_by_name(afu_name);
        if (!afu_dev)
                return -1;
        hw = afu_dev->shared.data;
        if (!hw)
                return -1;

        p_source = acc_list_name;
        while (*p_source) {
                while ((*p_source == '{') || (*p_source == '|'))
                        p_source++;
                /* copy the next token, stopping at '|', '}' or end of string */
                p_start = name;
                while ((*p_source != '|') && (*p_source != '}') &&
                        (*p_source != '\0'))
                        *p_start++ = *p_source++;
                *p_start = 0;
                if (!strcmp(name, "tm") && hw->tm_hw_enable)
                        hw->acc_tm = 1;

                if (!strcmp(name, "flow") && hw->flow_hw_enable)
                        hw->acc_flow = 1;

                if (*p_source == '}')
                        return 0;
        }

        return 0;
}

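/*
 * Parse the i40e_pf devargs value, a "{...|...}" list of i40e PF ethdev
 * names, and bind each ipn3ke representor port to the PF at the same
 * position in the list.
 */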
static int
ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
        const char *pf_name)
{
        struct rte_eth_dev *i40e_eth, *rpst_eth;
        struct rte_afu_device *afu_dev;
        struct ipn3ke_rpst *rpst;
        struct ipn3ke_hw *hw;
        const char *p_source;
        char *p_start;
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t port_id;
        int i;
        int ret = -1;

        afu_dev = rte_ifpga_find_afu_by_name(afu_name);
        if (!afu_dev)
                return -1;
        hw = afu_dev->shared.data;
        if (!hw)
                return -1;

        p_source = pf_name;
        for (i = 0; i < hw->port_num; i++) {
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        afu_name, i);
                ret = rte_eth_dev_get_port_by_name(name, &port_id);
                if (ret)
                        return -1;
                rpst_eth = &rte_eth_devices[port_id];
                rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);

                while ((*p_source == '{') || (*p_source == '|'))
                        p_source++;
                /* copy the next PF name, stopping at '|', '}' or end of string */
                p_start = name;
                while ((*p_source != '|') && (*p_source != '}') &&
                        (*p_source != '\0'))
                        *p_start++ = *p_source++;
                *p_start = 0;

                ret = rte_eth_dev_get_port_by_name(name, &port_id);
                if (ret)
                        return -1;
                i40e_eth = &rte_eth_devices[port_id];

                rpst->i40e_pf_eth = i40e_eth;
                rpst->i40e_pf_eth_port_id = port_id;

                if ((*p_source == '}') || !(*p_source))
                        break;
        }

        return 0;
}

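/*
 * Probe of the ipn3ke_cfg vdev: parse the afu, fpga_acc and i40e_pf
 * devargs (afu and i40e_pf are mandatory) and wire the AFU representors
 * to their i40e PF ports.
 */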
static int
ipn3ke_cfg_probe(struct rte_vdev_device *dev)
{
        struct rte_devargs *devargs;
        struct rte_kvargs *kvlist = NULL;
        char *afu_name = NULL;
        char *acc_name = NULL;
        char *pf_name = NULL;
        int afu_name_en = 0;
        int acc_list_en = 0;
        int pf_list_en = 0;
        int ret = -1;

        devargs = dev->device.devargs;

        kvlist = rte_kvargs_parse(devargs->args, valid_args);
        if (!kvlist) {
                IPN3KE_AFU_PMD_ERR("error when parsing param");
                goto end;
        }

        if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
                if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
                                       &rte_ifpga_get_string_arg,
                                       &afu_name) < 0) {
                        IPN3KE_AFU_PMD_ERR("failed to parse %s",
                                     IPN3KE_AFU_NAME);
                        goto end;
                } else {
                        afu_name_en = 1;
                }
        }

        if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
                if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
                                       &rte_ifpga_get_string_arg,
                                       &acc_name) < 0) {
                        IPN3KE_AFU_PMD_ERR("failed to parse %s",
                                     IPN3KE_FPGA_ACCELERATION_LIST);
                        goto end;
                } else {
                        acc_list_en = 1;
                }
        }

        if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
                if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
                                       &rte_ifpga_get_string_arg,
                                       &pf_name) < 0) {
                        IPN3KE_AFU_PMD_ERR("failed to parse %s",
                                     IPN3KE_I40E_PF_LIST);
                        goto end;
                } else {
                        pf_list_en = 1;
                }
        }

        if (!afu_name_en) {
                IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
                          IPN3KE_AFU_NAME);
                goto end;
        }

        if (!pf_list_en) {
                IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
                          IPN3KE_I40E_PF_LIST);
                goto end;
        }

        if (acc_list_en) {
                ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
                if (ret) {
                        IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
                          IPN3KE_FPGA_ACCELERATION_LIST);
                        goto end;
                }
        } else {
                IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
                          IPN3KE_FPGA_ACCELERATION_LIST);
        }

        ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
        if (ret)
                goto end;
end:
        if (kvlist)
                rte_kvargs_free(kvlist);
        if (afu_name)
                free(afu_name);
        if (acc_name)
                free(acc_name);
        if (pf_name)
                free(pf_name);

        return ret;
}

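/*
 * Remove of the ipn3ke_cfg vdev: look up the AFU named in the devargs and
 * tear down its vswitch.
 */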
static int
ipn3ke_cfg_remove(struct rte_vdev_device *dev)
{
        struct rte_devargs *devargs;
        struct rte_kvargs *kvlist = NULL;
        char *afu_name = NULL;
        struct rte_afu_device *afu_dev;
        int ret = -1;

        devargs = dev->device.devargs;

        kvlist = rte_kvargs_parse(devargs->args, valid_args);
        if (!kvlist) {
                IPN3KE_AFU_PMD_ERR("error when parsing param");
                goto end;
        }

        if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
                if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
                                       &rte_ifpga_get_string_arg,
                                       &afu_name) < 0) {
                        IPN3KE_AFU_PMD_ERR("failed to parse %s",
                                     IPN3KE_AFU_NAME);
                } else {
                        afu_dev = rte_ifpga_find_afu_by_name(afu_name);
                        if (!afu_dev)
                                goto end;
                        ret = ipn3ke_vswitch_remove(afu_dev);
                }
        } else {
                IPN3KE_AFU_PMD_ERR("Remove ipn3ke_cfg %p error", dev);
        }

end:
        if (kvlist)
                rte_kvargs_free(kvlist);
        if (afu_name)
                free(afu_name);

        return ret;
}

static struct rte_vdev_driver ipn3ke_cfg_driver = {
        .probe = ipn3ke_cfg_probe,
        .remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
        "afu=<string> "
        "fpga_acc=<string> "
        "i40e_pf=<string>");

RTE_INIT(ipn3ke_afu_init_log)
{
        ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
        if (ipn3ke_afu_logtype >= 0)
                rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}