net/ipn3ke: Intel FPGA IPN3KE AFU ethdev driver
[dpdk.git] / drivers / net / ipn3ke / ipn3ke_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <stdint.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
9 #include <rte_pci.h>
10 #include <rte_malloc.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15
16 #include <rte_io.h>
17 #include <rte_rawdev.h>
18 #include <rte_rawdev_pmd.h>
19 #include <rte_bus_ifpga.h>
20 #include <ifpga_common.h>
21 #include <ifpga_logs.h>
22
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_flow.h"
25 #include "ipn3ke_logs.h"
26 #include "ipn3ke_ethdev.h"
27
/* Log type id; registered in ipn3ke_afu_init_log() below. */
int ipn3ke_afu_logtype;

/* AFU UUIDs this driver binds to (consumed via afu_ipn3ke_driver.id_table). */
static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW,  MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH},
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};
37
/*
 * Read one 32-bit register through the indirect-access window of an
 * Ethernet group BAR.
 *
 * The read command (RCMD | address) is written to the command register at
 * BAR + 0x10; completion is then polled at BAR + 0x18, where bit 32 of the
 * returned 64-bit value signals "data valid" and the low 32 bits carry the
 * register contents.
 *
 * @param hw            device state (provides eth_group_bar[])
 * @param rd_data       out: register value on success
 * @param addr          register address within the selected device
 * @param dev_sel       device select, encoded into bits above the address
 * @param eth_group_sel 0 = line side, 1 = NIC side
 * @return 0 on success, -1 on bad group index or poll timeout
 */
static int
ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
	uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
{
	uint32_t i, try_cnt;
	uint64_t indirect_value;
	volatile void *indirect_addrs;
	uint64_t target_addr;
	uint64_t read_data = 0;

	/* Only two Ethernet groups exist. */
	if (eth_group_sel != 0 && eth_group_sel != 1)
		return -1;

	/* Device select occupies the bits above the 17-bit address field. */
	target_addr = addr | dev_sel << 17;

	indirect_value = RCMD | target_addr << 32;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;

	/* Settle delay before issuing the command (hardware requirement,
	 * presumably — TODO confirm against the IPN3KE datasheet).
	 */
	rte_delay_us(10);

	rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);

	/* Poll the result register; bit 32 set means the data is valid. */
	i = 0;
	try_cnt = 10;
	indirect_addrs = hw->eth_group_bar[eth_group_sel] +
		0x18;
	do {
		read_data = rte_read64(indirect_addrs);
		if ((read_data >> 32) == 1)
			break;
		i++;
	} while (i <= try_cnt);
	if (i > try_cnt)
		return -1;

	/* Low 32 bits hold the register contents (little-endian on the bus). */
	*rd_data = rte_le_to_cpu_32(read_data);
	return 0;
}
76
77 static int
78 ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
79         uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
80 {
81         volatile void *indirect_addrs;
82         uint64_t indirect_value;
83         uint64_t target_addr;
84
85         if (eth_group_sel != 0 && eth_group_sel != 1)
86                 return -1;
87
88         target_addr = addr | dev_sel << 17;
89
90         indirect_value = WCMD | target_addr << 32 | wr_data;
91         indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
92
93         rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
94         return 0;
95 }
96
97 static int
98 ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
99         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
100 {
101         uint32_t dev_sel;
102
103         if (mac_num >= hw->port_num)
104                 return -1;
105
106         mac_num &= 0x7;
107         dev_sel = mac_num * 2 + 3;
108
109         return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
110 }
111
112 static int
113 ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
114         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
115 {
116         uint32_t dev_sel;
117
118         if (mac_num >= hw->port_num)
119                 return -1;
120
121         mac_num &= 0x7;
122         dev_sel = mac_num * 2 + 3;
123
124         return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
125 }
126
/*
 * Snapshot the vBNG hardware capability layout into hw->hw_cap.
 *
 * Reads fixed offsets from the capability block at IPN3KE_HW_BASE: 32-bit
 * masked fields (0xFFFFFFFF) are block offsets, 16-bit fields (0xFFFF) are
 * entry sizes/counts, as laid out by the FPGA image.
 */
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/* Flow/RSS/DMAC/SMAC counts come from the capability registers
	 * block located by the offset read above.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0x10, 0xFFFF);

	/* Link speed is reported in the status registers block. */
	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFFF);
}
191
192 static int
193 ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
194 {
195         uint32_t timeout = 10000;
196         while (timeout > 0) {
197                 if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
198                         == IPN3KE_VBNG_INIT_DONE)
199                         break;
200                 rte_delay_us(1000);
201                 timeout--;
202         }
203
204         if (!timeout) {
205                 IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
206                 return -1;
207         }
208
209         return 0;
210 }
211
212 static uint32_t
213 ipn3ke_mtu_cal(uint32_t tx, uint32_t rx)
214 {
215         uint32_t tmp;
216         tmp = RTE_MIN(tx, rx);
217         tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU);
218         tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX -
219                 IPN3KE_ETH_OVERHEAD));
220         return tmp;
221 }
222
223 static void
224 ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num,
225         uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr)
226 {
227         uint32_t tx;
228         uint32_t rx;
229         uint32_t tmp;
230
231         if (!(*hw->f_mac_read) || !(*hw->f_mac_write))
232                 return;
233
234         (*hw->f_mac_read)(hw,
235                         &tx,
236                         txaddr,
237                         mac_num,
238                         eth_group_sel);
239
240         (*hw->f_mac_read)(hw,
241                         &rx,
242                         rxaddr,
243                         mac_num,
244                         eth_group_sel);
245
246         tmp = ipn3ke_mtu_cal(tx, rx);
247
248         (*hw->f_mac_write)(hw,
249                         tmp,
250                         txaddr,
251                         mac_num,
252                         eth_group_sel);
253
254         (*hw->f_mac_write)(hw,
255                         tmp,
256                         rxaddr,
257                         mac_num,
258                         eth_group_sel);
259 }
260
261 static void
262 ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
263         uint32_t eth_group_sel)
264 {
265         ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
266                 IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH);
267 }
268
269 static void
270 ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
271         uint32_t eth_group_sel)
272 {
273         ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
274                 IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG);
275 }
276
277 static void
278 ipn3ke_mtu_setup(struct ipn3ke_hw *hw)
279 {
280         int i;
281         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
282                 for (i = 0; i < hw->port_num; i++) {
283                         ipn3ke_10G_mtu_setup(hw, i, 0);
284                         ipn3ke_10G_mtu_setup(hw, i, 1);
285                 }
286         } else if (hw->retimer.mac_type ==
287                         IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
288                 for (i = 0; i < hw->port_num; i++) {
289                         ipn3ke_25G_mtu_setup(hw, i, 0);
290                         ipn3ke_25G_mtu_setup(hw, i, 1);
291                 }
292         }
293 }
294
/*
 * One-time hardware initialization for an ipn3ke AFU.
 *
 * Copies the AFU identity into hw, resolves the two Ethernet-group BARs and
 * retimer attributes from the backing rawdev, performs the vBNG reset/init
 * handshake (vBNG UUID images only), enables and clears the MAC datapath
 * per the retimer MAC type, programs MTUs, allocates a switch domain, and
 * (vBNG only) brings up the TM and flow subsystems.
 *
 * @return 0 on success, -1 or a negative error from the TM/flow init.
 */
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev  = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	/* Indirect register access is the only MAC access method wired up. */
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;
	hw->rawdev = rawdev;
	/* NOTE(review): attr_get return values are not checked; a failed
	 * lookup would leave 'index' stale — confirm the rawdev always
	 * provides these attributes.
	 */
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", IPN3KE_READ_REG(hw, 0));

	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		/* After power on, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		ipn3ke_hw_cap_init(hw);

		/* Reset vBNG IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		rte_delay_us(10);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

		/* After reset, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;
	}

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disables source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}

	/* init mtu */
	ipn3ke_mtu_setup(hw);

	/* NOTE(review): the warning passes 'ret' (an error code) for the
	 * "%d" that the message calls a device — misleading but harmless.
	 */
	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
		ret);

	/* TM and flow offloads exist only on the vBNG image. */
	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	/* Acceleration flags are opted in later via devargs parsing. */
	hw->acc_tm = 0;
	hw->acc_flow = 0;

	return 0;
}
417
418 static void
419 ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
420 {
421         int i;
422
423         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
424                 for (i = 0; i < hw->port_num; i++) {
425                         /* Disable the TX path */
426                         ipn3ke_xmac_tx_disable(hw, i, 1);
427
428                         /* Disable the RX path */
429                         ipn3ke_xmac_rx_disable(hw, i, 1);
430
431                         /* Clear NIC side TX statistics counters */
432                         ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
433
434                         /* Clear NIC side RX statistics counters */
435                         ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
436
437                         /* Clear line side TX statistics counters */
438                         ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
439
440                         /* Clear line side RX statistics counters */
441                         ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
442                 }
443         } else if (hw->retimer.mac_type ==
444                         IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
445                 for (i = 0; i < hw->port_num; i++) {
446                         /* Clear NIC side TX statistics counters */
447                         ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
448
449                         /* Clear NIC side RX statistics counters */
450                         ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
451
452                         /* Clear line side TX statistics counters */
453                         ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
454
455                         /* Clear line side RX statistics counters */
456                         ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
457                 }
458         }
459 }
460
461 static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
462 {
463         char name[RTE_ETH_NAME_MAX_LEN];
464         struct ipn3ke_hw *hw;
465         int i, retval;
466
467         /* check if the AFU device has been probed already */
468         /* allocate shared mcp_vswitch structure */
469         if (!afu_dev->shared.data) {
470                 snprintf(name, sizeof(name), "net_%s_hw",
471                         afu_dev->device.name);
472                 hw = rte_zmalloc_socket(name,
473                                         sizeof(struct ipn3ke_hw),
474                                         RTE_CACHE_LINE_SIZE,
475                                         afu_dev->device.numa_node);
476                 if (!hw) {
477                         IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
478                                 retval = -ENOMEM;
479                                 return -ENOMEM;
480                 }
481                 afu_dev->shared.data = hw;
482
483                 rte_spinlock_init(&afu_dev->shared.lock);
484         } else {
485                 hw = afu_dev->shared.data;
486         }
487
488         retval = ipn3ke_hw_init(afu_dev, hw);
489         if (retval)
490                 return retval;
491
492         /* probe representor ports */
493         for (i = 0; i < hw->port_num; i++) {
494                 struct ipn3ke_rpst rpst = {
495                         .port_id = i,
496                         .switch_domain_id = hw->switch_domain_id,
497                         .hw = hw
498                 };
499
500                 /* representor port net_bdf_port */
501                 snprintf(name, sizeof(name), "net_%s_representor_%d",
502                         afu_dev->device.name, i);
503
504                 retval = rte_eth_dev_create(&afu_dev->device, name,
505                         sizeof(struct ipn3ke_rpst), NULL, NULL,
506                         ipn3ke_rpst_init, &rpst);
507
508                 if (retval)
509                         IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
510                                                                 name);
511         }
512
513         return 0;
514 }
515
516 static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
517 {
518         char name[RTE_ETH_NAME_MAX_LEN];
519         struct ipn3ke_hw *hw;
520         struct rte_eth_dev *ethdev;
521         int i, ret;
522
523         hw = afu_dev->shared.data;
524
525         /* remove representor ports */
526         for (i = 0; i < hw->port_num; i++) {
527                 /* representor port net_bdf_port */
528                 snprintf(name, sizeof(name), "net_%s_representor_%d",
529                         afu_dev->device.name, i);
530
531                 ethdev = rte_eth_dev_allocated(afu_dev->device.name);
532                 if (!ethdev)
533                         return -ENODEV;
534
535                 rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
536         }
537
538         ret = rte_eth_switch_domain_free(hw->switch_domain_id);
539         if (ret)
540                 IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
541
542         /* hw uninit*/
543         ipn3ke_hw_uninit(hw);
544
545         return 0;
546 }
547
/* AFU driver: binds the UUID table above to the probe/remove callbacks. */
static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
555
/* Devargs keys accepted by the ipn3ke_cfg vdev (parsed in
 * ipn3ke_cfg_probe/ipn3ke_cfg_remove).
 */
static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME         "afu"
		IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST     "fpga_acc"
		IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST     "i40e_pf"
		IPN3KE_I40E_PF_LIST,
		NULL
};
565
566 static int
567 ipn3ke_cfg_parse_acc_list(const char *afu_name,
568         const char *acc_list_name)
569 {
570         struct rte_afu_device *afu_dev;
571         struct ipn3ke_hw *hw;
572         const char *p_source;
573         char *p_start;
574         char name[RTE_ETH_NAME_MAX_LEN];
575
576         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
577         if (!afu_dev)
578                 return -1;
579         hw = afu_dev->shared.data;
580         if (!hw)
581                 return -1;
582
583         p_source = acc_list_name;
584         while (*p_source) {
585                 while ((*p_source == '{') || (*p_source == '|'))
586                         p_source++;
587                 p_start = name;
588                 while ((*p_source != '|') && (*p_source != '}'))
589                         *p_start++ = *p_source++;
590                 *p_start = 0;
591                 if (!strcmp(name, "tm") && hw->tm_hw_enable)
592                         hw->acc_tm = 1;
593
594                 if (!strcmp(name, "flow") && hw->flow_hw_enable)
595                         hw->acc_flow = 1;
596
597                 if (*p_source == '}')
598                         return 0;
599         }
600
601         return 0;
602 }
603
604 static int
605 ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
606         const char *pf_name)
607 {
608         struct rte_eth_dev *i40e_eth, *rpst_eth;
609         struct rte_afu_device *afu_dev;
610         struct ipn3ke_rpst *rpst;
611         struct ipn3ke_hw *hw;
612         const char *p_source;
613         char *p_start;
614         char name[RTE_ETH_NAME_MAX_LEN];
615         uint16_t port_id;
616         int i;
617         int ret = -1;
618
619         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
620         if (!afu_dev)
621                 return -1;
622         hw = afu_dev->shared.data;
623         if (!hw)
624                 return -1;
625
626         p_source = pf_name;
627         for (i = 0; i < hw->port_num; i++) {
628                 snprintf(name, sizeof(name), "net_%s_representor_%d",
629                         afu_name, i);
630                 ret = rte_eth_dev_get_port_by_name(name, &port_id);
631                 if (ret)
632                         return -1;
633                 rpst_eth = &rte_eth_devices[port_id];
634                 rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);
635
636                 while ((*p_source == '{') || (*p_source == '|'))
637                         p_source++;
638                 p_start = name;
639                 while ((*p_source != '|') && (*p_source != '}'))
640                         *p_start++ = *p_source++;
641                 *p_start = 0;
642
643                 ret = rte_eth_dev_get_port_by_name(name, &port_id);
644                 if (ret)
645                         return -1;
646                 i40e_eth = &rte_eth_devices[port_id];
647
648                 rpst->i40e_pf_eth = i40e_eth;
649                 rpst->i40e_pf_eth_port_id = port_id;
650
651                 if ((*p_source == '}') || !(*p_source))
652                         break;
653         }
654
655         return 0;
656 }
657
658 static int
659 ipn3ke_cfg_probe(struct rte_vdev_device *dev)
660 {
661         struct rte_devargs *devargs;
662         struct rte_kvargs *kvlist = NULL;
663         char *afu_name = NULL;
664         char *acc_name = NULL;
665         char *pf_name = NULL;
666         int afu_name_en = 0;
667         int acc_list_en = 0;
668         int pf_list_en = 0;
669         int ret = -1;
670
671         devargs = dev->device.devargs;
672
673         kvlist = rte_kvargs_parse(devargs->args, valid_args);
674         if (!kvlist) {
675                 IPN3KE_AFU_PMD_ERR("error when parsing param");
676                 goto end;
677         }
678
679         if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
680                 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
681                                        &rte_ifpga_get_string_arg,
682                                        &afu_name) < 0) {
683                         IPN3KE_AFU_PMD_ERR("error to parse %s",
684                                      IPN3KE_AFU_NAME);
685                         goto end;
686                 } else {
687                         afu_name_en = 1;
688                 }
689         }
690
691         if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
692                 if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
693                                        &rte_ifpga_get_string_arg,
694                                        &acc_name) < 0) {
695                         IPN3KE_AFU_PMD_ERR("error to parse %s",
696                                      IPN3KE_FPGA_ACCELERATION_LIST);
697                         goto end;
698                 } else {
699                         acc_list_en = 1;
700                 }
701         }
702
703         if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
704                 if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
705                                        &rte_ifpga_get_string_arg,
706                                        &pf_name) < 0) {
707                         IPN3KE_AFU_PMD_ERR("error to parse %s",
708                                      IPN3KE_I40E_PF_LIST);
709                         goto end;
710                 } else {
711                         pf_list_en = 1;
712                 }
713         }
714
715         if (!afu_name_en) {
716                 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
717                           IPN3KE_AFU_NAME);
718                 goto end;
719         }
720
721         if (!pf_list_en) {
722                 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
723                           IPN3KE_I40E_PF_LIST);
724                 goto end;
725         }
726
727         if (acc_list_en) {
728                 ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
729                 if (ret) {
730                         IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
731                           IPN3KE_FPGA_ACCELERATION_LIST);
732                         goto end;
733                 }
734         } else {
735                 IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
736                           IPN3KE_FPGA_ACCELERATION_LIST);
737         }
738
739         ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
740         if (ret)
741                 goto end;
742 end:
743         if (kvlist)
744                 rte_kvargs_free(kvlist);
745         if (afu_name)
746                 free(afu_name);
747         if (acc_name)
748                 free(acc_name);
749
750         return ret;
751 }
752
753 static int
754 ipn3ke_cfg_remove(struct rte_vdev_device *dev)
755 {
756         struct rte_devargs *devargs;
757         struct rte_kvargs *kvlist = NULL;
758         char *afu_name = NULL;
759         struct rte_afu_device *afu_dev;
760         int ret = -1;
761
762         devargs = dev->device.devargs;
763
764         kvlist = rte_kvargs_parse(devargs->args, valid_args);
765         if (!kvlist) {
766                 IPN3KE_AFU_PMD_ERR("error when parsing param");
767                 goto end;
768         }
769
770         if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
771                 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
772                                        &rte_ifpga_get_string_arg,
773                                        &afu_name) < 0) {
774                         IPN3KE_AFU_PMD_ERR("error to parse %s",
775                                      IPN3KE_AFU_NAME);
776                 } else {
777                         afu_dev = rte_ifpga_find_afu_by_name(afu_name);
778                         if (!afu_dev)
779                                 goto end;
780                         ret = ipn3ke_vswitch_remove(afu_dev);
781                 }
782         } else {
783                 IPN3KE_AFU_PMD_ERR("Remove ipn3ke_cfg %p error", dev);
784         }
785
786 end:
787         if (kvlist)
788                 rte_kvargs_free(kvlist);
789
790         return ret;
791 }
792
/* Companion vdev used to wire an AFU to its acceleration list and i40e
 * PFs; registered under the name "ipn3ke_cfg" with the devargs below.
 */
static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string>"
	"i40e_pf=<string>");
803
/* Constructor: register the driver's log type, defaulting to NOTICE. */
RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}