drivers/net/ipn3ke/ipn3ke_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <stdint.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
9 #include <rte_pci.h>
10 #include <rte_malloc.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15
16 #include <rte_io.h>
17 #include <rte_rawdev.h>
18 #include <rte_rawdev_pmd.h>
19 #include <rte_bus_ifpga.h>
20 #include <ifpga_common.h>
21 #include <ifpga_logs.h>
22 #include <ifpga_rawdev.h>
23
24 #include "ipn3ke_rawdev_api.h"
25 #include "ipn3ke_flow.h"
26 #include "ipn3ke_logs.h"
27 #include "ipn3ke_ethdev.h"
28
/* Dynamic log type used by the IPN3KE AFU PMD log macros. */
int ipn3ke_afu_logtype;

/* AFU UUIDs this driver binds to: the MAP and IPN3KE 10G images,
 * the vBNG image and the 25G image.  The all-zero pair terminates
 * the table.
 */
static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW,  MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH},
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};

/* Callbacks published by the ifpga rawdev bridge driver. */
struct ipn3ke_pub_func ipn3ke_bridge_func;
40
41 static int
42 ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
43         uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
44 {
45         uint32_t i, try_cnt;
46         uint64_t indirect_value;
47         volatile void *indirect_addrs;
48         uint64_t target_addr;
49         uint64_t read_data = 0;
50
51         if (eth_group_sel != 0 && eth_group_sel != 1)
52                 return -1;
53
54         target_addr = addr | dev_sel << 17;
55
56         indirect_value = RCMD | target_addr << 32;
57         indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
58
59         rte_delay_us(10);
60
61         rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
62
63         i = 0;
64         try_cnt = 10;
65         indirect_addrs = hw->eth_group_bar[eth_group_sel] +
66                 0x18;
67         do {
68                 read_data = rte_read64(indirect_addrs);
69                 if ((read_data >> 32) == 1)
70                         break;
71                 i++;
72         } while (i <= try_cnt);
73         if (i > try_cnt)
74                 return -1;
75
76         *rd_data = rte_le_to_cpu_32(read_data);
77         return 0;
78 }
79
80 static int
81 ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
82         uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
83 {
84         volatile void *indirect_addrs;
85         uint64_t indirect_value;
86         uint64_t target_addr;
87
88         if (eth_group_sel != 0 && eth_group_sel != 1)
89                 return -1;
90
91         target_addr = addr | dev_sel << 17;
92
93         indirect_value = WCMD | target_addr << 32 | wr_data;
94         indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
95
96         rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
97         return 0;
98 }
99
100 static int
101 ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
102         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
103 {
104         uint32_t dev_sel;
105
106         if (mac_num >= hw->port_num)
107                 return -1;
108
109         mac_num &= 0x7;
110         dev_sel = mac_num * 2 + 3;
111
112         return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
113 }
114
115 static int
116 ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
117         uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
118 {
119         uint32_t dev_sel;
120
121         if (mac_num >= hw->port_num)
122                 return -1;
123
124         mac_num &= 0x7;
125         dev_sel = mac_num * 2 + 3;
126
127         return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
128 }
129
/* Populate hw->hw_cap from the capability register file.
 * The first group of reads walks fixed offsets from IPN3KE_HW_BASE
 * (version, then block offsets and entry sizes for each sub-unit);
 * the second group reads counts from the capability block itself and
 * the link speed from the status block.  Masks select the valid field
 * width (0xFFFF for 16-bit sizes, 0xFFFFFFFF for 32-bit offsets).
 */
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
			(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/* Flow/RSS/MAC-map counts live in the capability block
	 * discovered above.
	 */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
			0x10, 0xFFFF);

	/* Link speed (Mbps) is reported in the status block. */
	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
			0, 0xFFFFF);
}
194
195 static int
196 ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
197 {
198         uint32_t timeout = 10000;
199         while (timeout > 0) {
200                 if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
201                         == IPN3KE_VBNG_INIT_DONE)
202                         break;
203                 rte_delay_us(1000);
204                 timeout--;
205         }
206
207         if (!timeout) {
208                 IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
209                 return -1;
210         }
211
212         return 0;
213 }
214
215 static uint32_t
216 ipn3ke_mtu_cal(uint32_t tx, uint32_t rx)
217 {
218         uint32_t tmp;
219         tmp = RTE_MIN(tx, rx);
220         tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU);
221         tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX -
222                 IPN3KE_ETH_OVERHEAD));
223         return tmp;
224 }
225
226 static void
227 ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num,
228         uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr)
229 {
230         uint32_t tx;
231         uint32_t rx;
232         uint32_t tmp;
233
234         if (!(*hw->f_mac_read) || !(*hw->f_mac_write))
235                 return;
236
237         (*hw->f_mac_read)(hw,
238                         &tx,
239                         txaddr,
240                         mac_num,
241                         eth_group_sel);
242
243         (*hw->f_mac_read)(hw,
244                         &rx,
245                         rxaddr,
246                         mac_num,
247                         eth_group_sel);
248
249         tmp = ipn3ke_mtu_cal(tx, rx);
250
251         (*hw->f_mac_write)(hw,
252                         tmp,
253                         txaddr,
254                         mac_num,
255                         eth_group_sel);
256
257         (*hw->f_mac_write)(hw,
258                         tmp,
259                         rxaddr,
260                         mac_num,
261                         eth_group_sel);
262 }
263
264 static void
265 ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
266         uint32_t eth_group_sel)
267 {
268         ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
269                 IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH);
270 }
271
272 static void
273 ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
274         uint32_t eth_group_sel)
275 {
276         ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
277                 IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG);
278 }
279
280 static void
281 ipn3ke_mtu_setup(struct ipn3ke_hw *hw)
282 {
283         int i;
284         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
285                 for (i = 0; i < hw->port_num; i++) {
286                         ipn3ke_10G_mtu_setup(hw, i, 0);
287                         ipn3ke_10G_mtu_setup(hw, i, 1);
288                 }
289         } else if (hw->retimer.mac_type ==
290                         IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
291                 for (i = 0; i < hw->port_num; i++) {
292                         ipn3ke_25G_mtu_setup(hw, i, 0);
293                         ipn3ke_25G_mtu_setup(hw, i, 1);
294                 }
295         }
296 }
297
/* One-time bring-up of the IPN3KE hardware behind an AFU device:
 * resolves BAR addresses and link attributes from the rawdev, resets
 * and initializes the vBNG IP when that image is present, enables and
 * clears the MAC paths, programs MTUs, and allocates a switch domain
 * for the representor ports.
 * Returns 0 on success, negative on failure.
 */
static int
ipn3ke_hw_init(struct rte_afu_device *afu_dev,
	struct ipn3ke_hw *hw)
{
	struct rte_rawdev *rawdev;
	int ret;
	int i;
	uint64_t port_num, mac_type, index;

	rawdev  = afu_dev->rawdev;

	hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
	hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
	hw->afu_id.port = afu_dev->id.port;
	hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
	/* MAC registers are reached through the indirect-access helpers. */
	hw->f_mac_read = ipn3ke_indirect_mac_read;
	hw->f_mac_write = ipn3ke_indirect_mac_write;
	hw->rawdev = rawdev;
	/* BAR indexes for the line-side (group 0) and NIC-side (group 1)
	 * ethernet groups come from the underlying rawdev.
	 * NOTE(review): attr_get return values are not checked; 'index'
	 * is assumed to be a valid mem_resource slot — confirm with the
	 * rawdev implementation.
	 */
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideBARIndex", &index);
	hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"NICSideBARIndex", &index);
	hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideLinkPortNum", &port_num);
	hw->retimer.port_num = (int)port_num;
	hw->port_num = hw->retimer.port_num;
	rawdev->dev_ops->attr_get(rawdev,
				"LineSideMACType", &mac_type);
	hw->retimer.mac_type = (int)mac_type;

	hw->acc_tm = 0;
	hw->acc_flow = 0;

	/* Only the vBNG image has the HW TM / flow acceleration blocks. */
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		/* After power on, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		ipn3ke_hw_cap_init(hw);

		/* Reset vBNG IP */
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
		rte_delay_us(10);
		IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);

		/* After reset, wait until init done */
		if (ipn3ke_vbng_init_done(hw))
			return -1;

		hw->acc_tm = 1;
		hw->acc_flow = 1;

		IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
			IPN3KE_READ_REG(hw, 0));
	}

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable inter connect channel */
		for (i = 0; i < hw->port_num; i++) {
			/* Enable the TX path */
			ipn3ke_xmac_tx_enable(hw, i, 1);

			/* Disables source address override */
			ipn3ke_xmac_smac_ovd_dis(hw, i, 1);

			/* Enable the RX path */
			ipn3ke_xmac_rx_enable(hw, i, 1);

			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);

			/* Clear line RX statistics counters */
			ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
		}
	} else if (hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
		/* 25G MACs only need their statistics cleared. */
		for (i = 0; i < hw->port_num; i++) {
			/* Clear NIC side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);

			/* Clear NIC side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);

			/* Clear line side TX statistics counters */
			ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);

			/* Clear line side RX statistics counters */
			ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
		}
	}

	/* init mtu */
	ipn3ke_mtu_setup(hw);

	/* Switch-domain failure is logged but non-fatal. */
	ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
	if (ret)
		IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
		ret);

	hw->tm_hw_enable = 0;
	hw->flow_hw_enable = 0;
	/* vBNG image: bring up HW traffic management and flow engines. */
	if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
		afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
		ret = ipn3ke_hw_tm_init(hw);
		if (ret)
			return ret;
		hw->tm_hw_enable = 1;

		ret = ipn3ke_flow_init(hw);
		if (ret)
			return ret;
		hw->flow_hw_enable = 1;
	}

	return 0;
}
424
425 static void
426 ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
427 {
428         int i;
429
430         if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
431                 for (i = 0; i < hw->port_num; i++) {
432                         /* Disable the TX path */
433                         ipn3ke_xmac_tx_disable(hw, i, 1);
434
435                         /* Disable the RX path */
436                         ipn3ke_xmac_rx_disable(hw, i, 1);
437
438                         /* Clear NIC side TX statistics counters */
439                         ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
440
441                         /* Clear NIC side RX statistics counters */
442                         ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
443
444                         /* Clear line side TX statistics counters */
445                         ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
446
447                         /* Clear line side RX statistics counters */
448                         ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
449                 }
450         } else if (hw->retimer.mac_type ==
451                         IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
452                 for (i = 0; i < hw->port_num; i++) {
453                         /* Clear NIC side TX statistics counters */
454                         ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
455
456                         /* Clear NIC side RX statistics counters */
457                         ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
458
459                         /* Clear line side TX statistics counters */
460                         ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
461
462                         /* Clear line side RX statistics counters */
463                         ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
464                 }
465         }
466 }
467
468 static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
469 {
470         char name[RTE_ETH_NAME_MAX_LEN];
471         struct ipn3ke_hw *hw;
472         struct rte_eth_dev *i40e_eth;
473         struct ifpga_rawdev *ifpga_dev;
474         uint16_t port_id;
475         int i, j, retval;
476         char *fvl_bdf;
477
478         /* check if the AFU device has been probed already */
479         /* allocate shared mcp_vswitch structure */
480         if (!afu_dev->shared.data) {
481                 snprintf(name, sizeof(name), "net_%s_hw",
482                         afu_dev->device.name);
483                 hw = rte_zmalloc_socket(name,
484                                         sizeof(struct ipn3ke_hw),
485                                         RTE_CACHE_LINE_SIZE,
486                                         afu_dev->device.numa_node);
487                 if (!hw) {
488                         IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
489                                 retval = -ENOMEM;
490                                 return -ENOMEM;
491                 }
492                 afu_dev->shared.data = hw;
493
494                 rte_spinlock_init(&afu_dev->shared.lock);
495         } else {
496                 hw = afu_dev->shared.data;
497         }
498
499         retval = ipn3ke_hw_init(afu_dev, hw);
500         if (retval)
501                 return retval;
502
503         if (ipn3ke_bridge_func.get_ifpga_rawdev == NULL)
504                 return -ENOMEM;
505         ifpga_dev = ipn3ke_bridge_func.get_ifpga_rawdev(hw->rawdev);
506                 if (!ifpga_dev)
507                         IPN3KE_AFU_PMD_ERR("failed to find ifpga_device.");
508
509         /* probe representor ports */
510         j = 0;
511         for (i = 0; i < hw->port_num; i++) {
512                 struct ipn3ke_rpst rpst = {
513                         .port_id = i,
514                         .switch_domain_id = hw->switch_domain_id,
515                         .hw = hw
516                 };
517
518                 /* representor port net_bdf_port */
519                 snprintf(name, sizeof(name), "net_%s_representor_%d",
520                         afu_dev->device.name, i);
521
522                 for (; j < 8; j++) {
523                         fvl_bdf = ifpga_dev->fvl_bdf[j];
524                         retval = rte_eth_dev_get_port_by_name(fvl_bdf,
525                                 &port_id);
526                         if (retval) {
527                                 continue;
528                         } else {
529                                 i40e_eth = &rte_eth_devices[port_id];
530                                 rpst.i40e_pf_eth = i40e_eth;
531                                 rpst.i40e_pf_eth_port_id = port_id;
532
533                                 j++;
534                                 break;
535                         }
536                 }
537
538                 retval = rte_eth_dev_create(&afu_dev->device, name,
539                         sizeof(struct ipn3ke_rpst), NULL, NULL,
540                         ipn3ke_rpst_init, &rpst);
541
542                 if (retval)
543                         IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
544                                                                 name);
545
546         }
547
548         return 0;
549 }
550
551 static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
552 {
553         char name[RTE_ETH_NAME_MAX_LEN];
554         struct ipn3ke_hw *hw;
555         struct rte_eth_dev *ethdev;
556         int i, ret;
557
558         hw = afu_dev->shared.data;
559
560         /* remove representor ports */
561         for (i = 0; i < hw->port_num; i++) {
562                 /* representor port net_bdf_port */
563                 snprintf(name, sizeof(name), "net_%s_representor_%d",
564                         afu_dev->device.name, i);
565
566                 ethdev = rte_eth_dev_allocated(afu_dev->device.name);
567                 if (!ethdev)
568                         return -ENODEV;
569
570                 rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
571         }
572
573         ret = rte_eth_switch_domain_free(hw->switch_domain_id);
574         if (ret)
575                 IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
576
577         /* hw uninit*/
578         ipn3ke_hw_uninit(hw);
579
580         return 0;
581 }
582
/* AFU driver descriptor: binds the probe/remove callbacks to the
 * UUID table declared above, then registers with the ifpga AFU bus.
 */
static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
590
/* Constructor: register the PMD's dynamic log type and default its
 * level to NOTICE.
 */
RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}