drivers/net/ixgbe/rte_pmd_ixgbe.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "base/ixgbe_api.h"
#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

int
rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
                              struct rte_ether_addr *mac_addr)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        int rar_entry;
        uint8_t *new_mac = (uint8_t *)(mac_addr);
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        rar_entry = hw->mac.num_rar_entries - (vf + 1);

        if (rte_is_valid_assigned_ether_addr(
                        (struct rte_ether_addr *)new_mac)) {
                rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
                           RTE_ETHER_ADDR_LEN);
                return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
                                           IXGBE_RAH_AV);
        }
        return -EINVAL;
}
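
/*
 * Usage sketch (illustrative only, not part of the PMD): assigning a
 * locally administered MAC address to a VF. The port number, VF index
 * and address below are hypothetical placeholders.
 *
 *        struct rte_ether_addr addr = {
 *                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *        };
 *        int ret = rte_pmd_ixgbe_set_vf_mac_addr(0, 1, &addr);
 *        if (ret < 0)
 *                printf("set VF MAC failed: %d\n", ret);
 */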

int
rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

        ctrl = IXGBE_PF_CONTROL_MSG;
        if (vfinfo[vf].clear_to_send)
                ctrl |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, &ctrl, 1, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;

        mac->ops.set_vlan_anti_spoofing(hw, on, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;
        mac->ops.set_mac_anti_spoofing(hw, on, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
        if (vlan_id) {
                ctrl = vlan_id;
                ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
        } else {
                ctrl = 0;
        }

        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
        /* enable or disable VMDQ loopback */
        if (on)
                ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
        else
                ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        int i;
        int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        for (i = 0; i <= num_queues; i++) {
                reg_value = IXGBE_QDE_WRITE |
                                (i << IXGBE_QDE_IDX_SHIFT) |
                                (on & IXGBE_QDE_ENABLE);
                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
        }

        return 0;
}

int
rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        /* only support VFs 0 to 63 */
        if ((vf >= pci_dev->max_vfs) || (vf > 63))
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
        if (on)
                reg_value |= IXGBE_SRRCTL_DROP_EN;
        else
                reg_value &= ~IXGBE_SRRCTL_DROP_EN;

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint16_t queues_per_pool;
        uint32_t q;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

        /* The PF has 128 queue pairs and in SRIOV configuration
         * those queues will be assigned to VFs, so the RXDCTL
         * registers will be dealing with queues which will be
         * assigned to VFs.
         * Say SRIOV is configured with 31 VFs: then the first
         * 124 queues (0-123) will be allocated to the VFs and
         * only the last 4 queues (124-127) will be assigned to
         * the PF.
         */
        if (hw->mac.type == ixgbe_mac_82598EB)
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_16_POOLS;
        else
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_64_POOLS;

        for (q = 0; q < queues_per_pool; q++)
                (*dev->dev_ops->vlan_strip_queue_set)(dev,
                                q + vf * queues_per_pool, on);
        return 0;
}
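
/*
 * Usage sketch (illustrative only): enabling VLAN stripping on every
 * queue owned by one VF. On an 82599 in 64-pool mode with 128 RX
 * queues this walks 128 / 64 = 2 queues, i.e. queues 4-5 for VF 2.
 * The port and VF numbers are hypothetical placeholders.
 *
 *        if (rte_pmd_ixgbe_set_vf_vlan_stripq(0, 2, 1) != 0)
 *                printf("enabling VLAN strip for VF 2 failed\n");
 */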

int
rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
                            uint16_t rx_mask, uint8_t on)
{
        int val = 0;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint32_t vmolr;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode is only"
                             " supported on 82599 hardware and newer");
                return -ENOTSUP;
        }
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

        if (on)
                vmolr |= val;
        else
                vmolr &= ~val;

        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFRE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFRE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}
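
/*
 * Worked example of the PFVFRE indexing above (illustrative only):
 * vf = 35 sets bit (35 - 32) = 3 in PFVFRE[1], while vf = 7 sets
 * bit 7 in PFVFRE[0]. A caller enabling RX for VF 35 on port 0
 * (hypothetical values) would do:
 *
 *        int ret = rte_pmd_ixgbe_set_vf_rx(0, 35, 1);
 */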

int
rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFTE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFTE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
                                 uint64_t vf_mask, uint8_t vlan_on)
{
        struct rte_eth_dev *dev;
        int ret = 0;
        uint16_t vf_idx;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        for (vf_idx = 0; vf_idx < 64; vf_idx++) {
                if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
                        ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
                                                   vlan_on, false);
                        if (ret < 0)
                                return ret;
                }
        }

        return ret;
}
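
/*
 * Usage sketch (illustrative only): vf_mask is a 64-bit bitmap with
 * one bit per VF. Adding VLAN 100 to the filters of VFs 0, 1 and 5
 * on port 0 (hypothetical values):
 *
 *        uint64_t mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 5);
 *        int ret = rte_pmd_ixgbe_set_vf_vlan_filter(0, 100, mask, 1);
 */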

int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
                                uint16_t tx_rate, uint64_t q_msk)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}

int
rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * Since no TX equivalent of ixgbe_disable_sec_rx_path() is
         * implemented in the base code, and the base code must not
         * be modified in DPDK, call the hand-written replacement
         * directly for now. Hardware support has already been
         * checked by ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Enable Ethernet CRC (required by MACsec offload) */
        ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

        /* Enable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
        ctrl |= 0x3;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

        /* Enable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
                     IXGBE_LSECTXCTRL_AUTH;
        ctrl |= IXGBE_LSECTXCTRL_AISCI;
        ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
        ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
        if (rp)
                ctrl |= IXGBE_LSECRXCTRL_RP;
        else
                ctrl &= ~IXGBE_LSECRXCTRL_RP;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * Since no TX equivalent of ixgbe_enable_sec_rx_path() is
         * implemented in the base code, and the base code must not
         * be modified in DPDK, call the hand-written replacement
         * directly for now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}
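
/*
 * Usage sketch (illustrative only): a minimal MACsec bring-up combines
 * this function with the SC/SA configuration helpers defined below.
 * All values (port, MAC addresses, PI, PN, key) are hypothetical
 * placeholders; error handling is elided.
 *
 *        uint8_t txmac[6] = { 0x02, 0, 0, 0, 0, 0x10 };
 *        uint8_t rxmac[6] = { 0x02, 0, 0, 0, 0, 0x20 };
 *        uint8_t key[16] = { 0 };  // substitute a real 128-bit key
 *
 *        rte_pmd_ixgbe_macsec_enable(0, 1, 1);  // encrypt + replay protect
 *        rte_pmd_ixgbe_macsec_config_txsc(0, txmac);
 *        rte_pmd_ixgbe_macsec_config_rxsc(0, rxmac, 0x8888);
 *        rte_pmd_ixgbe_macsec_select_txsa(0, 0, 0, 1, key);
 *        rte_pmd_ixgbe_macsec_select_rxsa(0, 0, 0, 1, key);
 */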

int
rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * Since no TX equivalent of ixgbe_disable_sec_rx_path() is
         * implemented in the base code, and the base code must not
         * be modified in DPDK, call the hand-written replacement
         * directly for now. Hardware support has already been
         * checked by ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Disable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        /* Disable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECTXCTRL_DISABLE;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * Since no TX equivalent of ixgbe_enable_sec_rx_path() is
         * implemented in the base code, and the base code must not
         * be modified in DPDK, call the hand-written replacement
         * directly for now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}

int
rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

        ctrl = mac[4] | (mac[5] << 8);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

        pi = rte_cpu_to_be_16(pi);
        ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN and key */
        pn = rte_cpu_to_be_32(pn);
        if (idx == 0) {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] <<  0) |
                               (key[i * 4 + 1] <<  8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
                }
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] <<  0) |
                               (key[i * 4 + 1] <<  8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
                }
        }

        /* Set AN and select the SA */
        ctrl = (an << idx * 2) | (idx << 4);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN */
        pn = rte_cpu_to_be_32(pn);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

        /* Set the key */
        for (i = 0; i < 4; i++) {
                ctrl = (key[i * 4 + 0] <<  0) |
                       (key[i * 4 + 1] <<  8) |
                       (key[i * 4 + 2] << 16) |
                       (key[i * 4 + 3] << 24);
                IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
        }

        /* Set the AN and validate the SA */
        ctrl = an | (1 << 2);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
                              uint8_t tc_num,
                              uint8_t *bw_weight)
{
        struct rte_eth_dev *dev;
        struct ixgbe_dcb_config *dcb_config;
        struct ixgbe_dcb_tc_config *tc;
        struct rte_eth_conf *eth_conf;
        struct ixgbe_bw_conf *bw_conf;
        uint8_t i;
        uint8_t nb_tcs;
        uint16_t sum;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "Number of TCs must not exceed %d.",
                            IXGBE_DCB_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
        eth_conf = &dev->data->dev_conf;

        if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
        } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
                    ETH_32_POOLS)
                        nb_tcs = ETH_4_TCS;
                else
                        nb_tcs = ETH_8_TCS;
        } else {
                nb_tcs = 1;
        }

        if (nb_tcs != tc_num) {
                PMD_DRV_LOG(ERR,
                            "Weight should be set for all %d enabled TCs.",
                            nb_tcs);
                return -EINVAL;
        }

        sum = 0;
        for (i = 0; i < nb_tcs; i++)
                sum += bw_weight[i];
        if (sum != 100) {
                PMD_DRV_LOG(ERR,
                            "The TC weights must sum to exactly 100.");
                return -EINVAL;
        }

        for (i = 0; i < nb_tcs; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
        }
        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
        }

        bw_conf->tc_num = nb_tcs;

        return 0;
}
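
/*
 * Usage sketch (illustrative only): the weight array must cover
 * exactly the enabled TCs and sum to 100. With 4 TCs enabled on
 * port 0 (hypothetical values):
 *
 *        uint8_t bw[4] = { 40, 30, 20, 10 };  // 40+30+20+10 == 100
 *        int ret = rte_pmd_ixgbe_set_tc_bw_alloc(0, 4, bw);
 */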

int
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t fctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
        dev = &rte_eth_devices[port];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (!hw)
                return -ENOTSUP;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

        /* If 'enable' is set, set the SBP bit; otherwise clear it */
        if (enable)
                fctrl |= IXGBE_FCTRL_SBP;
        else
                fctrl &= ~(IXGBE_FCTRL_SBP);

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
        return 0;
}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        ixgbe_bypass_init(dev);
        return 0;
}

int
rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_state_show(dev, state);
}

int
rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_state_store(dev, new_state);
}

int
rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
                                uint32_t event,
                                uint32_t *state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_event_show(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
                                 uint32_t event,
                                 uint32_t state)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_event_store(dev, event, state);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_timeout_store(dev, timeout);
}

int
rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_ver_show(dev, ver);
}

int
rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
}

int
rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_bypass_wd_reset(dev);
}
#endif

/**
 *  rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore and gets the shared PHY token as needed.
 */
STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
{
        int retries = FW_PHY_TOKEN_RETRIES;
        s32 status = IXGBE_SUCCESS;

        while (--retries) {
                status = ixgbe_acquire_swfw_semaphore(hw, mask);
                if (status) {
                        PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
                                    status);
                        return status;
                }
                status = ixgbe_get_phy_token(hw);
                if (status == IXGBE_SUCCESS)
                        return IXGBE_SUCCESS;

                if (status == IXGBE_ERR_TOKEN_RETRY)
                        PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
                                    status);

                ixgbe_release_swfw_semaphore(hw, mask);
                if (status != IXGBE_ERR_TOKEN_RETRY) {
                        PMD_DRV_LOG(ERR,
                                    "Retry get PHY token failed, Status=%d\n",
                                    status);
                        return status;
                }
        }
        PMD_DRV_LOG(ERR, "SWFW acquisition retries exhausted: PHY ID = 0x%08X\n",
                    hw->phy.id);
        return status;
}

/**
 *  rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SWFW semaphore and puts back the shared PHY token as needed.
 */
STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
{
        ixgbe_put_phy_token(hw);
        ixgbe_release_swfw_semaphore(hw, mask);
}

int
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        u32 swfw_mask;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
        dev = &rte_eth_devices[port];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (!hw)
                return -ENOTSUP;

        if (hw->bus.lan_id)
                swfw_mask = IXGBE_GSSR_PHY1_SM;
        else
                swfw_mask = IXGBE_GSSR_PHY0_SM;

        if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
                return IXGBE_ERR_SWFW_SYNC;

        return IXGBE_SUCCESS;
}

int
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
        struct rte_eth_dev *dev;
        struct ixgbe_hw *hw;
        u32 swfw_mask;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (!hw)
                return -ENOTSUP;

        if (hw->bus.lan_id)
                swfw_mask = IXGBE_GSSR_PHY1_SM;
        else
                swfw_mask = IXGBE_GSSR_PHY0_SM;

        rte_pmd_ixgbe_release_swfw(hw, swfw_mask);

        return IXGBE_SUCCESS;
}
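
/*
 * Usage sketch (illustrative only): the unlocked MDIO accessors
 * defined below must be bracketed by the lock/unlock pair above.
 * Port, register address and device type are hypothetical values.
 *
 *        uint16_t val;
 *
 *        if (rte_pmd_ixgbe_mdio_lock(0) == IXGBE_SUCCESS) {
 *                rte_pmd_ixgbe_mdio_unlocked_read(0, 0x1, 0x3, &val);
 *                rte_pmd_ixgbe_mdio_unlock(0);
 *        }
 */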

int
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
                                 uint32_t dev_type, uint16_t *phy_data)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        u32 i, data, command;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
        dev = &rte_eth_devices[port];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (!hw)
                return -ENOTSUP;

        /* Setup and write the read command */
        command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
                  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
                  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
                  IXGBE_MSCA_MDI_COMMAND;

        IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

        /* Check every 10 usec to see if the access completed.
         * The MDI Command bit will clear when the operation is
         * complete.
         */
        for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
                usec_delay(10);

                command = IXGBE_READ_REG(hw, IXGBE_MSCA);
                if (!(command & IXGBE_MSCA_MDI_COMMAND))
                        break;
        }
        if (command & IXGBE_MSCA_MDI_COMMAND)
                return IXGBE_ERR_PHY;

        /* Read operation is complete. Get the data from MSRWD */
        data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
        data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
        *phy_data = (u16)data;

        return 0;
}

int
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
                                  uint32_t dev_type, uint16_t phy_data)
{
        struct ixgbe_hw *hw;
        u32 i, command;
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
        dev = &rte_eth_devices[port];
        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (!hw)
                return -ENOTSUP;

        /* Put the data in the MDI single read and write data register */
        IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

        /* Setup and write the write command */
        command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
                  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
                  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
                  IXGBE_MSCA_MDI_COMMAND;

        IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

        /* Check every 10 usec to see if the access completed.
         * The MDI Command bit will clear when the operation is
         * complete.
         */
        for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
                usec_delay(10);

                command = IXGBE_READ_REG(hw, IXGBE_MSCA);
                if (!(command & IXGBE_MSCA_MDI_COMMAND))
                        break;
        }
        if (command & IXGBE_MSCA_MDI_COMMAND) {
                ERROR_REPORT1(IXGBE_ERROR_POLLING,
                              "PHY write cmd didn't complete\n");
                return IXGBE_ERR_PHY;
        }
        return 0;
}