/* [dpdk.git] drivers/net/i40e/rte_pmd_i40e.c */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <rte_malloc.h>
7 #include <rte_tailq.h>
8
9 #include "base/i40e_prototype.h"
10 #include "base/i40e_dcb.h"
11 #include "i40e_ethdev.h"
12 #include "i40e_pf.h"
13 #include "i40e_rxtx.h"
14 #include "rte_pmd_i40e.h"
15
16 int
17 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
18 {
19         struct rte_eth_dev *dev;
20         struct i40e_pf *pf;
21
22         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
23
24         dev = &rte_eth_devices[port];
25
26         if (!is_i40e_supported(dev))
27                 return -ENOTSUP;
28
29         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
30
31         if (vf >= pf->vf_num || !pf->vfs) {
32                 PMD_DRV_LOG(ERR, "Invalid argument.");
33                 return -EINVAL;
34         }
35
36         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
37
38         return 0;
39 }
40
41 int
42 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
43 {
44         struct rte_eth_dev *dev;
45         struct i40e_pf *pf;
46         struct i40e_vsi *vsi;
47         struct i40e_hw *hw;
48         struct i40e_vsi_context ctxt;
49         int ret;
50
51         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52
53         dev = &rte_eth_devices[port];
54
55         if (!is_i40e_supported(dev))
56                 return -ENOTSUP;
57
58         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
59
60         if (vf_id >= pf->vf_num || !pf->vfs) {
61                 PMD_DRV_LOG(ERR, "Invalid argument.");
62                 return -EINVAL;
63         }
64
65         vsi = pf->vfs[vf_id].vsi;
66         if (!vsi) {
67                 PMD_DRV_LOG(ERR, "Invalid VSI.");
68                 return -EINVAL;
69         }
70
71         /* Check if it has been already on or off */
72         if (vsi->info.valid_sections &
73                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
74                 if (on) {
75                         if ((vsi->info.sec_flags &
76                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
77                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
78                                 return 0; /* already on */
79                 } else {
80                         if ((vsi->info.sec_flags &
81                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
82                                 return 0; /* already off */
83                 }
84         }
85
86         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
87         if (on)
88                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
89         else
90                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
91
92         memset(&ctxt, 0, sizeof(ctxt));
93         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
94         ctxt.seid = vsi->seid;
95
96         hw = I40E_VSI_TO_HW(vsi);
97         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
98         if (ret != I40E_SUCCESS) {
99                 ret = -ENOTSUP;
100                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
101         }
102
103         return ret;
104 }
105
106 static int
107 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
108 {
109         uint32_t j, k;
110         uint16_t vlan_id;
111         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
112         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
113         int ret;
114
115         for (j = 0; j < I40E_VFTA_SIZE; j++) {
116                 if (!vsi->vfta[j])
117                         continue;
118
119                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
120                         if (!(vsi->vfta[j] & (1 << k)))
121                                 continue;
122
123                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
124                         if (!vlan_id)
125                                 continue;
126
127                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
128                         if (add)
129                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
130                                                        &vlan_data, 1, NULL);
131                         else
132                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
133                                                           &vlan_data, 1, NULL);
134                         if (ret != I40E_SUCCESS) {
135                                 PMD_DRV_LOG(ERR,
136                                             "Failed to add/rm vlan filter");
137                                 return ret;
138                         }
139                 }
140         }
141
142         return I40E_SUCCESS;
143 }
144
145 int
146 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
147 {
148         struct rte_eth_dev *dev;
149         struct i40e_pf *pf;
150         struct i40e_vsi *vsi;
151         struct i40e_hw *hw;
152         struct i40e_vsi_context ctxt;
153         int ret;
154
155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
156
157         dev = &rte_eth_devices[port];
158
159         if (!is_i40e_supported(dev))
160                 return -ENOTSUP;
161
162         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
163
164         if (vf_id >= pf->vf_num || !pf->vfs) {
165                 PMD_DRV_LOG(ERR, "Invalid argument.");
166                 return -EINVAL;
167         }
168
169         vsi = pf->vfs[vf_id].vsi;
170         if (!vsi) {
171                 PMD_DRV_LOG(ERR, "Invalid VSI.");
172                 return -EINVAL;
173         }
174
175         /* Check if it has been already on or off */
176         if (vsi->vlan_anti_spoof_on == on)
177                 return 0; /* already on or off */
178
179         vsi->vlan_anti_spoof_on = on;
180         if (!vsi->vlan_filter_on) {
181                 ret = i40e_add_rm_all_vlan_filter(vsi, on);
182                 if (ret) {
183                         PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
184                         return -ENOTSUP;
185                 }
186         }
187
188         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
189         if (on)
190                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
191         else
192                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
193
194         memset(&ctxt, 0, sizeof(ctxt));
195         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
196         ctxt.seid = vsi->seid;
197
198         hw = I40E_VSI_TO_HW(vsi);
199         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
200         if (ret != I40E_SUCCESS) {
201                 ret = -ENOTSUP;
202                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
203         }
204
205         return ret;
206 }
207
/*
 * Remove every MAC(/VLAN) filter currently attached to the VSI from the
 * hardware. The software mac_list itself is left intact so the filters
 * can be re-installed later by i40e_vsi_restore_mac_filter().
 * Returns I40E_SUCCESS, I40E_ERR_PARAM, I40E_ERR_NO_MEMORY, or the
 * first AQ error encountered.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum i40e_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		/* MACVLAN filters need one entry per configured VLAN;
		 * MAC-only filters need exactly one entry.
		 */
		if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		    filter_type == I40E_MACVLAN_HASH_MATCH) {
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
			   filter_type == I40E_MAC_HASH_MATCH)
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Replicate this MAC across all vlan_num scratch entries. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		    filter_type == I40E_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN id of each entry from the VSI's
			 * VLAN bitmap for this MAC.
			 */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
266
/*
 * Re-install every MAC(/VLAN) filter from the VSI's software mac_list
 * into the hardware; counterpart of i40e_vsi_rm_mac_filter().
 * Returns I40E_SUCCESS, I40E_ERR_NO_MEMORY, or the first AQ error.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if (f->mac_info.filter_type == I40E_MAC_PERFECT_MATCH ||
			   f->mac_info.filter_type == I40E_MAC_HASH_MATCH)
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* One scratch entry per VLAN, all carrying this MAC. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) {
			/* Populate each entry's VLAN id from the VSI bitmap. */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
328
329 static int
330 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
331 {
332         struct i40e_vsi_context ctxt;
333         struct i40e_hw *hw;
334         int ret;
335
336         if (!vsi)
337                 return -EINVAL;
338
339         hw = I40E_VSI_TO_HW(vsi);
340
341         /* Use the FW API if FW >= v5.0 */
342         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
343                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
344                 return -ENOTSUP;
345         }
346
347         /* Check if it has been already on or off */
348         if (vsi->info.valid_sections &
349                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
350                 if (on) {
351                         if ((vsi->info.switch_id &
352                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
353                             I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
354                                 return 0; /* already on */
355                 } else {
356                         if ((vsi->info.switch_id &
357                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
358                                 return 0; /* already off */
359                 }
360         }
361
362         /* remove all the MAC and VLAN first */
363         ret = i40e_vsi_rm_mac_filter(vsi);
364         if (ret) {
365                 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
366                 return ret;
367         }
368         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
369                 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
370                 if (ret) {
371                         PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
372                         return ret;
373                 }
374         }
375
376         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
377         if (on)
378                 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
379         else
380                 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
381
382         memset(&ctxt, 0, sizeof(ctxt));
383         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
384         ctxt.seid = vsi->seid;
385
386         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
387         if (ret != I40E_SUCCESS) {
388                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
389                 return ret;
390         }
391
392         /* add all the MAC and VLAN back */
393         ret = i40e_vsi_restore_mac_filter(vsi);
394         if (ret)
395                 return ret;
396         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397                 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
398                 if (ret)
399                         return ret;
400         }
401
402         return ret;
403 }
404
405 int
406 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
407 {
408         struct rte_eth_dev *dev;
409         struct i40e_pf *pf;
410         struct i40e_pf_vf *vf;
411         struct i40e_vsi *vsi;
412         uint16_t vf_id;
413         int ret;
414
415         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
416
417         dev = &rte_eth_devices[port];
418
419         if (!is_i40e_supported(dev))
420                 return -ENOTSUP;
421
422         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
423
424         /* setup PF TX loopback */
425         vsi = pf->main_vsi;
426         ret = i40e_vsi_set_tx_loopback(vsi, on);
427         if (ret)
428                 return -ENOTSUP;
429
430         /* setup TX loopback for all the VFs */
431         if (!pf->vfs) {
432                 /* if no VF, do nothing. */
433                 return 0;
434         }
435
436         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
437                 vf = &pf->vfs[vf_id];
438                 vsi = vf->vsi;
439
440                 ret = i40e_vsi_set_tx_loopback(vsi, on);
441                 if (ret)
442                         return -ENOTSUP;
443         }
444
445         return ret;
446 }
447
448 int
449 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
450 {
451         struct rte_eth_dev *dev;
452         struct i40e_pf *pf;
453         struct i40e_vsi *vsi;
454         struct i40e_hw *hw;
455         int ret;
456
457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
458
459         dev = &rte_eth_devices[port];
460
461         if (!is_i40e_supported(dev))
462                 return -ENOTSUP;
463
464         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
465
466         if (vf_id >= pf->vf_num || !pf->vfs) {
467                 PMD_DRV_LOG(ERR, "Invalid argument.");
468                 return -EINVAL;
469         }
470
471         vsi = pf->vfs[vf_id].vsi;
472         if (!vsi) {
473                 PMD_DRV_LOG(ERR, "Invalid VSI.");
474                 return -EINVAL;
475         }
476
477         hw = I40E_VSI_TO_HW(vsi);
478
479         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
480                                                   on, NULL, true);
481         if (ret != I40E_SUCCESS) {
482                 ret = -ENOTSUP;
483                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
484         }
485
486         return ret;
487 }
488
489 int
490 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
491 {
492         struct rte_eth_dev *dev;
493         struct i40e_pf *pf;
494         struct i40e_vsi *vsi;
495         struct i40e_hw *hw;
496         int ret;
497
498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
499
500         dev = &rte_eth_devices[port];
501
502         if (!is_i40e_supported(dev))
503                 return -ENOTSUP;
504
505         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
506
507         if (vf_id >= pf->vf_num || !pf->vfs) {
508                 PMD_DRV_LOG(ERR, "Invalid argument.");
509                 return -EINVAL;
510         }
511
512         vsi = pf->vfs[vf_id].vsi;
513         if (!vsi) {
514                 PMD_DRV_LOG(ERR, "Invalid VSI.");
515                 return -EINVAL;
516         }
517
518         hw = I40E_VSI_TO_HW(vsi);
519
520         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
521                                                     on, NULL);
522         if (ret != I40E_SUCCESS) {
523                 ret = -ENOTSUP;
524                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
525         }
526
527         return ret;
528 }
529
/*
 * Record a new default MAC address for a VF and flush the VSI's
 * existing MAC filters. The new address is not programmed here; it is
 * stored in vf->mac_addr for the VF to pick up later. Filter-delete
 * failures are only warned about, not returned.
 */
int
rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
			     struct rte_ether_addr *mac_addr)
{
	struct i40e_mac_filter *f;
	struct rte_eth_dev *dev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	void *temp;

	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs)
		return -EINVAL;

	vf = &pf->vfs[vf_id];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	rte_ether_addr_copy(mac_addr, &vf->mac_addr);

	/* Remove all existing mac */
	/* _SAFE variant: i40e_vsi_delete_mac unlinks f from mac_list
	 * while we iterate.
	 */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
				!= I40E_SUCCESS)
			PMD_DRV_LOG(WARNING, "Delete MAC failed");

	return 0;
}
573
/* All-zero address used to clear a VF's recorded default MAC. */
static const struct rte_ether_addr null_mac_addr;

/*
 * Remove one MAC filter from a VF's VSI. If the address being removed
 * is the VF's recorded default MAC, that record is reset to zero.
 * Returns 0 on success, a raw i40e error code on AQ failure, or
 * negative errno on validation failure.
 */
int
rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
	struct rte_ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	int ret;

	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs)
		return -EINVAL;

	vf = &pf->vfs[vf_id];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (rte_is_same_ether_addr(mac_addr, &vf->mac_addr))
		/* Reset the mac with NULL address */
		rte_ether_addr_copy(&null_mac_addr, &vf->mac_addr);

	/* Remove the mac */
	ret = i40e_vsi_delete_mac(vsi, mac_addr);
	if (ret != I40E_SUCCESS)
		return ret;
	return 0;
}
618
619 /* Set vlan strip on/off for specific VF from host */
620 int
621 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
622 {
623         struct rte_eth_dev *dev;
624         struct i40e_pf *pf;
625         struct i40e_vsi *vsi;
626         int ret;
627
628         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
629
630         dev = &rte_eth_devices[port];
631
632         if (!is_i40e_supported(dev))
633                 return -ENOTSUP;
634
635         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
636
637         if (vf_id >= pf->vf_num || !pf->vfs) {
638                 PMD_DRV_LOG(ERR, "Invalid argument.");
639                 return -EINVAL;
640         }
641
642         vsi = pf->vfs[vf_id].vsi;
643
644         if (!vsi)
645                 return -EINVAL;
646
647         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
648         if (ret != I40E_SUCCESS) {
649                 ret = -ENOTSUP;
650                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
651         }
652
653         return ret;
654 }
655
656 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
657                                     uint16_t vlan_id)
658 {
659         struct rte_eth_dev *dev;
660         struct i40e_pf *pf;
661         struct i40e_hw *hw;
662         struct i40e_vsi *vsi;
663         struct i40e_vsi_context ctxt;
664         int ret;
665
666         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
667
668         if (vlan_id > RTE_ETHER_MAX_VLAN_ID) {
669                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
670                 return -EINVAL;
671         }
672
673         dev = &rte_eth_devices[port];
674
675         if (!is_i40e_supported(dev))
676                 return -ENOTSUP;
677
678         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
679         hw = I40E_PF_TO_HW(pf);
680
681         /**
682          * return -ENODEV if SRIOV not enabled, VF number not configured
683          * or no queue assigned.
684          */
685         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
686             pf->vf_nb_qps == 0)
687                 return -ENODEV;
688
689         if (vf_id >= pf->vf_num || !pf->vfs) {
690                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
691                 return -EINVAL;
692         }
693
694         vsi = pf->vfs[vf_id].vsi;
695         if (!vsi) {
696                 PMD_DRV_LOG(ERR, "Invalid VSI.");
697                 return -EINVAL;
698         }
699
700         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
701         vsi->info.pvid = vlan_id;
702         if (vlan_id > 0)
703                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
704         else
705                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
706
707         memset(&ctxt, 0, sizeof(ctxt));
708         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
709         ctxt.seid = vsi->seid;
710
711         hw = I40E_VSI_TO_HW(vsi);
712         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
713         if (ret != I40E_SUCCESS) {
714                 ret = -ENOTSUP;
715                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
716         }
717
718         return ret;
719 }
720
/*
 * Enable/disable reception of broadcast frames on a VF by adding or
 * removing a perfect-match filter for ff:ff:ff:ff:ff:ff.
 * I40E_ERR_PARAM from add/delete (filter already present/absent) is
 * treated as success.
 */
int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
				  uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_mac_filter_info filter;
	struct rte_ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (on) {
		rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
		filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
		ret = i40e_vsi_add_mac(vsi, &filter);
	} else {
		ret = i40e_vsi_delete_mac(vsi, &broadcast);
	}

	/* I40E_ERR_PARAM means the filter was already in the requested
	 * state; report success in that case.
	 */
	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
	} else {
		ret = 0;
	}

	return ret;
}
786
787 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
788 {
789         struct rte_eth_dev *dev;
790         struct i40e_pf *pf;
791         struct i40e_hw *hw;
792         struct i40e_vsi *vsi;
793         struct i40e_vsi_context ctxt;
794         int ret;
795
796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
797
798         if (on > 1) {
799                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
800                 return -EINVAL;
801         }
802
803         dev = &rte_eth_devices[port];
804
805         if (!is_i40e_supported(dev))
806                 return -ENOTSUP;
807
808         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
809         hw = I40E_PF_TO_HW(pf);
810
811         /**
812          * return -ENODEV if SRIOV not enabled, VF number not configured
813          * or no queue assigned.
814          */
815         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
816             pf->vf_nb_qps == 0) {
817                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
818                 return -ENODEV;
819         }
820
821         if (vf_id >= pf->vf_num || !pf->vfs) {
822                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
823                 return -EINVAL;
824         }
825
826         vsi = pf->vfs[vf_id].vsi;
827         if (!vsi) {
828                 PMD_DRV_LOG(ERR, "Invalid VSI.");
829                 return -EINVAL;
830         }
831
832         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
833         if (on) {
834                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
835                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
836         } else {
837                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
838                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
839         }
840
841         memset(&ctxt, 0, sizeof(ctxt));
842         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
843         ctxt.seid = vsi->seid;
844
845         hw = I40E_VSI_TO_HW(vsi);
846         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
847         if (ret != I40E_SUCCESS) {
848                 ret = -ENOTSUP;
849                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
850         }
851
852         return ret;
853 }
854
855 static int
856 i40e_vlan_filter_count(struct i40e_vsi *vsi)
857 {
858         uint32_t j, k;
859         uint16_t vlan_id;
860         int count = 0;
861
862         for (j = 0; j < I40E_VFTA_SIZE; j++) {
863                 if (!vsi->vfta[j])
864                         continue;
865
866                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
867                         if (!(vsi->vfta[j] & (1 << k)))
868                                 continue;
869
870                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
871                         if (!vlan_id)
872                                 continue;
873
874                         count++;
875                 }
876         }
877
878         return count;
879 }
880
881 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
882                                     uint64_t vf_mask, uint8_t on)
883 {
884         struct rte_eth_dev *dev;
885         struct i40e_pf *pf;
886         struct i40e_hw *hw;
887         struct i40e_vsi *vsi;
888         uint16_t vf_idx;
889         int ret = I40E_SUCCESS;
890
891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
892
893         dev = &rte_eth_devices[port];
894
895         if (!is_i40e_supported(dev))
896                 return -ENOTSUP;
897
898         if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) {
899                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
900                 return -EINVAL;
901         }
902
903         if (vf_mask == 0) {
904                 PMD_DRV_LOG(ERR, "No VF.");
905                 return -EINVAL;
906         }
907
908         if (on > 1) {
909                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
910                 return -EINVAL;
911         }
912
913         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
914         hw = I40E_PF_TO_HW(pf);
915
916         /**
917          * return -ENODEV if SRIOV not enabled, VF number not configured
918          * or no queue assigned.
919          */
920         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
921             pf->vf_nb_qps == 0) {
922                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
923                 return -ENODEV;
924         }
925
926         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
927                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
928                         vsi = pf->vfs[vf_idx].vsi;
929                         if (on) {
930                                 if (!vsi->vlan_filter_on) {
931                                         vsi->vlan_filter_on = true;
932                                         i40e_aq_set_vsi_vlan_promisc(hw,
933                                                                      vsi->seid,
934                                                                      false,
935                                                                      NULL);
936                                         if (!vsi->vlan_anti_spoof_on)
937                                                 i40e_add_rm_all_vlan_filter(
938                                                         vsi, true);
939                                 }
940                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
941                         } else {
942                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
943
944                                 if (!i40e_vlan_filter_count(vsi)) {
945                                         vsi->vlan_filter_on = false;
946                                         i40e_aq_set_vsi_vlan_promisc(hw,
947                                                                      vsi->seid,
948                                                                      true,
949                                                                      NULL);
950                                 }
951                         }
952                 }
953         }
954
955         if (ret != I40E_SUCCESS) {
956                 ret = -ENOTSUP;
957                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
958         }
959
960         return ret;
961 }
962
963 int
964 rte_pmd_i40e_get_vf_stats(uint16_t port,
965                           uint16_t vf_id,
966                           struct rte_eth_stats *stats)
967 {
968         struct rte_eth_dev *dev;
969         struct i40e_pf *pf;
970         struct i40e_vsi *vsi;
971
972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
973
974         dev = &rte_eth_devices[port];
975
976         if (!is_i40e_supported(dev))
977                 return -ENOTSUP;
978
979         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
980
981         if (vf_id >= pf->vf_num || !pf->vfs) {
982                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
983                 return -EINVAL;
984         }
985
986         vsi = pf->vfs[vf_id].vsi;
987         if (!vsi) {
988                 PMD_DRV_LOG(ERR, "Invalid VSI.");
989                 return -EINVAL;
990         }
991
992         i40e_update_vsi_stats(vsi);
993
994         stats->ipackets = vsi->eth_stats.rx_unicast +
995                         vsi->eth_stats.rx_multicast +
996                         vsi->eth_stats.rx_broadcast;
997         stats->opackets = vsi->eth_stats.tx_unicast +
998                         vsi->eth_stats.tx_multicast +
999                         vsi->eth_stats.tx_broadcast;
1000         stats->ibytes   = vsi->eth_stats.rx_bytes;
1001         stats->obytes   = vsi->eth_stats.tx_bytes;
1002         stats->ierrors  = vsi->eth_stats.rx_discards;
1003         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
1004
1005         return 0;
1006 }
1007
1008 int
1009 rte_pmd_i40e_reset_vf_stats(uint16_t port,
1010                             uint16_t vf_id)
1011 {
1012         struct rte_eth_dev *dev;
1013         struct i40e_pf *pf;
1014         struct i40e_vsi *vsi;
1015
1016         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1017
1018         dev = &rte_eth_devices[port];
1019
1020         if (!is_i40e_supported(dev))
1021                 return -ENOTSUP;
1022
1023         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1024
1025         if (vf_id >= pf->vf_num || !pf->vfs) {
1026                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1027                 return -EINVAL;
1028         }
1029
1030         vsi = pf->vfs[vf_id].vsi;
1031         if (!vsi) {
1032                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1033                 return -EINVAL;
1034         }
1035
1036         vsi->offset_loaded = false;
1037         i40e_update_vsi_stats(vsi);
1038
1039         return 0;
1040 }
1041
/**
 * Set (or disable, with bw == 0) the aggregate TX bandwidth limit of a VF.
 *
 * @param port  ethdev port id; must be an i40e device.
 * @param vf_id VF index, < pf->vf_num.
 * @param bw    limit in Mbps; 0 disables the limit. Must be a multiple of
 *              I40E_QOS_BW_GRANULARITY and <= I40E_QOS_BW_MAX.
 * @return 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 *
 * NOTE(review): VF-level and per-TC bandwidth limits are mutually
 * exclusive on this hardware, hence the enabled-TC credit scan below.
 */
int
rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* Firmware takes the limit in granularity units, not Mbps. */
	bw /= I40E_QOS_BW_GRANULARITY;

	hw = I40E_VSI_TO_HW(vsi);

	/* No change. */
	if (bw == vsi->bw_info.bw_limit) {
		PMD_DRV_LOG(INFO,
			    "No change for VF max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, quit if TC bandwidth limitation is enabled.
	 *
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * check TC bandwidth limitation.
	 */
	if (bw) {
		/* Any enabled TC with non-zero ETS credits blocks the VF limit. */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if ((vsi->enabled_tc & BIT_ULL(i)) &&
			    vsi->bw_info.bw_ets_credits[i])
				break;
		}
		if (i != I40E_MAX_TRAFFIC_CLASS) {
			PMD_DRV_LOG(ERR,
				    "TC max bandwidth has been set on this VF,"
				    " please disable it first.");
			return -EINVAL;
		}
	}

	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d bandwidth, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_limit = (uint16_t)bw;
	vsi->bw_info.bw_max = 0;

	return 0;
}
1130
/**
 * Set the relative (ETS share) bandwidth weights of a VF's enabled TCs.
 *
 * @param port      ethdev port id; must be an i40e device.
 * @param vf_id     VF index, < pf->vf_num.
 * @param tc_num    number of entries in @bw_weight; must equal the number
 *                  of TCs enabled on the VF's VSI.
 * @param bw_weight per-TC weights, each >= 1, summing to exactly 100.
 *                  Entry j maps to the j-th enabled TC in ascending order.
 * @return 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
				uint8_t tc_num, uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
	int ret = 0;
	int i, j;
	uint16_t sum;
	bool b_change = false;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	/* tc_num must match the number of TCs enabled on the VSI. */
	sum = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}
	if (sum != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    sum);
		return -EINVAL;
	}

	/* Weights must be non-zero and total exactly 100 (percent). */
	sum = 0;
	for (i = 0; i < tc_num; i++) {
		if (!bw_weight[i]) {
			PMD_DRV_LOG(ERR,
				    "The weight should be 1 at least.");
			return -EINVAL;
		}
		sum += bw_weight[i];
	}
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The summary of the TC weight should be 100.");
		return -EINVAL;
	}

	/**
	 * Create the configuration for all the TCs.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			/* Detect whether anything actually differs. */
			if (bw_weight[j] !=
				vsi->bw_info.bw_ets_share_credits[i])
				b_change = true;

			tc_bw.tc_bw_credits[i] = bw_weight[j];
			j++;
		}
	}

	/* No change. */
	if (!b_change) {
		PMD_DRV_LOG(INFO,
			    "No change for TC allocated bandwidth."
			    " Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC bandwidth weight, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
			j++;
		}
	}

	return 0;
}
1244
/**
 * Set (or disable, with bw == 0) the max bandwidth of one TC of a VF.
 *
 * @param port  ethdev port id; must be an i40e device.
 * @param vf_id VF index, < pf->vf_num.
 * @param tc_no traffic class number; must be enabled on the VF's VSI.
 * @param bw    limit in Mbps; 0 disables it. Must be a multiple of
 *              I40E_QOS_BW_GRANULARITY and <= I40E_QOS_BW_MAX.
 * @return 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 *
 * NOTE(review): any active VF-level limit is silently disabled first,
 * since VF and per-TC limits are mutually exclusive on this hardware.
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
			      uint8_t tc_no, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* Firmware takes the limit in granularity units, not Mbps. */
	bw /= I40E_QOS_BW_GRANULARITY;

	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	hw = I40E_VSI_TO_HW(vsi);

	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
			    vf_id, tc_no);
		return -EINVAL;
	}

	/* No change. */
	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
		PMD_DRV_LOG(INFO,
			    "No change for TC max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, disable VF bandwidth limitation if it's
	 * enabled.
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * care about VF bandwidth limitation configuration.
	 */
	if (bw && vsi->bw_info.bw_limit) {
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to disable VF(%d)"
				    " bandwidth limitation, err(%d).",
				    vf_id, ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "VF max bandwidth is disabled according"
			    " to TC max bandwidth setting.");
	}

	/**
	 * Get all the TCs' info to create a whole picture.
	 * Because the incremental change isn't permitted.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			/* AQ buffer is little-endian. */
			tc_bw.tc_bw_credits[i] =
				rte_cpu_to_le_16(
					vsi->bw_info.bw_ets_credits[i]);
		}
	}
	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
			    vf_id, tc_no, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

	return 0;
}
1362
/**
 * Put the TCs selected by @tc_map into strict-priority scheduling mode
 * on the main VSI's VEB; tc_map == 0 disables strict priority entirely.
 *
 * @param port   ethdev port id; must be an i40e device.
 * @param tc_map bitmap of TCs; must be a subset of the VEB's enabled TCs.
 * @return 0 on success; -ENODEV, -ENOTSUP or -EINVAL on failure.
 *
 * NOTE(review): DCBx/LLDP is stopped while any TC is in strict priority
 * and restarted when strict priority is fully disabled (see below).
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/* Disable DCBx if it's the first time to set strict priority. */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, true, NULL);
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For robustness, if bandwidth is 0, use 1 instead. */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/*
	 * Pick the AQ opcode based on the transition:
	 * off -> on = enable, on -> on = modify, on -> off = disable.
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, true, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1481
1482 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1483 #define I40E_MAX_PROFILE_NUM 16
1484
1485 static void
1486 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1487                                uint32_t track_id, uint8_t *profile_info_sec,
1488                                bool add)
1489 {
1490         struct i40e_profile_section_header *sec = NULL;
1491         struct i40e_profile_info *pinfo;
1492
1493         sec = (struct i40e_profile_section_header *)profile_info_sec;
1494         sec->tbl_size = 1;
1495         sec->data_end = sizeof(struct i40e_profile_section_header) +
1496                 sizeof(struct i40e_profile_info);
1497         sec->section.type = SECTION_TYPE_INFO;
1498         sec->section.offset = sizeof(struct i40e_profile_section_header);
1499         sec->section.size = sizeof(struct i40e_profile_info);
1500         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1501                                              sec->section.offset);
1502         pinfo->track_id = track_id;
1503         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1504         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1505         if (add)
1506                 pinfo->op = I40E_DDP_ADD_TRACKID;
1507         else
1508                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1509 }
1510
1511 static enum i40e_status_code
1512 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1513 {
1514         enum i40e_status_code status = I40E_SUCCESS;
1515         struct i40e_profile_section_header *sec;
1516         uint32_t track_id;
1517         uint32_t offset = 0;
1518         uint32_t info = 0;
1519
1520         sec = (struct i40e_profile_section_header *)profile_info_sec;
1521         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1522                                          sec->section.offset))->track_id;
1523
1524         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1525                                    track_id, &offset, &info, NULL);
1526         if (status)
1527                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1528                             "offset %d, info %d",
1529                             offset, info);
1530
1531         return status;
1532 }
1533
1534 /* Check if the profile info exists */
1535 static int
1536 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1537 {
1538         struct rte_eth_dev *dev = &rte_eth_devices[port];
1539         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1540         uint8_t *buff;
1541         struct rte_pmd_i40e_profile_list *p_list;
1542         struct rte_pmd_i40e_profile_info *pinfo, *p;
1543         uint32_t i;
1544         int ret;
1545         static const uint32_t group_mask = 0x00ff0000;
1546
1547         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1548                              sizeof(struct i40e_profile_section_header));
1549         if (pinfo->track_id == 0) {
1550                 PMD_DRV_LOG(INFO, "Read-only profile.");
1551                 return 0;
1552         }
1553         buff = rte_zmalloc("pinfo_list",
1554                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1555                            0);
1556         if (!buff) {
1557                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1558                 return -1;
1559         }
1560
1561         ret = i40e_aq_get_ddp_list(
1562                 hw, (void *)buff,
1563                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1564                 0, NULL);
1565         if (ret) {
1566                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1567                 rte_free(buff);
1568                 return -1;
1569         }
1570         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1571         for (i = 0; i < p_list->p_count; i++) {
1572                 p = &p_list->p_info[i];
1573                 if (pinfo->track_id == p->track_id) {
1574                         PMD_DRV_LOG(INFO, "Profile exists.");
1575                         rte_free(buff);
1576                         return 1;
1577                 }
1578         }
1579         /* profile with group id 0xff is compatible with any other profile */
1580         if ((pinfo->track_id & group_mask) == group_mask) {
1581                 rte_free(buff);
1582                 return 0;
1583         }
1584         for (i = 0; i < p_list->p_count; i++) {
1585                 p = &p_list->p_info[i];
1586                 if ((p->track_id & group_mask) == 0) {
1587                         PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1588                         rte_free(buff);
1589                         return 2;
1590                 }
1591         }
1592         for (i = 0; i < p_list->p_count; i++) {
1593                 p = &p_list->p_info[i];
1594                 if ((p->track_id & group_mask) == group_mask)
1595                         continue;
1596                 if ((pinfo->track_id & group_mask) !=
1597                     (p->track_id & group_mask)) {
1598                         PMD_DRV_LOG(INFO, "Profile of different group exists.");
1599                         rte_free(buff);
1600                         return 3;
1601                 }
1602         }
1603
1604         rte_free(buff);
1605         return 0;
1606 }
1607
/**
 * Load (WR_ADD), unload (WR_DEL) or raw-write (WR_ONLY) a DDP package.
 *
 * @param port ethdev port id; must be an i40e device.
 * @param buff package image; must contain at least a package header,
 *             a metadata segment and two trailing words.
 * @param size size of @buff in bytes.
 * @param op   one of RTE_PMD_I40E_PKG_OP_WR_{ADD,ONLY,DEL}.
 * @return 0 on success; -ENOTSUP/-ENODEV/-EINVAL/-EEXIST/-EACCES, or a
 *         raw i40e status code from the profile write/rollback path.
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;
	static const uint32_t type_mask = 0xff000000;

	/* Only the three write operations are handled here. */
	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Minimum plausible package size before dereferencing headers. */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	/* Effectively a NULL check on buff. */
	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	/* A valid package carries at least metadata + profile segments. */
	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* force read-only track_id for type 0 */
	if ((track_id & type_mask) == 0)
		track_id = 0;

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	/* Scratch section used to query/update the firmware profile list. */
	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/*
	 * ADD requires the profile (and any conflicting group) to be
	 * absent; DEL requires exactly this profile to be present.
	 */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			if (is_exist == 1)
				PMD_DRV_LOG(ERR, "Profile already exists.");
			else if (is_exist == 2)
				PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
			else if (is_exist == 3)
				PMD_DRV_LOG(ERR, "Profile of different group already exists");
			i40e_update_customized_info(dev, buff, size, op);
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (is_exist != 1) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	/* DEL rolls the profile back; ADD/ONLY write it to the device. */
	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	/* Read-only (track_id 0) and raw writes skip the info list. */
	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
	    op == RTE_PMD_I40E_PKG_OP_WR_DEL)
		i40e_update_customized_info(dev, buff, size, op);

	rte_free(profile_info_sec);
	return status;
}
1770
/* Get number of TLV records in the section */
static unsigned int
i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
{
	unsigned int i, nb_rec, nb_tlv = 0;
	struct i40e_profile_tlv_section_record *tlv;

	/* A missing section contains zero TLV records. */
	if (!sec)
		return nb_tlv;

	/* get number of records in the section */
	nb_rec = sec->section.size /
				sizeof(struct i40e_profile_tlv_section_record);
	for (i = 0; i < nb_rec; ) {
		/* NOTE(review): &sec[1 + i] advances in units of the section
		 * header size; this assumes sizeof(section header) ==
		 * sizeof(TLV record) so record i lands just past the header
		 * — confirm against the DDP package layout. tlv->len is a
		 * record count, not bytes, per the division above.
		 */
		tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
		i += tlv->len;
		nb_tlv++;
	}
	return nb_tlv;
}
1791
1792 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1793         uint8_t *info_buff, uint32_t info_size,
1794         enum rte_pmd_i40e_package_info type)
1795 {
1796         uint32_t ret_size;
1797         struct i40e_package_header *pkg_hdr;
1798         struct i40e_generic_seg_header *i40e_seg_hdr;
1799         struct i40e_generic_seg_header *note_seg_hdr;
1800         struct i40e_generic_seg_header *metadata_seg_hdr;
1801
1802         if (!info_buff) {
1803                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1804                 return -EINVAL;
1805         }
1806
1807         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1808                 sizeof(struct i40e_metadata_segment) +
1809                 sizeof(uint32_t) * 2)) {
1810                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1811                 return -EINVAL;
1812         }
1813
1814         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1815         if (pkg_hdr->segment_count < 2) {
1816                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1817                 return -EINVAL;
1818         }
1819
1820         /* Find metadata segment */
1821         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1822                 pkg_hdr);
1823
1824         /* Find global notes segment */
1825         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1826                 pkg_hdr);
1827
1828         /* Find i40e profile segment */
1829         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1830
1831         /* get global header info */
1832         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1833                 struct rte_pmd_i40e_profile_info *info =
1834                         (struct rte_pmd_i40e_profile_info *)info_buff;
1835
1836                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1837                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1838                         return -EINVAL;
1839                 }
1840
1841                 if (!metadata_seg_hdr) {
1842                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1843                         return -EINVAL;
1844                 }
1845
1846                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1847                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1848                 info->track_id =
1849                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1850
1851                 memcpy(info->name,
1852                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1853                         I40E_DDP_NAME_SIZE);
1854                 memcpy(&info->version,
1855                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1856                         sizeof(struct i40e_ddp_version));
1857                 return I40E_SUCCESS;
1858         }
1859
1860         /* get global note size */
1861         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1862                 if (info_size < sizeof(uint32_t)) {
1863                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1864                         return -EINVAL;
1865                 }
1866                 if (note_seg_hdr == NULL)
1867                         ret_size = 0;
1868                 else
1869                         ret_size = note_seg_hdr->size;
1870                 *(uint32_t *)info_buff = ret_size;
1871                 return I40E_SUCCESS;
1872         }
1873
1874         /* get global note */
1875         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1876                 if (note_seg_hdr == NULL)
1877                         return -ENOTSUP;
1878                 if (info_size < note_seg_hdr->size) {
1879                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1880                         return -EINVAL;
1881                 }
1882                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1883                 return I40E_SUCCESS;
1884         }
1885
1886         /* get i40e segment header info */
1887         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1888                 struct rte_pmd_i40e_profile_info *info =
1889                         (struct rte_pmd_i40e_profile_info *)info_buff;
1890
1891                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1892                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1893                         return -EINVAL;
1894                 }
1895
1896                 if (!metadata_seg_hdr) {
1897                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1898                         return -EINVAL;
1899                 }
1900
1901                 if (!i40e_seg_hdr) {
1902                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1903                         return -EINVAL;
1904                 }
1905
1906                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1907                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1908                 info->track_id =
1909                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1910
1911                 memcpy(info->name,
1912                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1913                         I40E_DDP_NAME_SIZE);
1914                 memcpy(&info->version,
1915                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1916                         sizeof(struct i40e_ddp_version));
1917                 return I40E_SUCCESS;
1918         }
1919
1920         /* get number of devices */
1921         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1922                 if (info_size < sizeof(uint32_t)) {
1923                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1924                         return -EINVAL;
1925                 }
1926                 *(uint32_t *)info_buff =
1927                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1928                 return I40E_SUCCESS;
1929         }
1930
1931         /* get list of devices */
1932         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1933                 uint32_t dev_num;
1934                 dev_num =
1935                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1936                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1937                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1938                         return -EINVAL;
1939                 }
1940                 memcpy(info_buff,
1941                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1942                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1943                 return I40E_SUCCESS;
1944         }
1945
1946         /* get number of protocols */
1947         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1948                 struct i40e_profile_section_header *proto;
1949
1950                 if (info_size < sizeof(uint32_t)) {
1951                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1952                         return -EINVAL;
1953                 }
1954                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1955                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1956                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1957                 return I40E_SUCCESS;
1958         }
1959
1960         /* get list of protocols */
1961         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1962                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1963                 struct rte_pmd_i40e_proto_info *pinfo;
1964                 struct i40e_profile_section_header *proto;
1965                 struct i40e_profile_tlv_section_record *tlv;
1966
1967                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1968                 nb_proto_info = info_size /
1969                                         sizeof(struct rte_pmd_i40e_proto_info);
1970                 for (i = 0; i < nb_proto_info; i++) {
1971                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1972                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1973                 }
1974                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1975                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1976                 nb_tlv = i40e_get_tlv_section_size(proto);
1977                 if (nb_tlv == 0)
1978                         return I40E_SUCCESS;
1979                 if (nb_proto_info < nb_tlv) {
1980                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1981                         return -EINVAL;
1982                 }
1983                 /* get number of records in the section */
1984                 nb_rec = proto->section.size /
1985                                 sizeof(struct i40e_profile_tlv_section_record);
1986                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1987                 for (i = j = 0; i < nb_rec; j++) {
1988                         pinfo[j].proto_id = tlv->data[0];
1989                         strlcpy(pinfo[j].name, (const char *)&tlv->data[1],
1990                                 I40E_DDP_NAME_SIZE);
1991                         i += tlv->len;
1992                         tlv = &tlv[tlv->len];
1993                 }
1994                 return I40E_SUCCESS;
1995         }
1996
1997         /* get number of packet classification types */
1998         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1999                 struct i40e_profile_section_header *pctype;
2000
2001                 if (info_size < sizeof(uint32_t)) {
2002                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2003                         return -EINVAL;
2004                 }
2005                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2006                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2007                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
2008                 return I40E_SUCCESS;
2009         }
2010
2011         /* get list of packet classification types */
2012         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
2013                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2014                 struct rte_pmd_i40e_ptype_info *pinfo;
2015                 struct i40e_profile_section_header *pctype;
2016                 struct i40e_profile_tlv_section_record *tlv;
2017
2018                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2019                 nb_proto_info = info_size /
2020                                         sizeof(struct rte_pmd_i40e_ptype_info);
2021                 for (i = 0; i < nb_proto_info; i++)
2022                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2023                                sizeof(struct rte_pmd_i40e_ptype_info));
2024                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2025                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2026                 nb_tlv = i40e_get_tlv_section_size(pctype);
2027                 if (nb_tlv == 0)
2028                         return I40E_SUCCESS;
2029                 if (nb_proto_info < nb_tlv) {
2030                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2031                         return -EINVAL;
2032                 }
2033
2034                 /* get number of records in the section */
2035                 nb_rec = pctype->section.size /
2036                                 sizeof(struct i40e_profile_tlv_section_record);
2037                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2038                 for (i = j = 0; i < nb_rec; j++) {
2039                         memcpy(&pinfo[j], tlv->data,
2040                                sizeof(struct rte_pmd_i40e_ptype_info));
2041                         i += tlv->len;
2042                         tlv = &tlv[tlv->len];
2043                 }
2044                 return I40E_SUCCESS;
2045         }
2046
2047         /* get number of packet types */
2048         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2049                 struct i40e_profile_section_header *ptype;
2050
2051                 if (info_size < sizeof(uint32_t)) {
2052                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2053                         return -EINVAL;
2054                 }
2055                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2056                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2057                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2058                 return I40E_SUCCESS;
2059         }
2060
2061         /* get list of packet types */
2062         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2063                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2064                 struct rte_pmd_i40e_ptype_info *pinfo;
2065                 struct i40e_profile_section_header *ptype;
2066                 struct i40e_profile_tlv_section_record *tlv;
2067
2068                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2069                 nb_proto_info = info_size /
2070                                         sizeof(struct rte_pmd_i40e_ptype_info);
2071                 for (i = 0; i < nb_proto_info; i++)
2072                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2073                                sizeof(struct rte_pmd_i40e_ptype_info));
2074                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2075                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2076                 nb_tlv = i40e_get_tlv_section_size(ptype);
2077                 if (nb_tlv == 0)
2078                         return I40E_SUCCESS;
2079                 if (nb_proto_info < nb_tlv) {
2080                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2081                         return -EINVAL;
2082                 }
2083                 /* get number of records in the section */
2084                 nb_rec = ptype->section.size /
2085                                 sizeof(struct i40e_profile_tlv_section_record);
2086                 for (i = j = 0; i < nb_rec; j++) {
2087                         tlv = (struct i40e_profile_tlv_section_record *)
2088                                                                 &ptype[1 + i];
2089                         memcpy(&pinfo[j], tlv->data,
2090                                sizeof(struct rte_pmd_i40e_ptype_info));
2091                         i += tlv->len;
2092                 }
2093                 return I40E_SUCCESS;
2094         }
2095
2096         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2097         return -EINVAL;
2098 }
2099
2100 int
2101 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2102 {
2103         struct rte_eth_dev *dev;
2104         struct i40e_hw *hw;
2105         enum i40e_status_code status = I40E_SUCCESS;
2106
2107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2108
2109         dev = &rte_eth_devices[port];
2110
2111         if (!is_i40e_supported(dev))
2112                 return -ENOTSUP;
2113
2114         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2115                 return -EINVAL;
2116
2117         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2118
2119         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2120                                       size, 0, NULL);
2121
2122         return status;
2123 }
2124
2125 static int check_invalid_pkt_type(uint32_t pkt_type)
2126 {
2127         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2128
2129         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2130         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2131         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2132         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2133         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2134         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2135         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2136
2137         if (l2 &&
2138             l2 != RTE_PTYPE_L2_ETHER &&
2139             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2140             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2141             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2142             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2143             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2144             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2145             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2146                 return -1;
2147
2148         if (l3 &&
2149             l3 != RTE_PTYPE_L3_IPV4 &&
2150             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2151             l3 != RTE_PTYPE_L3_IPV6 &&
2152             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2153             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2154             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2155                 return -1;
2156
2157         if (l4 &&
2158             l4 != RTE_PTYPE_L4_TCP &&
2159             l4 != RTE_PTYPE_L4_UDP &&
2160             l4 != RTE_PTYPE_L4_FRAG &&
2161             l4 != RTE_PTYPE_L4_SCTP &&
2162             l4 != RTE_PTYPE_L4_ICMP &&
2163             l4 != RTE_PTYPE_L4_NONFRAG)
2164                 return -1;
2165
2166         if (tnl &&
2167             tnl != RTE_PTYPE_TUNNEL_IP &&
2168             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2169             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2170             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2171             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2172             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2173             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2174             tnl != RTE_PTYPE_TUNNEL_L2TP &&
2175             tnl != RTE_PTYPE_TUNNEL_ESP)
2176                 return -1;
2177
2178         if (il2 &&
2179             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2180             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2181             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2182                 return -1;
2183
2184         if (il3 &&
2185             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2186             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2187             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2188             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2189             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2190             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2191                 return -1;
2192
2193         if (il4 &&
2194             il4 != RTE_PTYPE_INNER_L4_TCP &&
2195             il4 != RTE_PTYPE_INNER_L4_UDP &&
2196             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2197             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2198             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2199             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2200                 return -1;
2201
2202         return 0;
2203 }
2204
2205 static int check_invalid_ptype_mapping(
2206                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2207                 uint16_t count)
2208 {
2209         int i;
2210
2211         for (i = 0; i < count; i++) {
2212                 uint16_t ptype = mapping_table[i].hw_ptype;
2213                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2214
2215                 if (ptype >= I40E_MAX_PKT_TYPE)
2216                         return -1;
2217
2218                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2219                         continue;
2220
2221                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2222                         continue;
2223
2224                 if (check_invalid_pkt_type(pkt_type))
2225                         return -1;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int
2232 rte_pmd_i40e_ptype_mapping_update(
2233                         uint16_t port,
2234                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2235                         uint16_t count,
2236                         uint8_t exclusive)
2237 {
2238         struct rte_eth_dev *dev;
2239         struct i40e_adapter *ad;
2240         int i;
2241
2242         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2243
2244         dev = &rte_eth_devices[port];
2245
2246         if (!is_i40e_supported(dev))
2247                 return -ENOTSUP;
2248
2249         if (count > I40E_MAX_PKT_TYPE)
2250                 return -EINVAL;
2251
2252         if (check_invalid_ptype_mapping(mapping_items, count))
2253                 return -EINVAL;
2254
2255         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2256
2257         if (exclusive) {
2258                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2259                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2260         }
2261
2262         for (i = 0; i < count; i++)
2263                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2264                         = mapping_items[i].sw_ptype;
2265
2266         return 0;
2267 }
2268
2269 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2270 {
2271         struct rte_eth_dev *dev;
2272
2273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2274
2275         dev = &rte_eth_devices[port];
2276
2277         if (!is_i40e_supported(dev))
2278                 return -ENOTSUP;
2279
2280         i40e_set_default_ptype_table(dev);
2281
2282         return 0;
2283 }
2284
2285 int rte_pmd_i40e_ptype_mapping_get(
2286                         uint16_t port,
2287                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2288                         uint16_t size,
2289                         uint16_t *count,
2290                         uint8_t valid_only)
2291 {
2292         struct rte_eth_dev *dev;
2293         struct i40e_adapter *ad;
2294         int n = 0;
2295         uint16_t i;
2296
2297         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2298
2299         dev = &rte_eth_devices[port];
2300
2301         if (!is_i40e_supported(dev))
2302                 return -ENOTSUP;
2303
2304         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2305
2306         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2307                 if (n >= size)
2308                         break;
2309                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2310                         continue;
2311                 mapping_items[n].hw_ptype = i;
2312                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2313                 n++;
2314         }
2315
2316         *count = n;
2317         return 0;
2318 }
2319
2320 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2321                                        uint32_t target,
2322                                        uint8_t mask,
2323                                        uint32_t pkt_type)
2324 {
2325         struct rte_eth_dev *dev;
2326         struct i40e_adapter *ad;
2327         uint16_t i;
2328
2329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2330
2331         dev = &rte_eth_devices[port];
2332
2333         if (!is_i40e_supported(dev))
2334                 return -ENOTSUP;
2335
2336         if (!mask && check_invalid_pkt_type(target))
2337                 return -EINVAL;
2338
2339         if (check_invalid_pkt_type(pkt_type))
2340                 return -EINVAL;
2341
2342         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2343
2344         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2345                 if (mask) {
2346                         if ((target | ad->ptype_tbl[i]) == target &&
2347                             (target & ad->ptype_tbl[i]))
2348                                 ad->ptype_tbl[i] = pkt_type;
2349                 } else {
2350                         if (ad->ptype_tbl[i] == target)
2351                                 ad->ptype_tbl[i] = pkt_type;
2352                 }
2353         }
2354
2355         return 0;
2356 }
2357
2358 int
2359 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2360                              struct rte_ether_addr *mac_addr)
2361 {
2362         struct rte_eth_dev *dev;
2363         struct i40e_pf_vf *vf;
2364         struct i40e_vsi *vsi;
2365         struct i40e_pf *pf;
2366         struct i40e_mac_filter_info mac_filter;
2367         int ret;
2368
2369         if (mac_addr == NULL)
2370                 return -EINVAL;
2371
2372         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2373                 return -EINVAL;
2374
2375         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2376
2377         dev = &rte_eth_devices[port];
2378
2379         if (!is_i40e_supported(dev))
2380                 return -ENOTSUP;
2381
2382         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2383
2384         if (vf_id >= pf->vf_num || !pf->vfs)
2385                 return -EINVAL;
2386
2387         vf = &pf->vfs[vf_id];
2388         vsi = vf->vsi;
2389         if (!vsi) {
2390                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2391                 return -EINVAL;
2392         }
2393
2394         mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
2395         rte_ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2396         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2397         if (ret != I40E_SUCCESS) {
2398                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2399                 return -1;
2400         }
2401
2402         return 0;
2403 }
2404
2405 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2406 {
2407         struct rte_eth_dev *dev;
2408
2409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2410
2411         dev = &rte_eth_devices[port];
2412
2413         if (!is_i40e_supported(dev) &&
2414             !is_i40evf_supported(dev))
2415                 return -ENOTSUP;
2416
2417         i40e_set_default_pctype_table(dev);
2418
2419         return 0;
2420 }
2421
2422 int rte_pmd_i40e_flow_type_mapping_get(
2423                         uint16_t port,
2424                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2425 {
2426         struct rte_eth_dev *dev;
2427         struct i40e_adapter *ad;
2428         uint16_t i;
2429
2430         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2431
2432         dev = &rte_eth_devices[port];
2433
2434         if (!is_i40e_supported(dev) &&
2435             !is_i40evf_supported(dev))
2436                 return -ENOTSUP;
2437
2438         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2439
2440         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2441                 mapping_items[i].flow_type = i;
2442                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2443         }
2444
2445         return 0;
2446 }
2447
2448 int
2449 rte_pmd_i40e_flow_type_mapping_update(
2450                         uint16_t port,
2451                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2452                         uint16_t count,
2453                         uint8_t exclusive)
2454 {
2455         struct rte_eth_dev *dev;
2456         struct i40e_adapter *ad;
2457         int i;
2458
2459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2460
2461         dev = &rte_eth_devices[port];
2462
2463         if (!is_i40e_supported(dev) &&
2464             !is_i40evf_supported(dev))
2465                 return -ENOTSUP;
2466
2467         if (count > I40E_FLOW_TYPE_MAX)
2468                 return -EINVAL;
2469
2470         for (i = 0; i < count; i++)
2471                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2472                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2473                     (mapping_items[i].pctype &
2474                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2475                         return -EINVAL;
2476
2477         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2478
2479         if (exclusive) {
2480                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2481                         ad->pctypes_tbl[i] = 0ULL;
2482                 ad->flow_types_mask = 0ULL;
2483         }
2484
2485         for (i = 0; i < count; i++) {
2486                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2487                                                 mapping_items[i].pctype;
2488                 if (mapping_items[i].pctype)
2489                         ad->flow_types_mask |=
2490                                         (1ULL << mapping_items[i].flow_type);
2491                 else
2492                         ad->flow_types_mask &=
2493                                         ~(1ULL << mapping_items[i].flow_type);
2494         }
2495
2496         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2497                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2498
2499         return 0;
2500 }
2501
2502 int
2503 rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
2504                         const struct rte_ether_addr *vf_mac)
2505 {
2506         struct rte_eth_dev *dev;
2507         struct rte_ether_addr *mac;
2508         struct i40e_pf *pf;
2509         int vf_id;
2510         struct i40e_pf_vf *vf;
2511         uint16_t vf_num;
2512
2513         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2514         dev = &rte_eth_devices[port];
2515
2516         if (!is_i40e_supported(dev))
2517                 return -ENOTSUP;
2518
2519         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2520         vf_num = pf->vf_num;
2521
2522         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2523                 vf = &pf->vfs[vf_id];
2524                 mac = &vf->mac_addr;
2525
2526                 if (rte_is_same_ether_addr(mac, vf_mac))
2527                         return vf_id;
2528         }
2529
2530         return -EINVAL;
2531 }
2532
/* Push the software-cached queue-region layout into the main VSI's
 * TC/queue mapping via an admin-queue VSI update, then mirror the
 * accepted mapping back into the local VSI info.
 *
 * Each configured region becomes one TC entry: the region id selects the
 * TC slot, the region's start queue becomes the TC queue offset, and
 * log2(queue_num) becomes the TC queue-count exponent (queue_num is
 * guaranteed to be a power of two by i40e_queue_region_set_region).
 *
 * Returns 0 on success, -EINVAL when no region was configured, or the
 * admin-queue error code on update failure.
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* Start from a clean mapping; only configured regions repopulate it. */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	for (i = 0; i < region_info->queue_region_number; i++) {
		tc_index = region_info->region[i].region_id;
		/* bsf = log2(queue_num); valid because queue_num is a power of 2 */
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	/* Mark no section valid until the next explicit update. */
	vsi->info.valid_sections = 0;

	return 0;
}
2602
2603
2604 static int
2605 i40e_queue_region_set_region(struct i40e_pf *pf,
2606                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2607 {
2608         uint16_t i;
2609         struct i40e_vsi *main_vsi = pf->main_vsi;
2610         struct i40e_queue_regions *info = &pf->queue_region;
2611         int32_t ret = -EINVAL;
2612
2613         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2614                                 conf_ptr->queue_num <= 64)) {
2615                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2616                         "total number of queues do not exceed the VSI allocation");
2617                 return ret;
2618         }
2619
2620         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2621                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2622                 return ret;
2623         }
2624
2625         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2626                                         > main_vsi->nb_used_qps) {
2627                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2628                 return ret;
2629         }
2630
2631         for (i = 0; i < info->queue_region_number; i++)
2632                 if (conf_ptr->region_id == info->region[i].region_id)
2633                         break;
2634
2635         if (i == info->queue_region_number &&
2636                                 i <= I40E_REGION_MAX_INDEX) {
2637                 info->region[i].region_id = conf_ptr->region_id;
2638                 info->region[i].queue_num = conf_ptr->queue_num;
2639                 info->region[i].queue_start_index =
2640                         conf_ptr->queue_start_index;
2641                 info->queue_region_number++;
2642         } else {
2643                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2644                 return ret;
2645         }
2646
2647         return 0;
2648 }
2649
2650 static int
2651 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2652                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2653 {
2654         int32_t ret = -EINVAL;
2655         struct i40e_queue_regions *info = &pf->queue_region;
2656         uint16_t i, j;
2657         uint16_t region_index, flowtype_index;
2658
2659         /* For the pctype or hardware flowtype of packet,
2660          * the specific index for each type has been defined
2661          * in file i40e_type.h as enum i40e_filter_pctype.
2662          */
2663
2664         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2665                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2666                 return ret;
2667         }
2668
2669         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2670                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2671                 return ret;
2672         }
2673
2674
2675         for (i = 0; i < info->queue_region_number; i++)
2676                 if (rss_region_conf->region_id == info->region[i].region_id)
2677                         break;
2678
2679         if (i == info->queue_region_number) {
2680                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2681                 ret = -EINVAL;
2682                 return ret;
2683         }
2684         region_index = i;
2685
2686         for (i = 0; i < info->queue_region_number; i++) {
2687                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2688                         if (rss_region_conf->hw_flowtype ==
2689                                 info->region[i].hw_flowtype[j]) {
2690                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2691                                 return 0;
2692                         }
2693                 }
2694         }
2695
2696         flowtype_index = info->region[region_index].flowtype_num;
2697         info->region[region_index].hw_flowtype[flowtype_index] =
2698                                         rss_region_conf->hw_flowtype;
2699         info->region[region_index].flowtype_num++;
2700
2701         return 0;
2702 }
2703
/* Program the PFQF_HREGION registers from the software-cached
 * region/flow-type assignments.
 *
 * Each PFQF_HREGION register covers eight pctypes (index = pctype >> 3);
 * within a register, the low three bits of the pctype select which of
 * the eight REGION_n/OVERRIDE_ENA_n field pairs to write. For every
 * cached (region, pctype) pair this does a read-modify-write of the
 * corresponding register, setting the region id and the override-enable
 * bit for that pctype slot.
 */
static void
i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	uint8_t hw_flowtype;
	uint32_t pfqf_hregion;
	uint16_t i, j, index;
	struct i40e_queue_regions *info = &pf->queue_region;

	/* For the pctype or hardware flowtype of packet,
	 * the specific index for each type has been defined
	 * in file i40e_type.h as enum i40e_filter_pctype.
	 */

	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].flowtype_num; j++) {
			hw_flowtype = info->region[i].hw_flowtype[j];
			/* One HREGION register serves 8 pctypes. */
			index = hw_flowtype >> 3;
			pfqf_hregion =
				i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));

			/* Select the field pair by pctype slot (0..7). */
			if ((hw_flowtype & 0x7) == 0) {
				pfqf_hregion |= info->region[i].region_id <<
					I40E_PFQF_HREGION_REGION_0_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
			} else if ((hw_flowtype & 0x7) == 1) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_1_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
			} else if ((hw_flowtype & 0x7) == 2) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_2_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
			} else if ((hw_flowtype & 0x7) == 3) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_3_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
			} else if ((hw_flowtype & 0x7) == 4) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_4_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
			} else if ((hw_flowtype & 0x7) == 5) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_5_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
			} else if ((hw_flowtype & 0x7) == 6) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_6_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
			} else {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_7_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
			}

			i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
						pfqf_hregion);
		}
	}
}
2772
2773 static int
2774 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2775                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2776 {
2777         struct i40e_queue_regions *info = &pf->queue_region;
2778         int32_t ret = -EINVAL;
2779         uint16_t i, j, region_index;
2780
2781         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2782                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2783                 return ret;
2784         }
2785
2786         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2787                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2788                 return ret;
2789         }
2790
2791         for (i = 0; i < info->queue_region_number; i++)
2792                 if (rss_region_conf->region_id == info->region[i].region_id)
2793                         break;
2794
2795         if (i == info->queue_region_number) {
2796                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2797                 ret = -EINVAL;
2798                 return ret;
2799         }
2800
2801         region_index = i;
2802
2803         for (i = 0; i < info->queue_region_number; i++) {
2804                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2805                         if (info->region[i].user_priority[j] ==
2806                                 rss_region_conf->user_priority) {
2807                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2808                                 return 0;
2809                         }
2810                 }
2811         }
2812
2813         j = info->region[region_index].user_priority_num;
2814         info->region[region_index].user_priority[j] =
2815                                         rss_region_conf->user_priority;
2816         info->region[region_index].user_priority_num++;
2817
2818         return 0;
2819 }
2820
/* Build and apply a DCB configuration that maps user priorities to
 * queue regions (each region id doubles as a traffic class).
 *
 * If no cached region carries a user priority this is a no-op returning
 * success. Otherwise a fresh i40e_dcbx_config is assembled: ETS
 * bandwidth split evenly across regions (remainder spread one percent
 * at a time so the total is exactly 100), ETS TSA for every TC, the
 * cached priority->region table, one default App entry, and PFC enabled
 * for the region bitmap. The result replaces hw->local_dcbx_config and
 * is committed via i40e_set_dcb_config().
 *
 * Returns 0 on success, -EINVAL when no region was set before, or the
 * firmware error from i40e_set_dcb_config().
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	struct i40e_dcbx_config dcb_cfg_local;
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	int32_t ret = -EINVAL;
	uint16_t i, j, prio_index, region_index;
	uint8_t tc_map, tc_bw, bw_lf, dcb_flag = 0;

	if (!info->queue_region_number) {
		PMD_DRV_LOG(ERR, "No queue region been set before");
		return ret;
	}

	/* DCB is only needed when some region carries a user priority. */
	for (i = 0; i < info->queue_region_number; i++) {
		if (info->region[i].user_priority_num) {
			dcb_flag = 1;
			break;
		}
	}

	if (dcb_flag == 0)
		return 0;

	dcb_cfg = &dcb_cfg_local;
	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

	/* assume each tc has the same bw */
	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
	/* to ensure the sum of tcbw is equal to 100 */
	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
	for (i = 0; i < bw_lf; i++)
		dcb_cfg->etscfg.tcbwtable[i]++;

	/* assume each tc has the same Transmission Selection Algorithm */
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

	/* Translate the cached priority lists into the ETS priority table:
	 * each user priority points at its region id (used as TC index).
	 */
	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].user_priority_num; j++) {
			prio_index = info->region[i].user_priority[j];
			region_index = info->region[i].region_id;
			dcb_cfg->etscfg.prioritytable[prio_index] =
						region_index;
		}
	}

	/* FW needs one App to configure HW */
	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

	/* One bit per configured region/TC. */
	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

	dcb_cfg->pfc.willing = 0;
	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->pfc.pfcenable = tc_map;

	/* Copy the new config to the current config */
	*old_cfg = *dcb_cfg;
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);

	if (ret) {
		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
2899
/* Commit or discard the whole cached queue-region configuration.
 *
 * on != 0: flush the cache to hardware — program PFQF_HREGION, update
 * the main VSI queue mapping, then apply the derived DCB config.
 *
 * on == 0: collapse the cache to a single default region spanning all
 * used queues, re-apply that mapping, re-run the default DCB init
 * (clearing I40E_FLAG_DCB on failure), and finally reset the cached
 * region configuration to its start-up state.
 *
 * Returns 0 on success or the first failing sub-step's error (in the
 * "off" path mapping/DCB failures are only logged, not returned).
 */
int
i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
	struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
{
	int32_t ret = -EINVAL;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_vsi *main_vsi = pf->main_vsi;

	if (on) {
		/* Order matters: flow-type registers first, then the VSI
		 * queue mapping, then DCB on top of the new mapping.
		 */
		i40e_queue_region_pf_flowtype_conf(hw, pf);

		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
			return ret;
		}

		ret = i40e_queue_region_dcb_configure(hw, pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
			return ret;
		}

		return 0;
	}

	if (info->queue_region_number) {
		/* Revert to one region covering every used queue. */
		info->queue_region_number = 1;
		info->region[0].queue_num = main_vsi->nb_used_qps;
		info->region[0].queue_start_index = 0;

		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");

		ret = i40e_dcb_init_configure(dev, TRUE);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
			pf->flags &= ~I40E_FLAG_DCB;
		}

		/* Reset the cached region config to driver defaults. */
		i40e_init_queue_region_conf(dev);
	}
	return 0;
}
2945
2946 static int
2947 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2948 {
2949         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2950         uint64_t hena;
2951
2952         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2953         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2954
2955         if (!hena)
2956                 return -ENOTSUP;
2957
2958         return 0;
2959 }
2960
2961 static int
2962 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2963                 struct i40e_queue_regions *regions_ptr)
2964 {
2965         struct i40e_queue_regions *info = &pf->queue_region;
2966
2967         rte_memcpy(regions_ptr, info,
2968                         sizeof(struct i40e_queue_regions));
2969
2970         return 0;
2971 }
2972
2973 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2974                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2975 {
2976         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2977         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2978         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2979         int32_t ret;
2980
2981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2982
2983         if (!is_i40e_supported(dev))
2984                 return -ENOTSUP;
2985
2986         if (!(!i40e_queue_region_pf_check_rss(pf)))
2987                 return -ENOTSUP;
2988
2989         /* This queue region feature only support pf by now. It should
2990          * be called after dev_start, and will be clear after dev_stop.
2991          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2992          * is just an enable function which server for other configuration,
2993          * it is for all configuration about queue region from up layer,
2994          * at first will only keep in DPDK softwarestored in driver,
2995          * only after "FLUSH_ON", it commit all configuration to HW.
2996          * Because PMD had to set hardware configuration at a time, so
2997          * it will record all up layer command at first.
2998          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2999          * just clean all configuration about queue region just now,
3000          * and restore all to DPDK i40e driver default
3001          * config when start up.
3002          */
3003
3004         switch (op_type) {
3005         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
3006                 ret = i40e_queue_region_set_region(pf,
3007                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3008                 break;
3009         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
3010                 ret = i40e_queue_region_set_flowtype(pf,
3011                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3012                 break;
3013         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
3014                 ret = i40e_queue_region_set_user_priority(pf,
3015                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3016                 break;
3017         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
3018                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
3019                 break;
3020         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
3021                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
3022                 break;
3023         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
3024                 ret = i40e_queue_region_get_all_info(pf,
3025                                 (struct i40e_queue_regions *)arg);
3026                 break;
3027         default:
3028                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
3029                             op_type);
3030                 ret = -EINVAL;
3031         }
3032
3033         I40E_WRITE_FLUSH(hw);
3034
3035         return ret;
3036 }
3037
3038 int rte_pmd_i40e_flow_add_del_packet_template(
3039                         uint16_t port,
3040                         const struct rte_pmd_i40e_pkt_template_conf *conf,
3041                         uint8_t add)
3042 {
3043         struct rte_eth_dev *dev = &rte_eth_devices[port];
3044         struct i40e_fdir_filter_conf filter_conf;
3045
3046         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3047
3048         if (conf == NULL)
3049                 return -EINVAL;
3050
3051         if (!is_i40e_supported(dev))
3052                 return -ENOTSUP;
3053
3054         memset(&filter_conf, 0, sizeof(filter_conf));
3055         filter_conf.soft_id = conf->soft_id;
3056         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3057         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3058         filter_conf.input.flow.raw_flow.length = conf->input.length;
3059         filter_conf.input.flow_ext.pkt_template = true;
3060
3061         filter_conf.action.rx_queue = conf->action.rx_queue;
3062         filter_conf.action.behavior =
3063                 (enum i40e_fdir_behavior)conf->action.behavior;
3064         filter_conf.action.report_status =
3065                 (enum i40e_fdir_status)conf->action.report_status;
3066         filter_conf.action.flex_off = conf->action.flex_off;
3067
3068         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3069 }
3070
/**
 * Read the input-set configuration for one pctype from hardware.
 *
 * Depending on inset_type this reads the hash, FDIR, or FDIR flexible
 * payload input-set and field-mask registers. For HASH and FDIR the
 * 64-bit input set is assembled from a high/low register pair; for
 * FDIR_FLX only a single 32-bit register exists, so the upper half of
 * inset->inset stays zero.
 *
 * @param port        ethdev port identifier
 * @param pctype      hardware packet classification type (0..63)
 * @param inset       output: input set and the two field-mask entries
 * @param inset_type  which input-set bank to read
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 */
int
rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));

	switch (inset_type) {
	case INSET_HASH:
		/* Get input set */
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |=
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
		/* Get field mask */
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
		break;
	case INSET_FDIR:
		/* High word first, then shift and OR in the low word. */
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |=
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
		break;
	case INSET_FDIR_FLX:
		/* Flexible payload has a single 32-bit input-set register. */
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	inset->inset = inset_reg;

	/* Unpack each mask register: field index in bits 21:16, mask in 15:0. */
	for (i = 0; i < 2; i++) {
		inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
		inset->mask[i].mask = mask_reg[i] & 0xFFFF;
	}

	return 0;
}
3142
/**
 * Write the input-set configuration for one pctype to hardware.
 *
 * Inverse of rte_pmd_i40e_inset_get(): splits the 64-bit input set into
 * the low/high register pair and packs each field-mask entry back into
 * its register layout. HASH input-set and HASH/FDIR mask registers are
 * global (shared across PFs) and go through the checked global-register
 * writer; the per-port FDIR registers use the plain checked writer.
 * Refused when multi-driver support is active, since global register
 * changes would affect other drivers sharing the NIC.
 *
 * @param port        ethdev port identifier
 * @param pctype      hardware packet classification type (0..63)
 * @param inset       input set and field masks to program
 * @param inset_type  which input-set bank to write
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 */
int
rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
		return -ENOTSUP;
	}

	inset_reg = inset->inset;
	/* Pack field index (bits 21:16) and mask (bits 15:0) per register. */
	for (i = 0; i < 2; i++)
		mask_reg[i] = (inset->mask[i].field_idx << 16) |
			inset->mask[i].mask;

	switch (inset_type) {
	case INSET_HASH:
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
					    (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
					    (uint32_t)((inset_reg >>
					     I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  mask_reg[i]);
		break;
	case INSET_FDIR:
		/* Input-set registers are per-port; mask registers are global. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				     (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
					      I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    mask_reg[i]);
		break;
	case INSET_FDIR_FLX:
		/* Flexible payload: single 32-bit input-set register. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
				     (uint32_t)(inset_reg & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
					     mask_reg[i]);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	I40E_WRITE_FLUSH(hw);
	return 0;
}
3216
3217 int
3218 rte_pmd_i40e_get_fdir_info(uint16_t port, struct rte_eth_fdir_info *fdir_info)
3219 {
3220         struct rte_eth_dev *dev;
3221
3222         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3223
3224         dev = &rte_eth_devices[port];
3225         if (!is_i40e_supported(dev))
3226                 return -ENOTSUP;
3227
3228         i40e_fdir_info_get(dev, fdir_info);
3229
3230         return 0;
3231 }
3232
3233 int
3234 rte_pmd_i40e_get_fdir_stats(uint16_t port, struct rte_eth_fdir_stats *fdir_stat)
3235 {
3236         struct rte_eth_dev *dev;
3237
3238         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3239
3240         dev = &rte_eth_devices[port];
3241         if (!is_i40e_supported(dev))
3242                 return -ENOTSUP;
3243
3244         i40e_fdir_stats_get(dev, fdir_stat);
3245
3246         return 0;
3247 }
3248
3249 int
3250 rte_pmd_i40e_set_gre_key_len(uint16_t port, uint8_t len)
3251 {
3252         struct rte_eth_dev *dev;
3253         struct i40e_pf *pf;
3254         struct i40e_hw *hw;
3255
3256         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3257
3258         dev = &rte_eth_devices[port];
3259         if (!is_i40e_supported(dev))
3260                 return -ENOTSUP;
3261
3262         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3263         hw = I40E_PF_TO_HW(pf);
3264
3265         return i40e_dev_set_gre_key_len(hw, len);
3266 }
3267
3268 int
3269 rte_pmd_i40e_set_switch_dev(uint16_t port_id, struct rte_eth_dev *switch_dev)
3270 {
3271         struct rte_eth_dev *i40e_dev;
3272         struct i40e_hw *hw;
3273
3274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3275
3276         i40e_dev = &rte_eth_devices[port_id];
3277         if (!is_i40e_supported(i40e_dev))
3278                 return -ENOTSUP;
3279
3280         hw = I40E_DEV_PRIVATE_TO_HW(i40e_dev->data->dev_private);
3281         if (!hw)
3282                 return -1;
3283
3284         hw->switch_dev = switch_dev;
3285
3286         return 0;
3287 }