net: add rte prefix to ether structures
[dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <rte_malloc.h>
7 #include <rte_tailq.h>
8
9 #include "base/i40e_prototype.h"
10 #include "base/i40e_dcb.h"
11 #include "i40e_ethdev.h"
12 #include "i40e_pf.h"
13 #include "i40e_rxtx.h"
14 #include "rte_pmd_i40e.h"
15
16 int
17 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
18 {
19         struct rte_eth_dev *dev;
20         struct i40e_pf *pf;
21
22         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
23
24         dev = &rte_eth_devices[port];
25
26         if (!is_i40e_supported(dev))
27                 return -ENOTSUP;
28
29         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
30
31         if (vf >= pf->vf_num || !pf->vfs) {
32                 PMD_DRV_LOG(ERR, "Invalid argument.");
33                 return -EINVAL;
34         }
35
36         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
37
38         return 0;
39 }
40
41 int
42 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
43 {
44         struct rte_eth_dev *dev;
45         struct i40e_pf *pf;
46         struct i40e_vsi *vsi;
47         struct i40e_hw *hw;
48         struct i40e_vsi_context ctxt;
49         int ret;
50
51         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52
53         dev = &rte_eth_devices[port];
54
55         if (!is_i40e_supported(dev))
56                 return -ENOTSUP;
57
58         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
59
60         if (vf_id >= pf->vf_num || !pf->vfs) {
61                 PMD_DRV_LOG(ERR, "Invalid argument.");
62                 return -EINVAL;
63         }
64
65         vsi = pf->vfs[vf_id].vsi;
66         if (!vsi) {
67                 PMD_DRV_LOG(ERR, "Invalid VSI.");
68                 return -EINVAL;
69         }
70
71         /* Check if it has been already on or off */
72         if (vsi->info.valid_sections &
73                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
74                 if (on) {
75                         if ((vsi->info.sec_flags &
76                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
77                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
78                                 return 0; /* already on */
79                 } else {
80                         if ((vsi->info.sec_flags &
81                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
82                                 return 0; /* already off */
83                 }
84         }
85
86         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
87         if (on)
88                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
89         else
90                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
91
92         memset(&ctxt, 0, sizeof(ctxt));
93         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
94         ctxt.seid = vsi->seid;
95
96         hw = I40E_VSI_TO_HW(vsi);
97         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
98         if (ret != I40E_SUCCESS) {
99                 ret = -ENOTSUP;
100                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
101         }
102
103         return ret;
104 }
105
106 static int
107 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
108 {
109         uint32_t j, k;
110         uint16_t vlan_id;
111         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
112         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
113         int ret;
114
115         for (j = 0; j < I40E_VFTA_SIZE; j++) {
116                 if (!vsi->vfta[j])
117                         continue;
118
119                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
120                         if (!(vsi->vfta[j] & (1 << k)))
121                                 continue;
122
123                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
124                         if (!vlan_id)
125                                 continue;
126
127                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
128                         if (add)
129                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
130                                                        &vlan_data, 1, NULL);
131                         else
132                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
133                                                           &vlan_data, 1, NULL);
134                         if (ret != I40E_SUCCESS) {
135                                 PMD_DRV_LOG(ERR,
136                                             "Failed to add/rm vlan filter");
137                                 return ret;
138                         }
139                 }
140         }
141
142         return I40E_SUCCESS;
143 }
144
145 int
146 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
147 {
148         struct rte_eth_dev *dev;
149         struct i40e_pf *pf;
150         struct i40e_vsi *vsi;
151         struct i40e_hw *hw;
152         struct i40e_vsi_context ctxt;
153         int ret;
154
155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
156
157         dev = &rte_eth_devices[port];
158
159         if (!is_i40e_supported(dev))
160                 return -ENOTSUP;
161
162         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
163
164         if (vf_id >= pf->vf_num || !pf->vfs) {
165                 PMD_DRV_LOG(ERR, "Invalid argument.");
166                 return -EINVAL;
167         }
168
169         vsi = pf->vfs[vf_id].vsi;
170         if (!vsi) {
171                 PMD_DRV_LOG(ERR, "Invalid VSI.");
172                 return -EINVAL;
173         }
174
175         /* Check if it has been already on or off */
176         if (vsi->vlan_anti_spoof_on == on)
177                 return 0; /* already on or off */
178
179         vsi->vlan_anti_spoof_on = on;
180         if (!vsi->vlan_filter_on) {
181                 ret = i40e_add_rm_all_vlan_filter(vsi, on);
182                 if (ret) {
183                         PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
184                         return -ENOTSUP;
185                 }
186         }
187
188         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
189         if (on)
190                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
191         else
192                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
193
194         memset(&ctxt, 0, sizeof(ctxt));
195         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
196         ctxt.seid = vsi->seid;
197
198         hw = I40E_VSI_TO_HW(vsi);
199         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
200         if (ret != I40E_SUCCESS) {
201                 ret = -ENOTSUP;
202                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
203         }
204
205         return ret;
206 }
207
208 static int
209 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
210 {
211         struct i40e_mac_filter *f;
212         struct i40e_macvlan_filter *mv_f;
213         int i, vlan_num;
214         enum rte_mac_filter_type filter_type;
215         int ret = I40E_SUCCESS;
216         void *temp;
217
218         /* remove all the MACs */
219         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
220                 vlan_num = vsi->vlan_num;
221                 filter_type = f->mac_info.filter_type;
222                 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
223                     filter_type == RTE_MACVLAN_HASH_MATCH) {
224                         if (vlan_num == 0) {
225                                 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
226                                 return I40E_ERR_PARAM;
227                         }
228                 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
229                            filter_type == RTE_MAC_HASH_MATCH)
230                         vlan_num = 1;
231
232                 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
233                 if (!mv_f) {
234                         PMD_DRV_LOG(ERR, "failed to allocate memory");
235                         return I40E_ERR_NO_MEMORY;
236                 }
237
238                 for (i = 0; i < vlan_num; i++) {
239                         mv_f[i].filter_type = filter_type;
240                         rte_memcpy(&mv_f[i].macaddr,
241                                          &f->mac_info.mac_addr,
242                                          ETH_ADDR_LEN);
243                 }
244                 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
245                     filter_type == RTE_MACVLAN_HASH_MATCH) {
246                         ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
247                                                          &f->mac_info.mac_addr);
248                         if (ret != I40E_SUCCESS) {
249                                 rte_free(mv_f);
250                                 return ret;
251                         }
252                 }
253
254                 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
255                 if (ret != I40E_SUCCESS) {
256                         rte_free(mv_f);
257                         return ret;
258                 }
259
260                 rte_free(mv_f);
261                 ret = I40E_SUCCESS;
262         }
263
264         return ret;
265 }
266
267 static int
268 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
269 {
270         struct i40e_mac_filter *f;
271         struct i40e_macvlan_filter *mv_f;
272         int i, vlan_num = 0;
273         int ret = I40E_SUCCESS;
274         void *temp;
275
276         /* restore all the MACs */
277         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
278                 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
279                     (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
280                         /**
281                          * If vlan_num is 0, that's the first time to add mac,
282                          * set mask for vlan_id 0.
283                          */
284                         if (vsi->vlan_num == 0) {
285                                 i40e_set_vlan_filter(vsi, 0, 1);
286                                 vsi->vlan_num = 1;
287                         }
288                         vlan_num = vsi->vlan_num;
289                 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
290                            (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
291                         vlan_num = 1;
292
293                 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
294                 if (!mv_f) {
295                         PMD_DRV_LOG(ERR, "failed to allocate memory");
296                         return I40E_ERR_NO_MEMORY;
297                 }
298
299                 for (i = 0; i < vlan_num; i++) {
300                         mv_f[i].filter_type = f->mac_info.filter_type;
301                         rte_memcpy(&mv_f[i].macaddr,
302                                          &f->mac_info.mac_addr,
303                                          ETH_ADDR_LEN);
304                 }
305
306                 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
307                     f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
308                         ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
309                                                          &f->mac_info.mac_addr);
310                         if (ret != I40E_SUCCESS) {
311                                 rte_free(mv_f);
312                                 return ret;
313                         }
314                 }
315
316                 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
317                 if (ret != I40E_SUCCESS) {
318                         rte_free(mv_f);
319                         return ret;
320                 }
321
322                 rte_free(mv_f);
323                 ret = I40E_SUCCESS;
324         }
325
326         return ret;
327 }
328
329 static int
330 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
331 {
332         struct i40e_vsi_context ctxt;
333         struct i40e_hw *hw;
334         int ret;
335
336         if (!vsi)
337                 return -EINVAL;
338
339         hw = I40E_VSI_TO_HW(vsi);
340
341         /* Use the FW API if FW >= v5.0 */
342         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
343                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
344                 return -ENOTSUP;
345         }
346
347         /* Check if it has been already on or off */
348         if (vsi->info.valid_sections &
349                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
350                 if (on) {
351                         if ((vsi->info.switch_id &
352                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
353                             I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
354                                 return 0; /* already on */
355                 } else {
356                         if ((vsi->info.switch_id &
357                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
358                                 return 0; /* already off */
359                 }
360         }
361
362         /* remove all the MAC and VLAN first */
363         ret = i40e_vsi_rm_mac_filter(vsi);
364         if (ret) {
365                 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
366                 return ret;
367         }
368         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
369                 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
370                 if (ret) {
371                         PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
372                         return ret;
373                 }
374         }
375
376         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
377         if (on)
378                 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
379         else
380                 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
381
382         memset(&ctxt, 0, sizeof(ctxt));
383         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
384         ctxt.seid = vsi->seid;
385
386         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
387         if (ret != I40E_SUCCESS) {
388                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
389                 return ret;
390         }
391
392         /* add all the MAC and VLAN back */
393         ret = i40e_vsi_restore_mac_filter(vsi);
394         if (ret)
395                 return ret;
396         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397                 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
398                 if (ret)
399                         return ret;
400         }
401
402         return ret;
403 }
404
405 int
406 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
407 {
408         struct rte_eth_dev *dev;
409         struct i40e_pf *pf;
410         struct i40e_pf_vf *vf;
411         struct i40e_vsi *vsi;
412         uint16_t vf_id;
413         int ret;
414
415         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
416
417         dev = &rte_eth_devices[port];
418
419         if (!is_i40e_supported(dev))
420                 return -ENOTSUP;
421
422         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
423
424         /* setup PF TX loopback */
425         vsi = pf->main_vsi;
426         ret = i40e_vsi_set_tx_loopback(vsi, on);
427         if (ret)
428                 return -ENOTSUP;
429
430         /* setup TX loopback for all the VFs */
431         if (!pf->vfs) {
432                 /* if no VF, do nothing. */
433                 return 0;
434         }
435
436         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
437                 vf = &pf->vfs[vf_id];
438                 vsi = vf->vsi;
439
440                 ret = i40e_vsi_set_tx_loopback(vsi, on);
441                 if (ret)
442                         return -ENOTSUP;
443         }
444
445         return ret;
446 }
447
448 int
449 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
450 {
451         struct rte_eth_dev *dev;
452         struct i40e_pf *pf;
453         struct i40e_vsi *vsi;
454         struct i40e_hw *hw;
455         int ret;
456
457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
458
459         dev = &rte_eth_devices[port];
460
461         if (!is_i40e_supported(dev))
462                 return -ENOTSUP;
463
464         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
465
466         if (vf_id >= pf->vf_num || !pf->vfs) {
467                 PMD_DRV_LOG(ERR, "Invalid argument.");
468                 return -EINVAL;
469         }
470
471         vsi = pf->vfs[vf_id].vsi;
472         if (!vsi) {
473                 PMD_DRV_LOG(ERR, "Invalid VSI.");
474                 return -EINVAL;
475         }
476
477         hw = I40E_VSI_TO_HW(vsi);
478
479         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
480                                                   on, NULL, true);
481         if (ret != I40E_SUCCESS) {
482                 ret = -ENOTSUP;
483                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
484         }
485
486         return ret;
487 }
488
489 int
490 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
491 {
492         struct rte_eth_dev *dev;
493         struct i40e_pf *pf;
494         struct i40e_vsi *vsi;
495         struct i40e_hw *hw;
496         int ret;
497
498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
499
500         dev = &rte_eth_devices[port];
501
502         if (!is_i40e_supported(dev))
503                 return -ENOTSUP;
504
505         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
506
507         if (vf_id >= pf->vf_num || !pf->vfs) {
508                 PMD_DRV_LOG(ERR, "Invalid argument.");
509                 return -EINVAL;
510         }
511
512         vsi = pf->vfs[vf_id].vsi;
513         if (!vsi) {
514                 PMD_DRV_LOG(ERR, "Invalid VSI.");
515                 return -EINVAL;
516         }
517
518         hw = I40E_VSI_TO_HW(vsi);
519
520         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
521                                                     on, NULL);
522         if (ret != I40E_SUCCESS) {
523                 ret = -ENOTSUP;
524                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
525         }
526
527         return ret;
528 }
529
530 int
531 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
532                              struct rte_ether_addr *mac_addr)
533 {
534         struct i40e_mac_filter *f;
535         struct rte_eth_dev *dev;
536         struct i40e_pf_vf *vf;
537         struct i40e_vsi *vsi;
538         struct i40e_pf *pf;
539         void *temp;
540
541         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
542                 return -EINVAL;
543
544         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
545
546         dev = &rte_eth_devices[port];
547
548         if (!is_i40e_supported(dev))
549                 return -ENOTSUP;
550
551         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
552
553         if (vf_id >= pf->vf_num || !pf->vfs)
554                 return -EINVAL;
555
556         vf = &pf->vfs[vf_id];
557         vsi = vf->vsi;
558         if (!vsi) {
559                 PMD_DRV_LOG(ERR, "Invalid VSI.");
560                 return -EINVAL;
561         }
562
563         ether_addr_copy(mac_addr, &vf->mac_addr);
564
565         /* Remove all existing mac */
566         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
567                 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
568                                 != I40E_SUCCESS)
569                         PMD_DRV_LOG(WARNING, "Delete MAC failed");
570
571         return 0;
572 }
573
/* All-zero Ethernet address, used to reset a VF's stored default MAC. */
static const struct rte_ether_addr null_mac_addr;
575
576 int
577 rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
578         struct rte_ether_addr *mac_addr)
579 {
580         struct rte_eth_dev *dev;
581         struct i40e_pf_vf *vf;
582         struct i40e_vsi *vsi;
583         struct i40e_pf *pf;
584
585         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
586                 return -EINVAL;
587
588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
589
590         dev = &rte_eth_devices[port];
591
592         if (!is_i40e_supported(dev))
593                 return -ENOTSUP;
594
595         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
596
597         if (vf_id >= pf->vf_num || !pf->vfs)
598                 return -EINVAL;
599
600         vf = &pf->vfs[vf_id];
601         vsi = vf->vsi;
602         if (!vsi) {
603                 PMD_DRV_LOG(ERR, "Invalid VSI.");
604                 return -EINVAL;
605         }
606
607         if (is_same_ether_addr(mac_addr, &vf->mac_addr))
608                 /* Reset the mac with NULL address */
609                 ether_addr_copy(&null_mac_addr, &vf->mac_addr);
610
611         /* Remove the mac */
612         i40e_vsi_delete_mac(vsi, mac_addr);
613
614         return 0;
615 }
616
617 /* Set vlan strip on/off for specific VF from host */
618 int
619 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
620 {
621         struct rte_eth_dev *dev;
622         struct i40e_pf *pf;
623         struct i40e_vsi *vsi;
624         int ret;
625
626         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
627
628         dev = &rte_eth_devices[port];
629
630         if (!is_i40e_supported(dev))
631                 return -ENOTSUP;
632
633         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
634
635         if (vf_id >= pf->vf_num || !pf->vfs) {
636                 PMD_DRV_LOG(ERR, "Invalid argument.");
637                 return -EINVAL;
638         }
639
640         vsi = pf->vfs[vf_id].vsi;
641
642         if (!vsi)
643                 return -EINVAL;
644
645         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
646         if (ret != I40E_SUCCESS) {
647                 ret = -ENOTSUP;
648                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
649         }
650
651         return ret;
652 }
653
654 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
655                                     uint16_t vlan_id)
656 {
657         struct rte_eth_dev *dev;
658         struct i40e_pf *pf;
659         struct i40e_hw *hw;
660         struct i40e_vsi *vsi;
661         struct i40e_vsi_context ctxt;
662         int ret;
663
664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
665
666         if (vlan_id > ETHER_MAX_VLAN_ID) {
667                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
668                 return -EINVAL;
669         }
670
671         dev = &rte_eth_devices[port];
672
673         if (!is_i40e_supported(dev))
674                 return -ENOTSUP;
675
676         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
677         hw = I40E_PF_TO_HW(pf);
678
679         /**
680          * return -ENODEV if SRIOV not enabled, VF number not configured
681          * or no queue assigned.
682          */
683         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
684             pf->vf_nb_qps == 0)
685                 return -ENODEV;
686
687         if (vf_id >= pf->vf_num || !pf->vfs) {
688                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
689                 return -EINVAL;
690         }
691
692         vsi = pf->vfs[vf_id].vsi;
693         if (!vsi) {
694                 PMD_DRV_LOG(ERR, "Invalid VSI.");
695                 return -EINVAL;
696         }
697
698         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
699         vsi->info.pvid = vlan_id;
700         if (vlan_id > 0)
701                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
702         else
703                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
704
705         memset(&ctxt, 0, sizeof(ctxt));
706         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
707         ctxt.seid = vsi->seid;
708
709         hw = I40E_VSI_TO_HW(vsi);
710         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
711         if (ret != I40E_SUCCESS) {
712                 ret = -ENOTSUP;
713                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
714         }
715
716         return ret;
717 }
718
719 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
720                                   uint8_t on)
721 {
722         struct rte_eth_dev *dev;
723         struct i40e_pf *pf;
724         struct i40e_vsi *vsi;
725         struct i40e_hw *hw;
726         struct i40e_mac_filter_info filter;
727         struct rte_ether_addr broadcast = {
728                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
729         int ret;
730
731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
732
733         if (on > 1) {
734                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
735                 return -EINVAL;
736         }
737
738         dev = &rte_eth_devices[port];
739
740         if (!is_i40e_supported(dev))
741                 return -ENOTSUP;
742
743         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
744         hw = I40E_PF_TO_HW(pf);
745
746         if (vf_id >= pf->vf_num || !pf->vfs) {
747                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
748                 return -EINVAL;
749         }
750
751         /**
752          * return -ENODEV if SRIOV not enabled, VF number not configured
753          * or no queue assigned.
754          */
755         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
756             pf->vf_nb_qps == 0) {
757                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
758                 return -ENODEV;
759         }
760
761         vsi = pf->vfs[vf_id].vsi;
762         if (!vsi) {
763                 PMD_DRV_LOG(ERR, "Invalid VSI.");
764                 return -EINVAL;
765         }
766
767         if (on) {
768                 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
769                 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
770                 ret = i40e_vsi_add_mac(vsi, &filter);
771         } else {
772                 ret = i40e_vsi_delete_mac(vsi, &broadcast);
773         }
774
775         if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
776                 ret = -ENOTSUP;
777                 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
778         } else {
779                 ret = 0;
780         }
781
782         return ret;
783 }
784
785 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
786 {
787         struct rte_eth_dev *dev;
788         struct i40e_pf *pf;
789         struct i40e_hw *hw;
790         struct i40e_vsi *vsi;
791         struct i40e_vsi_context ctxt;
792         int ret;
793
794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
795
796         if (on > 1) {
797                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
798                 return -EINVAL;
799         }
800
801         dev = &rte_eth_devices[port];
802
803         if (!is_i40e_supported(dev))
804                 return -ENOTSUP;
805
806         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
807         hw = I40E_PF_TO_HW(pf);
808
809         /**
810          * return -ENODEV if SRIOV not enabled, VF number not configured
811          * or no queue assigned.
812          */
813         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
814             pf->vf_nb_qps == 0) {
815                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
816                 return -ENODEV;
817         }
818
819         if (vf_id >= pf->vf_num || !pf->vfs) {
820                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
821                 return -EINVAL;
822         }
823
824         vsi = pf->vfs[vf_id].vsi;
825         if (!vsi) {
826                 PMD_DRV_LOG(ERR, "Invalid VSI.");
827                 return -EINVAL;
828         }
829
830         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
831         if (on) {
832                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
833                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
834         } else {
835                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
836                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
837         }
838
839         memset(&ctxt, 0, sizeof(ctxt));
840         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
841         ctxt.seid = vsi->seid;
842
843         hw = I40E_VSI_TO_HW(vsi);
844         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
845         if (ret != I40E_SUCCESS) {
846                 ret = -ENOTSUP;
847                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
848         }
849
850         return ret;
851 }
852
853 static int
854 i40e_vlan_filter_count(struct i40e_vsi *vsi)
855 {
856         uint32_t j, k;
857         uint16_t vlan_id;
858         int count = 0;
859
860         for (j = 0; j < I40E_VFTA_SIZE; j++) {
861                 if (!vsi->vfta[j])
862                         continue;
863
864                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
865                         if (!(vsi->vfta[j] & (1 << k)))
866                                 continue;
867
868                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
869                         if (!vlan_id)
870                                 continue;
871
872                         count++;
873                 }
874         }
875
876         return count;
877 }
878
879 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
880                                     uint64_t vf_mask, uint8_t on)
881 {
882         struct rte_eth_dev *dev;
883         struct i40e_pf *pf;
884         struct i40e_hw *hw;
885         struct i40e_vsi *vsi;
886         uint16_t vf_idx;
887         int ret = I40E_SUCCESS;
888
889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
890
891         dev = &rte_eth_devices[port];
892
893         if (!is_i40e_supported(dev))
894                 return -ENOTSUP;
895
896         if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
897                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
898                 return -EINVAL;
899         }
900
901         if (vf_mask == 0) {
902                 PMD_DRV_LOG(ERR, "No VF.");
903                 return -EINVAL;
904         }
905
906         if (on > 1) {
907                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
908                 return -EINVAL;
909         }
910
911         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
912         hw = I40E_PF_TO_HW(pf);
913
914         /**
915          * return -ENODEV if SRIOV not enabled, VF number not configured
916          * or no queue assigned.
917          */
918         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
919             pf->vf_nb_qps == 0) {
920                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
921                 return -ENODEV;
922         }
923
924         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
925                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
926                         vsi = pf->vfs[vf_idx].vsi;
927                         if (on) {
928                                 if (!vsi->vlan_filter_on) {
929                                         vsi->vlan_filter_on = true;
930                                         i40e_aq_set_vsi_vlan_promisc(hw,
931                                                                      vsi->seid,
932                                                                      false,
933                                                                      NULL);
934                                         if (!vsi->vlan_anti_spoof_on)
935                                                 i40e_add_rm_all_vlan_filter(
936                                                         vsi, true);
937                                 }
938                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
939                         } else {
940                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
941
942                                 if (!i40e_vlan_filter_count(vsi)) {
943                                         vsi->vlan_filter_on = false;
944                                         i40e_aq_set_vsi_vlan_promisc(hw,
945                                                                      vsi->seid,
946                                                                      true,
947                                                                      NULL);
948                                 }
949                         }
950                 }
951         }
952
953         if (ret != I40E_SUCCESS) {
954                 ret = -ENOTSUP;
955                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
956         }
957
958         return ret;
959 }
960
961 int
962 rte_pmd_i40e_get_vf_stats(uint16_t port,
963                           uint16_t vf_id,
964                           struct rte_eth_stats *stats)
965 {
966         struct rte_eth_dev *dev;
967         struct i40e_pf *pf;
968         struct i40e_vsi *vsi;
969
970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
971
972         dev = &rte_eth_devices[port];
973
974         if (!is_i40e_supported(dev))
975                 return -ENOTSUP;
976
977         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
978
979         if (vf_id >= pf->vf_num || !pf->vfs) {
980                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
981                 return -EINVAL;
982         }
983
984         vsi = pf->vfs[vf_id].vsi;
985         if (!vsi) {
986                 PMD_DRV_LOG(ERR, "Invalid VSI.");
987                 return -EINVAL;
988         }
989
990         i40e_update_vsi_stats(vsi);
991
992         stats->ipackets = vsi->eth_stats.rx_unicast +
993                         vsi->eth_stats.rx_multicast +
994                         vsi->eth_stats.rx_broadcast;
995         stats->opackets = vsi->eth_stats.tx_unicast +
996                         vsi->eth_stats.tx_multicast +
997                         vsi->eth_stats.tx_broadcast;
998         stats->ibytes   = vsi->eth_stats.rx_bytes;
999         stats->obytes   = vsi->eth_stats.tx_bytes;
1000         stats->ierrors  = vsi->eth_stats.rx_discards;
1001         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
1002
1003         return 0;
1004 }
1005
1006 int
1007 rte_pmd_i40e_reset_vf_stats(uint16_t port,
1008                             uint16_t vf_id)
1009 {
1010         struct rte_eth_dev *dev;
1011         struct i40e_pf *pf;
1012         struct i40e_vsi *vsi;
1013
1014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1015
1016         dev = &rte_eth_devices[port];
1017
1018         if (!is_i40e_supported(dev))
1019                 return -ENOTSUP;
1020
1021         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1022
1023         if (vf_id >= pf->vf_num || !pf->vfs) {
1024                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1025                 return -EINVAL;
1026         }
1027
1028         vsi = pf->vfs[vf_id].vsi;
1029         if (!vsi) {
1030                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1031                 return -EINVAL;
1032         }
1033
1034         vsi->offset_loaded = false;
1035         i40e_update_vsi_stats(vsi);
1036
1037         return 0;
1038 }
1039
/*
 * Set the max bandwidth of a VF's VSI.
 *
 * @param port   DPDK port id of the PF.
 * @param vf_id  VF index on that port.
 * @param bw     Bandwidth limit in Mbps; must be a multiple of
 *               I40E_QOS_BW_GRANULARITY and no more than I40E_QOS_BW_MAX.
 *               0 disables the limit.
 * @return 0 on success (or when nothing changed), negative errno otherwise.
 */
int
rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        int ret = 0;
        int i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (bw > I40E_QOS_BW_MAX) {
                PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
                            I40E_QOS_BW_MAX);
                return -EINVAL;
        }

        if (bw % I40E_QOS_BW_GRANULARITY) {
                PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
                            I40E_QOS_BW_GRANULARITY);
                return -EINVAL;
        }

        /* Convert Mbps into the hardware's bandwidth-credit unit. */
        bw /= I40E_QOS_BW_GRANULARITY;

        hw = I40E_VSI_TO_HW(vsi);

        /* No change. */
        if (bw == vsi->bw_info.bw_limit) {
                PMD_DRV_LOG(INFO,
                            "No change for VF max bandwidth. Nothing to do.");
                return 0;
        }

        /**
         * VF bandwidth limitation and TC bandwidth limitation cannot be
         * enabled in parallel, quit if TC bandwidth limitation is enabled.
         *
         * If bw is 0, means disable bandwidth limitation. Then no need to
         * check TC bandwidth limitation.
         */
        if (bw) {
                /* Any enabled TC with nonzero credits means a TC limit
                 * is active on this VF.
                 */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if ((vsi->enabled_tc & BIT_ULL(i)) &&
                            vsi->bw_info.bw_ets_credits[i])
                                break;
                }
                if (i != I40E_MAX_TRAFFIC_CLASS) {
                        PMD_DRV_LOG(ERR,
                                    "TC max bandwidth has been set on this VF,"
                                    " please disable it first.");
                        return -EINVAL;
                }
        }

        ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d bandwidth, err(%d).",
                            vf_id, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        vsi->bw_info.bw_limit = (uint16_t)bw;
        vsi->bw_info.bw_max = 0;

        return 0;
}
1128
/*
 * Set the relative bandwidth weights of a VF's enabled TCs.
 *
 * @param port      DPDK port id of the PF.
 * @param vf_id     VF index on that port.
 * @param tc_num    number of entries in @bw_weight; must equal the number
 *                  of TCs enabled on the VF's VSI.
 * @param bw_weight per-TC weights, each >= 1, summing to exactly 100.
 *                  bw_weight[j] maps to the j-th *enabled* TC in ascending
 *                  TC-index order.
 * @return 0 on success (or when nothing changed), negative errno otherwise.
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
                                uint8_t tc_num, uint8_t *bw_weight)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
        int ret = 0;
        int i, j;
        uint16_t sum;
        bool b_change = false;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
                            I40E_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        /* The caller must provide exactly one weight per enabled TC. */
        sum = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i))
                        sum++;
        }
        if (sum != tc_num) {
                PMD_DRV_LOG(ERR,
                            "Weight should be set for all %d enabled TCs.",
                            sum);
                return -EINVAL;
        }

        /* Weights are percentages: each >= 1 and totalling 100. */
        sum = 0;
        for (i = 0; i < tc_num; i++) {
                if (!bw_weight[i]) {
                        PMD_DRV_LOG(ERR,
                                    "The weight should be 1 at least.");
                        return -EINVAL;
                }
                sum += bw_weight[i];
        }
        if (sum != 100) {
                PMD_DRV_LOG(ERR,
                            "The summary of the TC weight should be 100.");
                return -EINVAL;
        }

        /**
         * Create the configuration for all the TCs.
         */
        memset(&tc_bw, 0, sizeof(tc_bw));
        tc_bw.tc_valid_bits = vsi->enabled_tc;
        /* j walks bw_weight[] while i walks TC indexes; only enabled
         * TCs consume an entry of bw_weight.
         */
        j = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        if (bw_weight[j] !=
                                vsi->bw_info.bw_ets_share_credits[i])
                                b_change = true;

                        tc_bw.tc_bw_credits[i] = bw_weight[j];
                        j++;
                }
        }

        /* No change. */
        if (!b_change) {
                PMD_DRV_LOG(INFO,
                            "No change for TC allocated bandwidth."
                            " Nothing to do.");
                return 0;
        }

        hw = I40E_VSI_TO_HW(vsi);

        ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d TC bandwidth weight, err(%d).",
                            vf_id, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        j = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
                        j++;
                }
        }

        return 0;
}
1242
/*
 * Set the max bandwidth of one TC on a VF's VSI.
 *
 * Because per-VF and per-TC bandwidth limits cannot coexist, an active
 * VF-level limit is disabled first when a nonzero TC limit is requested.
 * The firmware does not accept incremental TC updates, so the full
 * per-TC credit table is rebuilt from the cached values with only
 * @tc_no replaced.
 *
 * @param port   DPDK port id of the PF.
 * @param vf_id  VF index on that port.
 * @param tc_no  TC index (< I40E_MAX_TRAFFIC_CLASS); must be enabled.
 * @param bw     Bandwidth limit in Mbps; multiple of
 *               I40E_QOS_BW_GRANULARITY, at most I40E_QOS_BW_MAX;
 *               0 disables the limit.
 * @return 0 on success (or when nothing changed), negative errno otherwise.
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
                              uint8_t tc_no, uint32_t bw)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
        int ret = 0;
        int i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (bw > I40E_QOS_BW_MAX) {
                PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
                            I40E_QOS_BW_MAX);
                return -EINVAL;
        }

        if (bw % I40E_QOS_BW_GRANULARITY) {
                PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
                            I40E_QOS_BW_GRANULARITY);
                return -EINVAL;
        }

        /* Convert Mbps into the hardware's bandwidth-credit unit. */
        bw /= I40E_QOS_BW_GRANULARITY;

        if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
                            I40E_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        hw = I40E_VSI_TO_HW(vsi);

        if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
                PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
                            vf_id, tc_no);
                return -EINVAL;
        }

        /* No change. */
        if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
                PMD_DRV_LOG(INFO,
                            "No change for TC max bandwidth. Nothing to do.");
                return 0;
        }

        /**
         * VF bandwidth limitation and TC bandwidth limitation cannot be
         * enabled in parallel, disable VF bandwidth limitation if it's
         * enabled.
         * If bw is 0, means disable bandwidth limitation. Then no need to
         * care about VF bandwidth limitation configuration.
         */
        if (bw && vsi->bw_info.bw_limit) {
                ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to disable VF(%d)"
                                    " bandwidth limitation, err(%d).",
                                    vf_id, ret);
                        return -EINVAL;
                }

                PMD_DRV_LOG(INFO,
                            "VF max bandwidth is disabled according"
                            " to TC max bandwidth setting.");
        }

        /**
         * Get all the TCs' info to create a whole picture.
         * Because the incremental change isn't permitted.
         */
        memset(&tc_bw, 0, sizeof(tc_bw));
        tc_bw.tc_valid_bits = vsi->enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & BIT_ULL(i)) {
                        tc_bw.tc_bw_credits[i] =
                                rte_cpu_to_le_16(
                                        vsi->bw_info.bw_ets_credits[i]);
                }
        }
        /* Overwrite only the requested TC with the new value. */
        tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

        ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set VF %d TC %d max bandwidth, err(%d).",
                            vf_id, tc_no, ret);
                return -EINVAL;
        }

        /* Store the configuration. */
        vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

        return 0;
}
1360
/*
 * Enable strict-priority scheduling for the TCs set in @tc_map on the
 * port's main VEB.
 *
 * DCBx (LLDP) is stopped the first time strict priority is enabled and
 * restarted when @tc_map goes back to 0. Depending on the current state,
 * the switching-component ETS configuration is enabled, modified or
 * disabled via the corresponding AQ opcode.
 *
 * @param port   DPDK port id of the PF.
 * @param tc_map bitmap of TCs to run in strict priority; must be a
 *               subset of the VEB's enabled TCs. 0 disables the feature.
 * @return 0 on success (or when nothing changed), negative errno otherwise.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;
        struct i40e_veb *veb;
        struct i40e_hw *hw;
        struct i40e_aqc_configure_switching_comp_ets_data ets_data;
        int i;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        vsi = pf->main_vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        veb = vsi->veb;
        if (!veb) {
                PMD_DRV_LOG(ERR, "Invalid VEB.");
                return -EINVAL;
        }

        if ((tc_map & veb->enabled_tc) != tc_map) {
                PMD_DRV_LOG(ERR,
                            "TC bitmap isn't the subset of enabled TCs 0x%x.",
                            veb->enabled_tc);
                return -EINVAL;
        }

        if (tc_map == veb->strict_prio_tc) {
                PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
                return 0;
        }

        hw = I40E_VSI_TO_HW(vsi);

        /* Disable DCBx if it's the first time to set strict priority. */
        if (!veb->strict_prio_tc) {
                ret = i40e_aq_stop_lldp(hw, true, NULL);
                if (ret)
                        /* Non-fatal: DCBx may already be stopped. */
                        PMD_DRV_LOG(INFO,
                                    "Failed to disable DCBx as it's already"
                                    " disabled.");
                else
                        PMD_DRV_LOG(INFO,
                                    "DCBx is disabled according to strict"
                                    " priority setting.");
        }

        memset(&ets_data, 0, sizeof(ets_data));
        ets_data.tc_valid_bits = veb->enabled_tc;
        ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
        ets_data.tc_strict_priority_flags = tc_map;
        /* Get all TCs' bandwidth. */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (veb->enabled_tc & BIT_ULL(i)) {
                        /* For rubust, if bandwidth is 0, use 1 instead. */
                        if (veb->bw_info.bw_ets_share_credits[i])
                                ets_data.tc_bw_share_credits[i] =
                                        veb->bw_info.bw_ets_share_credits[i];
                        else
                                ets_data.tc_bw_share_credits[i] =
                                        I40E_QOS_BW_WEIGHT_MIN;
                }
        }

        /* Pick the AQ opcode: enable on first use, modify while active,
         * disable when the new map is empty.
         */
        if (!veb->strict_prio_tc)
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
                        NULL);
        else if (tc_map)
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
                        NULL);
        else
                ret = i40e_aq_config_switch_comp_ets(
                        hw, veb->uplink_seid,
                        &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
                        NULL);

        if (ret) {
                PMD_DRV_LOG(ERR,
                            "Failed to set TCs' strict priority mode."
                            " err (%d)", ret);
                return -EINVAL;
        }

        veb->strict_prio_tc = tc_map;

        /* Enable DCBx again, if all the TCs' strict priority disabled. */
        if (!tc_map) {
                ret = i40e_aq_start_lldp(hw, NULL);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to enable DCBx, err(%d).", ret);
                        return -EINVAL;
                }

                PMD_DRV_LOG(INFO,
                            "DCBx is enabled again according to strict"
                            " priority setting.");
        }

        return ret;
}
1479
1480 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1481 #define I40E_MAX_PROFILE_NUM 16
1482
1483 static void
1484 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1485                                uint32_t track_id, uint8_t *profile_info_sec,
1486                                bool add)
1487 {
1488         struct i40e_profile_section_header *sec = NULL;
1489         struct i40e_profile_info *pinfo;
1490
1491         sec = (struct i40e_profile_section_header *)profile_info_sec;
1492         sec->tbl_size = 1;
1493         sec->data_end = sizeof(struct i40e_profile_section_header) +
1494                 sizeof(struct i40e_profile_info);
1495         sec->section.type = SECTION_TYPE_INFO;
1496         sec->section.offset = sizeof(struct i40e_profile_section_header);
1497         sec->section.size = sizeof(struct i40e_profile_info);
1498         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1499                                              sec->section.offset);
1500         pinfo->track_id = track_id;
1501         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1502         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1503         if (add)
1504                 pinfo->op = I40E_DDP_ADD_TRACKID;
1505         else
1506                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1507 }
1508
1509 static enum i40e_status_code
1510 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1511 {
1512         enum i40e_status_code status = I40E_SUCCESS;
1513         struct i40e_profile_section_header *sec;
1514         uint32_t track_id;
1515         uint32_t offset = 0;
1516         uint32_t info = 0;
1517
1518         sec = (struct i40e_profile_section_header *)profile_info_sec;
1519         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1520                                          sec->section.offset))->track_id;
1521
1522         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1523                                    track_id, &offset, &info, NULL);
1524         if (status)
1525                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1526                             "offset %d, info %d",
1527                             offset, info);
1528
1529         return status;
1530 }
1531
1532 /* Check if the profile info exists */
1533 static int
1534 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1535 {
1536         struct rte_eth_dev *dev = &rte_eth_devices[port];
1537         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1538         uint8_t *buff;
1539         struct rte_pmd_i40e_profile_list *p_list;
1540         struct rte_pmd_i40e_profile_info *pinfo, *p;
1541         uint32_t i;
1542         int ret;
1543         static const uint32_t group_mask = 0x00ff0000;
1544
1545         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1546                              sizeof(struct i40e_profile_section_header));
1547         if (pinfo->track_id == 0) {
1548                 PMD_DRV_LOG(INFO, "Read-only profile.");
1549                 return 0;
1550         }
1551         buff = rte_zmalloc("pinfo_list",
1552                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1553                            0);
1554         if (!buff) {
1555                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1556                 return -1;
1557         }
1558
1559         ret = i40e_aq_get_ddp_list(
1560                 hw, (void *)buff,
1561                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1562                 0, NULL);
1563         if (ret) {
1564                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1565                 rte_free(buff);
1566                 return -1;
1567         }
1568         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1569         for (i = 0; i < p_list->p_count; i++) {
1570                 p = &p_list->p_info[i];
1571                 if (pinfo->track_id == p->track_id) {
1572                         PMD_DRV_LOG(INFO, "Profile exists.");
1573                         rte_free(buff);
1574                         return 1;
1575                 }
1576         }
1577         /* profile with group id 0xff is compatible with any other profile */
1578         if ((pinfo->track_id & group_mask) == group_mask) {
1579                 rte_free(buff);
1580                 return 0;
1581         }
1582         for (i = 0; i < p_list->p_count; i++) {
1583                 p = &p_list->p_info[i];
1584                 if ((p->track_id & group_mask) == 0) {
1585                         PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1586                         rte_free(buff);
1587                         return 2;
1588                 }
1589         }
1590         for (i = 0; i < p_list->p_count; i++) {
1591                 p = &p_list->p_info[i];
1592                 if ((p->track_id & group_mask) == group_mask)
1593                         continue;
1594                 if ((pinfo->track_id & group_mask) !=
1595                     (p->track_id & group_mask)) {
1596                         PMD_DRV_LOG(INFO, "Profile of different group exists.");
1597                         rte_free(buff);
1598                         return 3;
1599                 }
1600         }
1601
1602         rte_free(buff);
1603         return 0;
1604 }
1605
/*
 * Load (add/write-only) or roll back (delete) a DDP package on the port.
 *
 * The buffer is validated (size, segment count, metadata and profile
 * segments, track_id), checked against the profiles already loaded in
 * firmware, then written or rolled back. For tracked profiles the
 * firmware's loaded-profiles list is updated afterwards, and for
 * add/delete the driver's customized-info state is refreshed.
 *
 * @param buff package image; must stay valid for the call.
 * @param size size of @buff in bytes.
 * @param op   RTE_PMD_I40E_PKG_OP_WR_ADD / WR_ONLY / WR_DEL.
 * @return 0 on success; -ENOTSUP/-ENODEV/-EINVAL on bad arguments,
 *         -EEXIST if adding an already-loaded/conflicting profile,
 *         -EACCES if deleting a profile that is not loaded,
 *         otherwise the firmware status code.
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
                                 uint32_t size,
                                 enum rte_pmd_i40e_package_op op)
{
        struct rte_eth_dev *dev;
        struct i40e_hw *hw;
        struct i40e_package_header *pkg_hdr;
        struct i40e_generic_seg_header *profile_seg_hdr;
        struct i40e_generic_seg_header *metadata_seg_hdr;
        uint32_t track_id;
        uint8_t *profile_info_sec;
        int is_exist;
        enum i40e_status_code status = I40E_SUCCESS;
        static const uint32_t type_mask = 0xff000000;

        if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
                op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
                op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
                PMD_DRV_LOG(ERR, "Operation not supported.");
                return -ENOTSUP;
        }

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Minimum plausible package: header + metadata segment + two
         * 32-bit words.
         */
        if (size < (sizeof(struct i40e_package_header) +
                    sizeof(struct i40e_metadata_segment) +
                    sizeof(uint32_t) * 2)) {
                PMD_DRV_LOG(ERR, "Buff is invalid.");
                return -EINVAL;
        }

        pkg_hdr = (struct i40e_package_header *)buff;

        /* NOTE(review): this is effectively a NULL check on buff itself,
         * performed after the size test above (which does not dereference
         * buff) - consider checking buff at entry instead.
         */
        if (!pkg_hdr) {
                PMD_DRV_LOG(ERR, "Failed to fill the package structure");
                return -EINVAL;
        }

        if (pkg_hdr->segment_count < 2) {
                PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
                return -EINVAL;
        }

        /* Find metadata segment */
        metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
                                                        pkg_hdr);
        if (!metadata_seg_hdr) {
                PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
                return -EINVAL;
        }
        track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
        if (track_id == I40E_DDP_TRACKID_INVALID) {
                PMD_DRV_LOG(ERR, "Invalid track_id");
                return -EINVAL;
        }

        /* force read-only track_id for type 0 */
        if ((track_id & type_mask) == 0)
                track_id = 0;

        /* Find profile segment */
        profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
                                                       pkg_hdr);
        if (!profile_seg_hdr) {
                PMD_DRV_LOG(ERR, "Failed to find profile segment header");
                return -EINVAL;
        }

        profile_info_sec = rte_zmalloc(
                "i40e_profile_info",
                sizeof(struct i40e_profile_section_header) +
                sizeof(struct i40e_profile_info),
                0);
        if (!profile_info_sec) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -EINVAL;
        }

        /* Check if the profile already loaded */
        i40e_generate_profile_info_sec(
                ((struct i40e_profile_segment *)profile_seg_hdr)->name,
                &((struct i40e_profile_segment *)profile_seg_hdr)->version,
                track_id, profile_info_sec,
                op == RTE_PMD_I40E_PKG_OP_WR_ADD);
        is_exist = i40e_check_profile_info(port, profile_info_sec);
        if (is_exist < 0) {
                PMD_DRV_LOG(ERR, "Failed to check profile.");
                rte_free(profile_info_sec);
                return -EINVAL;
        }

        /* Adding requires no conflicting profile; deleting requires an
         * exact match (is_exist == 1).
         */
        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
                if (is_exist) {
                        if (is_exist == 1)
                                PMD_DRV_LOG(ERR, "Profile already exists.");
                        else if (is_exist == 2)
                                PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
                        else if (is_exist == 3)
                                PMD_DRV_LOG(ERR, "Profile of different group already exists");
                        i40e_update_customized_info(dev, buff, size, op);
                        rte_free(profile_info_sec);
                        return -EEXIST;
                }
        } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
                if (is_exist != 1) {
                        PMD_DRV_LOG(ERR, "Profile does not exist.");
                        rte_free(profile_info_sec);
                        return -EACCES;
                }
        }

        /* Delete rolls the profile back; add/write-only writes it. */
        if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
                status = i40e_rollback_profile(
                        hw,
                        (struct i40e_profile_segment *)profile_seg_hdr,
                        track_id);
                if (status) {
                        PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
                        rte_free(profile_info_sec);
                        return status;
                }
        } else {
                status = i40e_write_profile(
                        hw,
                        (struct i40e_profile_segment *)profile_seg_hdr,
                        track_id);
                if (status) {
                        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
                                PMD_DRV_LOG(ERR, "Failed to write profile for add.");
                        else
                                PMD_DRV_LOG(ERR, "Failed to write profile.");
                        rte_free(profile_info_sec);
                        return status;
                }
        }

        if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
                /* Modify loaded profiles info list */
                status = i40e_add_rm_profile_info(hw, profile_info_sec);
                if (status) {
                        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
                                PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
                        else
                                PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
                }
        }

        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
            op == RTE_PMD_I40E_PKG_OP_WR_DEL)
                i40e_update_customized_info(dev, buff, size, op);

        rte_free(profile_info_sec);
        return status;
}
1768
1769 /* Get number of tvl records in the section */
1770 static unsigned int
1771 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1772 {
1773         unsigned int i, nb_rec, nb_tlv = 0;
1774         struct i40e_profile_tlv_section_record *tlv;
1775
1776         if (!sec)
1777                 return nb_tlv;
1778
1779         /* get number of records in the section */
1780         nb_rec = sec->section.size /
1781                                 sizeof(struct i40e_profile_tlv_section_record);
1782         for (i = 0; i < nb_rec; ) {
1783                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1784                 i += tlv->len;
1785                 nb_tlv++;
1786         }
1787         return nb_tlv;
1788 }
1789
1790 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1791         uint8_t *info_buff, uint32_t info_size,
1792         enum rte_pmd_i40e_package_info type)
1793 {
1794         uint32_t ret_size;
1795         struct i40e_package_header *pkg_hdr;
1796         struct i40e_generic_seg_header *i40e_seg_hdr;
1797         struct i40e_generic_seg_header *note_seg_hdr;
1798         struct i40e_generic_seg_header *metadata_seg_hdr;
1799
1800         if (!info_buff) {
1801                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1802                 return -EINVAL;
1803         }
1804
1805         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1806                 sizeof(struct i40e_metadata_segment) +
1807                 sizeof(uint32_t) * 2)) {
1808                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1809                 return -EINVAL;
1810         }
1811
1812         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1813         if (pkg_hdr->segment_count < 2) {
1814                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1815                 return -EINVAL;
1816         }
1817
1818         /* Find metadata segment */
1819         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1820                 pkg_hdr);
1821
1822         /* Find global notes segment */
1823         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1824                 pkg_hdr);
1825
1826         /* Find i40e profile segment */
1827         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1828
1829         /* get global header info */
1830         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1831                 struct rte_pmd_i40e_profile_info *info =
1832                         (struct rte_pmd_i40e_profile_info *)info_buff;
1833
1834                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1835                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1836                         return -EINVAL;
1837                 }
1838
1839                 if (!metadata_seg_hdr) {
1840                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1841                         return -EINVAL;
1842                 }
1843
1844                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1845                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1846                 info->track_id =
1847                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1848
1849                 memcpy(info->name,
1850                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1851                         I40E_DDP_NAME_SIZE);
1852                 memcpy(&info->version,
1853                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1854                         sizeof(struct i40e_ddp_version));
1855                 return I40E_SUCCESS;
1856         }
1857
1858         /* get global note size */
1859         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1860                 if (info_size < sizeof(uint32_t)) {
1861                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1862                         return -EINVAL;
1863                 }
1864                 if (note_seg_hdr == NULL)
1865                         ret_size = 0;
1866                 else
1867                         ret_size = note_seg_hdr->size;
1868                 *(uint32_t *)info_buff = ret_size;
1869                 return I40E_SUCCESS;
1870         }
1871
1872         /* get global note */
1873         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1874                 if (note_seg_hdr == NULL)
1875                         return -ENOTSUP;
1876                 if (info_size < note_seg_hdr->size) {
1877                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1878                         return -EINVAL;
1879                 }
1880                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1881                 return I40E_SUCCESS;
1882         }
1883
1884         /* get i40e segment header info */
1885         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1886                 struct rte_pmd_i40e_profile_info *info =
1887                         (struct rte_pmd_i40e_profile_info *)info_buff;
1888
1889                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1890                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1891                         return -EINVAL;
1892                 }
1893
1894                 if (!metadata_seg_hdr) {
1895                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1896                         return -EINVAL;
1897                 }
1898
1899                 if (!i40e_seg_hdr) {
1900                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1901                         return -EINVAL;
1902                 }
1903
1904                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1905                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1906                 info->track_id =
1907                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1908
1909                 memcpy(info->name,
1910                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1911                         I40E_DDP_NAME_SIZE);
1912                 memcpy(&info->version,
1913                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1914                         sizeof(struct i40e_ddp_version));
1915                 return I40E_SUCCESS;
1916         }
1917
1918         /* get number of devices */
1919         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1920                 if (info_size < sizeof(uint32_t)) {
1921                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1922                         return -EINVAL;
1923                 }
1924                 *(uint32_t *)info_buff =
1925                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1926                 return I40E_SUCCESS;
1927         }
1928
1929         /* get list of devices */
1930         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1931                 uint32_t dev_num;
1932                 dev_num =
1933                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1934                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1935                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1936                         return -EINVAL;
1937                 }
1938                 memcpy(info_buff,
1939                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1940                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1941                 return I40E_SUCCESS;
1942         }
1943
1944         /* get number of protocols */
1945         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1946                 struct i40e_profile_section_header *proto;
1947
1948                 if (info_size < sizeof(uint32_t)) {
1949                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1950                         return -EINVAL;
1951                 }
1952                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1953                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1954                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1955                 return I40E_SUCCESS;
1956         }
1957
1958         /* get list of protocols */
1959         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1960                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1961                 struct rte_pmd_i40e_proto_info *pinfo;
1962                 struct i40e_profile_section_header *proto;
1963                 struct i40e_profile_tlv_section_record *tlv;
1964
1965                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1966                 nb_proto_info = info_size /
1967                                         sizeof(struct rte_pmd_i40e_proto_info);
1968                 for (i = 0; i < nb_proto_info; i++) {
1969                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1970                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1971                 }
1972                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1973                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1974                 nb_tlv = i40e_get_tlv_section_size(proto);
1975                 if (nb_tlv == 0)
1976                         return I40E_SUCCESS;
1977                 if (nb_proto_info < nb_tlv) {
1978                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1979                         return -EINVAL;
1980                 }
1981                 /* get number of records in the section */
1982                 nb_rec = proto->section.size /
1983                                 sizeof(struct i40e_profile_tlv_section_record);
1984                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1985                 for (i = j = 0; i < nb_rec; j++) {
1986                         pinfo[j].proto_id = tlv->data[0];
1987                         strlcpy(pinfo[j].name, (const char *)&tlv->data[1],
1988                                 I40E_DDP_NAME_SIZE);
1989                         i += tlv->len;
1990                         tlv = &tlv[tlv->len];
1991                 }
1992                 return I40E_SUCCESS;
1993         }
1994
1995         /* get number of packet classification types */
1996         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1997                 struct i40e_profile_section_header *pctype;
1998
1999                 if (info_size < sizeof(uint32_t)) {
2000                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2001                         return -EINVAL;
2002                 }
2003                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2004                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2005                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
2006                 return I40E_SUCCESS;
2007         }
2008
2009         /* get list of packet classification types */
2010         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
2011                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2012                 struct rte_pmd_i40e_ptype_info *pinfo;
2013                 struct i40e_profile_section_header *pctype;
2014                 struct i40e_profile_tlv_section_record *tlv;
2015
2016                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2017                 nb_proto_info = info_size /
2018                                         sizeof(struct rte_pmd_i40e_ptype_info);
2019                 for (i = 0; i < nb_proto_info; i++)
2020                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2021                                sizeof(struct rte_pmd_i40e_ptype_info));
2022                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2023                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2024                 nb_tlv = i40e_get_tlv_section_size(pctype);
2025                 if (nb_tlv == 0)
2026                         return I40E_SUCCESS;
2027                 if (nb_proto_info < nb_tlv) {
2028                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2029                         return -EINVAL;
2030                 }
2031
2032                 /* get number of records in the section */
2033                 nb_rec = pctype->section.size /
2034                                 sizeof(struct i40e_profile_tlv_section_record);
2035                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2036                 for (i = j = 0; i < nb_rec; j++) {
2037                         memcpy(&pinfo[j], tlv->data,
2038                                sizeof(struct rte_pmd_i40e_ptype_info));
2039                         i += tlv->len;
2040                         tlv = &tlv[tlv->len];
2041                 }
2042                 return I40E_SUCCESS;
2043         }
2044
2045         /* get number of packet types */
2046         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2047                 struct i40e_profile_section_header *ptype;
2048
2049                 if (info_size < sizeof(uint32_t)) {
2050                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2051                         return -EINVAL;
2052                 }
2053                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2054                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2055                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2056                 return I40E_SUCCESS;
2057         }
2058
2059         /* get list of packet types */
2060         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2061                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2062                 struct rte_pmd_i40e_ptype_info *pinfo;
2063                 struct i40e_profile_section_header *ptype;
2064                 struct i40e_profile_tlv_section_record *tlv;
2065
2066                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2067                 nb_proto_info = info_size /
2068                                         sizeof(struct rte_pmd_i40e_ptype_info);
2069                 for (i = 0; i < nb_proto_info; i++)
2070                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2071                                sizeof(struct rte_pmd_i40e_ptype_info));
2072                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2073                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2074                 nb_tlv = i40e_get_tlv_section_size(ptype);
2075                 if (nb_tlv == 0)
2076                         return I40E_SUCCESS;
2077                 if (nb_proto_info < nb_tlv) {
2078                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2079                         return -EINVAL;
2080                 }
2081                 /* get number of records in the section */
2082                 nb_rec = ptype->section.size /
2083                                 sizeof(struct i40e_profile_tlv_section_record);
2084                 for (i = j = 0; i < nb_rec; j++) {
2085                         tlv = (struct i40e_profile_tlv_section_record *)
2086                                                                 &ptype[1 + i];
2087                         memcpy(&pinfo[j], tlv->data,
2088                                sizeof(struct rte_pmd_i40e_ptype_info));
2089                         i += tlv->len;
2090                 }
2091                 return I40E_SUCCESS;
2092         }
2093
2094         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2095         return -EINVAL;
2096 }
2097
2098 int
2099 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2100 {
2101         struct rte_eth_dev *dev;
2102         struct i40e_hw *hw;
2103         enum i40e_status_code status = I40E_SUCCESS;
2104
2105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2106
2107         dev = &rte_eth_devices[port];
2108
2109         if (!is_i40e_supported(dev))
2110                 return -ENOTSUP;
2111
2112         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2113                 return -EINVAL;
2114
2115         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2116
2117         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2118                                       size, 0, NULL);
2119
2120         return status;
2121 }
2122
2123 static int check_invalid_pkt_type(uint32_t pkt_type)
2124 {
2125         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2126
2127         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2128         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2129         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2130         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2131         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2132         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2133         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2134
2135         if (l2 &&
2136             l2 != RTE_PTYPE_L2_ETHER &&
2137             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2138             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2139             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2140             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2141             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2142             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2143             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2144                 return -1;
2145
2146         if (l3 &&
2147             l3 != RTE_PTYPE_L3_IPV4 &&
2148             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2149             l3 != RTE_PTYPE_L3_IPV6 &&
2150             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2151             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2152             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2153                 return -1;
2154
2155         if (l4 &&
2156             l4 != RTE_PTYPE_L4_TCP &&
2157             l4 != RTE_PTYPE_L4_UDP &&
2158             l4 != RTE_PTYPE_L4_FRAG &&
2159             l4 != RTE_PTYPE_L4_SCTP &&
2160             l4 != RTE_PTYPE_L4_ICMP &&
2161             l4 != RTE_PTYPE_L4_NONFRAG)
2162                 return -1;
2163
2164         if (tnl &&
2165             tnl != RTE_PTYPE_TUNNEL_IP &&
2166             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2167             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2168             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2169             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2170             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2171             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2172             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2173             tnl != RTE_PTYPE_TUNNEL_L2TP)
2174                 return -1;
2175
2176         if (il2 &&
2177             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2178             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2179             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2180                 return -1;
2181
2182         if (il3 &&
2183             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2184             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2185             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2186             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2187             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2188             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2189                 return -1;
2190
2191         if (il4 &&
2192             il4 != RTE_PTYPE_INNER_L4_TCP &&
2193             il4 != RTE_PTYPE_INNER_L4_UDP &&
2194             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2195             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2196             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2197             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2198                 return -1;
2199
2200         return 0;
2201 }
2202
2203 static int check_invalid_ptype_mapping(
2204                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2205                 uint16_t count)
2206 {
2207         int i;
2208
2209         for (i = 0; i < count; i++) {
2210                 uint16_t ptype = mapping_table[i].hw_ptype;
2211                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2212
2213                 if (ptype >= I40E_MAX_PKT_TYPE)
2214                         return -1;
2215
2216                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2217                         continue;
2218
2219                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2220                         continue;
2221
2222                 if (check_invalid_pkt_type(pkt_type))
2223                         return -1;
2224         }
2225
2226         return 0;
2227 }
2228
2229 int
2230 rte_pmd_i40e_ptype_mapping_update(
2231                         uint16_t port,
2232                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2233                         uint16_t count,
2234                         uint8_t exclusive)
2235 {
2236         struct rte_eth_dev *dev;
2237         struct i40e_adapter *ad;
2238         int i;
2239
2240         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2241
2242         dev = &rte_eth_devices[port];
2243
2244         if (!is_i40e_supported(dev))
2245                 return -ENOTSUP;
2246
2247         if (count > I40E_MAX_PKT_TYPE)
2248                 return -EINVAL;
2249
2250         if (check_invalid_ptype_mapping(mapping_items, count))
2251                 return -EINVAL;
2252
2253         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2254
2255         if (exclusive) {
2256                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2257                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2258         }
2259
2260         for (i = 0; i < count; i++)
2261                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2262                         = mapping_items[i].sw_ptype;
2263
2264         return 0;
2265 }
2266
2267 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2268 {
2269         struct rte_eth_dev *dev;
2270
2271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2272
2273         dev = &rte_eth_devices[port];
2274
2275         if (!is_i40e_supported(dev))
2276                 return -ENOTSUP;
2277
2278         i40e_set_default_ptype_table(dev);
2279
2280         return 0;
2281 }
2282
2283 int rte_pmd_i40e_ptype_mapping_get(
2284                         uint16_t port,
2285                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2286                         uint16_t size,
2287                         uint16_t *count,
2288                         uint8_t valid_only)
2289 {
2290         struct rte_eth_dev *dev;
2291         struct i40e_adapter *ad;
2292         int n = 0;
2293         uint16_t i;
2294
2295         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2296
2297         dev = &rte_eth_devices[port];
2298
2299         if (!is_i40e_supported(dev))
2300                 return -ENOTSUP;
2301
2302         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2303
2304         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2305                 if (n >= size)
2306                         break;
2307                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2308                         continue;
2309                 mapping_items[n].hw_ptype = i;
2310                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2311                 n++;
2312         }
2313
2314         *count = n;
2315         return 0;
2316 }
2317
2318 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2319                                        uint32_t target,
2320                                        uint8_t mask,
2321                                        uint32_t pkt_type)
2322 {
2323         struct rte_eth_dev *dev;
2324         struct i40e_adapter *ad;
2325         uint16_t i;
2326
2327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2328
2329         dev = &rte_eth_devices[port];
2330
2331         if (!is_i40e_supported(dev))
2332                 return -ENOTSUP;
2333
2334         if (!mask && check_invalid_pkt_type(target))
2335                 return -EINVAL;
2336
2337         if (check_invalid_pkt_type(pkt_type))
2338                 return -EINVAL;
2339
2340         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2341
2342         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2343                 if (mask) {
2344                         if ((target | ad->ptype_tbl[i]) == target &&
2345                             (target & ad->ptype_tbl[i]))
2346                                 ad->ptype_tbl[i] = pkt_type;
2347                 } else {
2348                         if (ad->ptype_tbl[i] == target)
2349                                 ad->ptype_tbl[i] = pkt_type;
2350                 }
2351         }
2352
2353         return 0;
2354 }
2355
2356 int
2357 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2358                              struct rte_ether_addr *mac_addr)
2359 {
2360         struct rte_eth_dev *dev;
2361         struct i40e_pf_vf *vf;
2362         struct i40e_vsi *vsi;
2363         struct i40e_pf *pf;
2364         struct i40e_mac_filter_info mac_filter;
2365         int ret;
2366
2367         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2368                 return -EINVAL;
2369
2370         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2371
2372         dev = &rte_eth_devices[port];
2373
2374         if (!is_i40e_supported(dev))
2375                 return -ENOTSUP;
2376
2377         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2378
2379         if (vf_id >= pf->vf_num || !pf->vfs)
2380                 return -EINVAL;
2381
2382         vf = &pf->vfs[vf_id];
2383         vsi = vf->vsi;
2384         if (!vsi) {
2385                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2386                 return -EINVAL;
2387         }
2388
2389         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2390         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2391         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2392         if (ret != I40E_SUCCESS) {
2393                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2394                 return -1;
2395         }
2396
2397         return 0;
2398 }
2399
2400 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2401 {
2402         struct rte_eth_dev *dev;
2403
2404         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2405
2406         dev = &rte_eth_devices[port];
2407
2408         if (!is_i40e_supported(dev))
2409                 return -ENOTSUP;
2410
2411         i40e_set_default_pctype_table(dev);
2412
2413         return 0;
2414 }
2415
2416 int rte_pmd_i40e_flow_type_mapping_get(
2417                         uint16_t port,
2418                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2419 {
2420         struct rte_eth_dev *dev;
2421         struct i40e_adapter *ad;
2422         uint16_t i;
2423
2424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2425
2426         dev = &rte_eth_devices[port];
2427
2428         if (!is_i40e_supported(dev))
2429                 return -ENOTSUP;
2430
2431         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2432
2433         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2434                 mapping_items[i].flow_type = i;
2435                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2436         }
2437
2438         return 0;
2439 }
2440
/* Update the flow-type to PCTYPE mapping table.
 *
 * Each item maps one rte_eth flow type to a bitmask of hardware
 * packet-classification types (PCTYPEs). With @exclusive set the whole
 * table and flow_types_mask are cleared before the new items are
 * applied; otherwise items are merged into the existing table. The
 * adapter's flow_types_mask and pctypes_mask summary bitmaps are kept
 * consistent with the per-flow-type table.
 *
 * Returns 0 on success, -ENODEV/-ENOTSUP/-EINVAL on bad input.
 */
int
rte_pmd_i40e_flow_type_mapping_update(
			uint16_t port,
			struct rte_pmd_i40e_flow_type_mapping *mapping_items,
			uint16_t count,
			uint8_t exclusive)
{
	struct rte_eth_dev *dev;
	struct i40e_adapter *ad;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	if (count > I40E_FLOW_TYPE_MAX)
		return -EINVAL;

	/* Validate every item before touching the table: flow type must be
	 * a known, non-UNKNOWN index and the PCTYPE mask must not contain
	 * the invalid-PCTYPE bit.
	 */
	for (i = 0; i < count; i++)
		if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
		    mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
		    (mapping_items[i].pctype &
		    (1ULL << I40E_FILTER_PCTYPE_INVALID)))
			return -EINVAL;

	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* exclusive update: start from an empty mapping */
	if (exclusive) {
		for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
			ad->pctypes_tbl[i] = 0ULL;
		ad->flow_types_mask = 0ULL;
	}

	/* Apply items; a zero PCTYPE mask disables that flow type, so its
	 * bit is cleared from flow_types_mask, otherwise it is set.
	 */
	for (i = 0; i < count; i++) {
		ad->pctypes_tbl[mapping_items[i].flow_type] =
						mapping_items[i].pctype;
		if (mapping_items[i].pctype)
			ad->flow_types_mask |=
					(1ULL << mapping_items[i].flow_type);
		else
			ad->flow_types_mask &=
					~(1ULL << mapping_items[i].flow_type);
	}

	/* rebuild the union of all enabled PCTYPE bits from scratch */
	for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
		ad->pctypes_mask |= ad->pctypes_tbl[i];

	return 0;
}
2493
2494 int
2495 rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
2496                         const struct rte_ether_addr *vf_mac)
2497 {
2498         struct rte_eth_dev *dev;
2499         struct rte_ether_addr *mac;
2500         struct i40e_pf *pf;
2501         int vf_id;
2502         struct i40e_pf_vf *vf;
2503         uint16_t vf_num;
2504
2505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2506         dev = &rte_eth_devices[port];
2507
2508         if (!is_i40e_supported(dev))
2509                 return -ENOTSUP;
2510
2511         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2512         vf_num = pf->vf_num;
2513
2514         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2515                 vf = &pf->vfs[vf_id];
2516                 mac = &vf->mac_addr;
2517
2518                 if (is_same_ether_addr(mac, vf_mac))
2519                         return vf_id;
2520         }
2521
2522         return -EINVAL;
2523 }
2524
/* Apply the driver-stored queue region layout to the main VSI's
 * TC/queue mapping and commit it to firmware with an update-VSI AQ
 * command.
 *
 * Each configured region becomes one TC entry: the region id selects
 * the TC index, the region's first queue becomes the TC queue offset
 * and log2(queue_num) the TC size field.
 *
 * Returns 0 on success, -EINVAL if no region has been configured, or
 * the error code from i40e_aq_update_vsi_params().
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	/* Start from the current VSI properties, then rewrite the maps. */
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* Clear all TC and queue mapping entries before re-populating. */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	for (i = 0; i < region_info->queue_region_number; i++) {
		tc_index = region_info->region[i].region_id;
		/* TC size is encoded as log2 of the region's queue count;
		 * queue_num is validated elsewhere to be a power of two.
		 */
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	vsi->info.valid_sections = 0;

	return 0;
}
2594
2595
2596 static int
2597 i40e_queue_region_set_region(struct i40e_pf *pf,
2598                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2599 {
2600         uint16_t i;
2601         struct i40e_vsi *main_vsi = pf->main_vsi;
2602         struct i40e_queue_regions *info = &pf->queue_region;
2603         int32_t ret = -EINVAL;
2604
2605         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2606                                 conf_ptr->queue_num <= 64)) {
2607                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2608                         "total number of queues do not exceed the VSI allocation");
2609                 return ret;
2610         }
2611
2612         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2613                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2614                 return ret;
2615         }
2616
2617         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2618                                         > main_vsi->nb_used_qps) {
2619                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2620                 return ret;
2621         }
2622
2623         for (i = 0; i < info->queue_region_number; i++)
2624                 if (conf_ptr->region_id == info->region[i].region_id)
2625                         break;
2626
2627         if (i == info->queue_region_number &&
2628                                 i <= I40E_REGION_MAX_INDEX) {
2629                 info->region[i].region_id = conf_ptr->region_id;
2630                 info->region[i].queue_num = conf_ptr->queue_num;
2631                 info->region[i].queue_start_index =
2632                         conf_ptr->queue_start_index;
2633                 info->queue_region_number++;
2634         } else {
2635                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2636                 return ret;
2637         }
2638
2639         return 0;
2640 }
2641
2642 static int
2643 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2644                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2645 {
2646         int32_t ret = -EINVAL;
2647         struct i40e_queue_regions *info = &pf->queue_region;
2648         uint16_t i, j;
2649         uint16_t region_index, flowtype_index;
2650
2651         /* For the pctype or hardware flowtype of packet,
2652          * the specific index for each type has been defined
2653          * in file i40e_type.h as enum i40e_filter_pctype.
2654          */
2655
2656         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2657                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2658                 return ret;
2659         }
2660
2661         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2662                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2663                 return ret;
2664         }
2665
2666
2667         for (i = 0; i < info->queue_region_number; i++)
2668                 if (rss_region_conf->region_id == info->region[i].region_id)
2669                         break;
2670
2671         if (i == info->queue_region_number) {
2672                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2673                 ret = -EINVAL;
2674                 return ret;
2675         }
2676         region_index = i;
2677
2678         for (i = 0; i < info->queue_region_number; i++) {
2679                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2680                         if (rss_region_conf->hw_flowtype ==
2681                                 info->region[i].hw_flowtype[j]) {
2682                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2683                                 return 0;
2684                         }
2685                 }
2686         }
2687
2688         flowtype_index = info->region[region_index].flowtype_num;
2689         info->region[region_index].hw_flowtype[flowtype_index] =
2690                                         rss_region_conf->hw_flowtype;
2691         info->region[region_index].flowtype_num++;
2692
2693         return 0;
2694 }
2695
2696 static void
2697 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2698                                 struct i40e_pf *pf)
2699 {
2700         uint8_t hw_flowtype;
2701         uint32_t pfqf_hregion;
2702         uint16_t i, j, index;
2703         struct i40e_queue_regions *info = &pf->queue_region;
2704
2705         /* For the pctype or hardware flowtype of packet,
2706          * the specific index for each type has been defined
2707          * in file i40e_type.h as enum i40e_filter_pctype.
2708          */
2709
2710         for (i = 0; i < info->queue_region_number; i++) {
2711                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2712                         hw_flowtype = info->region[i].hw_flowtype[j];
2713                         index = hw_flowtype >> 3;
2714                         pfqf_hregion =
2715                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2716
2717                         if ((hw_flowtype & 0x7) == 0) {
2718                                 pfqf_hregion |= info->region[i].region_id <<
2719                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2720                                 pfqf_hregion |= 1 <<
2721                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2722                         } else if ((hw_flowtype & 0x7) == 1) {
2723                                 pfqf_hregion |= info->region[i].region_id  <<
2724                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2725                                 pfqf_hregion |= 1 <<
2726                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2727                         } else if ((hw_flowtype & 0x7) == 2) {
2728                                 pfqf_hregion |= info->region[i].region_id  <<
2729                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2730                                 pfqf_hregion |= 1 <<
2731                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2732                         } else if ((hw_flowtype & 0x7) == 3) {
2733                                 pfqf_hregion |= info->region[i].region_id  <<
2734                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2735                                 pfqf_hregion |= 1 <<
2736                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2737                         } else if ((hw_flowtype & 0x7) == 4) {
2738                                 pfqf_hregion |= info->region[i].region_id  <<
2739                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2740                                 pfqf_hregion |= 1 <<
2741                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2742                         } else if ((hw_flowtype & 0x7) == 5) {
2743                                 pfqf_hregion |= info->region[i].region_id  <<
2744                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2745                                 pfqf_hregion |= 1 <<
2746                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2747                         } else if ((hw_flowtype & 0x7) == 6) {
2748                                 pfqf_hregion |= info->region[i].region_id  <<
2749                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2750                                 pfqf_hregion |= 1 <<
2751                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2752                         } else {
2753                                 pfqf_hregion |= info->region[i].region_id  <<
2754                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2755                                 pfqf_hregion |= 1 <<
2756                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2757                         }
2758
2759                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2760                                                 pfqf_hregion);
2761                 }
2762         }
2763 }
2764
2765 static int
2766 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2767                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2768 {
2769         struct i40e_queue_regions *info = &pf->queue_region;
2770         int32_t ret = -EINVAL;
2771         uint16_t i, j, region_index;
2772
2773         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2774                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2775                 return ret;
2776         }
2777
2778         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2779                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2780                 return ret;
2781         }
2782
2783         for (i = 0; i < info->queue_region_number; i++)
2784                 if (rss_region_conf->region_id == info->region[i].region_id)
2785                         break;
2786
2787         if (i == info->queue_region_number) {
2788                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2789                 ret = -EINVAL;
2790                 return ret;
2791         }
2792
2793         region_index = i;
2794
2795         for (i = 0; i < info->queue_region_number; i++) {
2796                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2797                         if (info->region[i].user_priority[j] ==
2798                                 rss_region_conf->user_priority) {
2799                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2800                                 return 0;
2801                         }
2802                 }
2803         }
2804
2805         j = info->region[region_index].user_priority_num;
2806         info->region[region_index].user_priority[j] =
2807                                         rss_region_conf->user_priority;
2808         info->region[region_index].user_priority_num++;
2809
2810         return 0;
2811 }
2812
/* Build and commit a DCB (ETS + PFC) configuration derived from the
 * stored queue regions: one traffic class per region, bandwidth split
 * evenly across regions, and each bound user priority mapped to its
 * region's TC.
 *
 * Returns 0 when no region has a user priority bound (nothing to do)
 * or on success; -EINVAL when no region is configured; otherwise the
 * error from i40e_set_dcb_config().
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	struct i40e_dcbx_config dcb_cfg_local;
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	int32_t ret = -EINVAL;
	uint16_t i, j, prio_index, region_index;
	uint8_t tc_map, tc_bw, bw_lf, dcb_flag = 0;

	if (!info->queue_region_number) {
		PMD_DRV_LOG(ERR, "No queue region been set before");
		return ret;
	}

	/* DCB is only needed when at least one region has a UP bound. */
	for (i = 0; i < info->queue_region_number; i++) {
		if (info->region[i].user_priority_num) {
			dcb_flag = 1;
			break;
		}
	}

	if (dcb_flag == 0)
		return 0;

	dcb_cfg = &dcb_cfg_local;
	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

	/* assume each tc has the same bw */
	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
	/* to ensure the sum of tcbw is equal to 100 */
	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
	for (i = 0; i < bw_lf; i++)
		dcb_cfg->etscfg.tcbwtable[i]++;

	/* assume each tc has the same Transmission Selection Algorithm */
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

	/* Map every bound user priority to the TC of its region. */
	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].user_priority_num; j++) {
			prio_index = info->region[i].user_priority[j];
			region_index = info->region[i].region_id;
			dcb_cfg->etscfg.prioritytable[prio_index] =
						region_index;
		}
	}

	/* FW needs one App to configure HW */
	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

	/* Enable PFC for as many TCs as there are regions. */
	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

	dcb_cfg->pfc.willing = 0;
	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->pfc.pfcenable = tc_map;

	/* Copy the new config to the current config */
	*old_cfg = *dcb_cfg;
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);

	if (ret) {
		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
2891
/* Commit (on != 0) or tear down (on == 0) the stored queue region
 * configuration.
 *
 * On "flush on": program the flowtype steering registers, update the
 * VSI queue mapping, then apply the derived DCB configuration.
 * On "flush off": collapse everything back to a single region covering
 * all used queues, restore the default DCB setup and reset the
 * driver-side region state.
 *
 * Returns 0 on success or the first failing step's error code (the
 * teardown path logs failures but still completes the reset).
 */
int
i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
	struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
{
	int32_t ret = -EINVAL;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_vsi *main_vsi = pf->main_vsi;

	if (on) {
		i40e_queue_region_pf_flowtype_conf(hw, pf);

		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
			return ret;
		}

		ret = i40e_queue_region_dcb_configure(hw, pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
			return ret;
		}

		return 0;
	}

	if (info->queue_region_number) {
		/* Collapse to one region spanning all used queues so the
		 * VSI mapping update restores the default layout.
		 */
		info->queue_region_number = 1;
		info->region[0].queue_num = main_vsi->nb_used_qps;
		info->region[0].queue_start_index = 0;

		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");

		ret = i40e_dcb_init_configure(dev, TRUE);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
			pf->flags &= ~I40E_FLAG_DCB;
		}

		/* Clear all driver-side queue region state. */
		i40e_init_queue_region_conf(dev);
	}
	return 0;
}
2937
2938 static int
2939 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2940 {
2941         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2942         uint64_t hena;
2943
2944         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2945         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2946
2947         if (!hena)
2948                 return -ENOTSUP;
2949
2950         return 0;
2951 }
2952
2953 static int
2954 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2955                 struct i40e_queue_regions *regions_ptr)
2956 {
2957         struct i40e_queue_regions *info = &pf->queue_region;
2958
2959         rte_memcpy(regions_ptr, info,
2960                         sizeof(struct i40e_queue_regions));
2961
2962         return 0;
2963 }
2964
2965 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2966                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2967 {
2968         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2969         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2970         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2971         int32_t ret;
2972
2973         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2974
2975         if (!is_i40e_supported(dev))
2976                 return -ENOTSUP;
2977
2978         if (!(!i40e_queue_region_pf_check_rss(pf)))
2979                 return -ENOTSUP;
2980
2981         /* This queue region feature only support pf by now. It should
2982          * be called after dev_start, and will be clear after dev_stop.
2983          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2984          * is just an enable function which server for other configuration,
2985          * it is for all configuration about queue region from up layer,
2986          * at first will only keep in DPDK softwarestored in driver,
2987          * only after "FLUSH_ON", it commit all configuration to HW.
2988          * Because PMD had to set hardware configuration at a time, so
2989          * it will record all up layer command at first.
2990          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2991          * just clean all configuration about queue region just now,
2992          * and restore all to DPDK i40e driver default
2993          * config when start up.
2994          */
2995
2996         switch (op_type) {
2997         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2998                 ret = i40e_queue_region_set_region(pf,
2999                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3000                 break;
3001         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
3002                 ret = i40e_queue_region_set_flowtype(pf,
3003                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3004                 break;
3005         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
3006                 ret = i40e_queue_region_set_user_priority(pf,
3007                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
3008                 break;
3009         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
3010                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
3011                 break;
3012         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
3013                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
3014                 break;
3015         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
3016                 ret = i40e_queue_region_get_all_info(pf,
3017                                 (struct i40e_queue_regions *)arg);
3018                 break;
3019         default:
3020                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
3021                             op_type);
3022                 ret = -EINVAL;
3023         }
3024
3025         I40E_WRITE_FLUSH(hw);
3026
3027         return ret;
3028 }
3029
3030 int rte_pmd_i40e_flow_add_del_packet_template(
3031                         uint16_t port,
3032                         const struct rte_pmd_i40e_pkt_template_conf *conf,
3033                         uint8_t add)
3034 {
3035         struct rte_eth_dev *dev = &rte_eth_devices[port];
3036         struct i40e_fdir_filter_conf filter_conf;
3037
3038         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3039
3040         if (!is_i40e_supported(dev))
3041                 return -ENOTSUP;
3042
3043         memset(&filter_conf, 0, sizeof(filter_conf));
3044         filter_conf.soft_id = conf->soft_id;
3045         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3046         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3047         filter_conf.input.flow.raw_flow.length = conf->input.length;
3048         filter_conf.input.flow_ext.pkt_template = true;
3049
3050         filter_conf.action.rx_queue = conf->action.rx_queue;
3051         filter_conf.action.behavior =
3052                 (enum i40e_fdir_behavior)conf->action.behavior;
3053         filter_conf.action.report_status =
3054                 (enum i40e_fdir_status)conf->action.report_status;
3055         filter_conf.action.flex_off = conf->action.flex_off;
3056
3057         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3058 }
3059
/**
 * Read the input set (field selection) and field masks for one PCTYPE.
 *
 * @param port
 *   Ethernet port identifier.
 * @param pctype
 *   Hardware packet classifier type index (0..63).
 * @param inset
 *   Output: 64-bit input set register value plus two field masks.
 * @param inset_type
 *   Which input set to read: RSS hash, flow director, or FDIR flexible
 *   payload.
 * @return
 *   0 on success; -ENODEV for an invalid port; -ENOTSUP for a non-i40e
 *   device; -EINVAL for a bad pctype or inset type.
 */
int
rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	/* pctype indexes 64-entry register arrays. */
	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));

	switch (inset_type) {
	case INSET_HASH:
		/* Get input set */
		/* Each input set spans two 32-bit registers: word 1 holds
		 * the high half, word 0 the low half.
		 */
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |=
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
		/* Get field mask */
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
		break;
	case INSET_FDIR:
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |=
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
		break;
	case INSET_FDIR_FLX:
		/* Flexible payload input set fits in a single register. */
		inset_reg =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
		mask_reg[0] =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
		mask_reg[1] =
			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	inset->inset = inset_reg;

	/* Unpack each mask register: bits 21:16 are the field index,
	 * bits 15:0 the mask value.
	 */
	for (i = 0; i < 2; i++) {
		inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
		inset->mask[i].mask = mask_reg[i] & 0xFFFF;
	}

	return 0;
}
3131
/**
 * Write the input set (field selection) and field masks for one PCTYPE.
 *
 * @param port
 *   Ethernet port identifier.
 * @param pctype
 *   Hardware packet classifier type index (0..63).
 * @param inset
 *   Input set register value plus two field masks to program.
 * @param inset_type
 *   Which input set to write: RSS hash, flow director, or FDIR flexible
 *   payload.
 * @return
 *   0 on success; -ENODEV for an invalid port; -ENOTSUP for a non-i40e
 *   device or when multi-driver support is enabled (global registers
 *   must not be touched then); -EINVAL for a bad pctype or inset type.
 */
int
rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	/* pctype indexes 64-entry register arrays. */
	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* Input set registers are (partly) global; refuse to modify them
	 * when several drivers share the NIC.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
		return -ENOTSUP;
	}

	inset_reg = inset->inset;
	/* Pack each mask: field index into bits 21:16, mask into 15:0. */
	for (i = 0; i < 2; i++)
		mask_reg[i] = (inset->mask[i].field_idx << 16) |
			inset->mask[i].mask;

	switch (inset_type) {
	case INSET_HASH:
		/* Hash inset/mask registers are global (checked writes). */
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
					    (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
					    (uint32_t)((inset_reg >>
					     I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  mask_reg[i]);
		break;
	case INSET_FDIR:
		/* FDIR inset registers are per-port; the masks are global. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				     (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
					      I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    mask_reg[i]);
		break;
	case INSET_FDIR_FLX:
		/* Flexible payload inset/mask are per-port registers. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
				     (uint32_t)(inset_reg & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
					     mask_reg[i]);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	I40E_WRITE_FLUSH(hw);
	return 0;
}