f2d961945c433673f03c5ff79725ca335646e1c3
[dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_tailq.h>
7
8 #include "base/i40e_prototype.h"
9 #include "base/i40e_dcb.h"
10 #include "i40e_ethdev.h"
11 #include "i40e_pf.h"
12 #include "i40e_rxtx.h"
13 #include "rte_pmd_i40e.h"
14
15 int
16 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
17 {
18         struct rte_eth_dev *dev;
19         struct i40e_pf *pf;
20
21         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
22
23         dev = &rte_eth_devices[port];
24
25         if (!is_i40e_supported(dev))
26                 return -ENOTSUP;
27
28         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
29
30         if (vf >= pf->vf_num || !pf->vfs) {
31                 PMD_DRV_LOG(ERR, "Invalid argument.");
32                 return -EINVAL;
33         }
34
35         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
36
37         return 0;
38 }
39
/*
 * Enable/disable MAC anti-spoofing for a given VF from the host (PF).
 *
 * @port   ethdev port id of the PF
 * @vf_id  VF index relative to this PF
 * @on     non-zero to enable the MAC check, zero to disable it
 *
 * Returns 0 on success (including when already in the requested state),
 * -ENODEV/-ENOTSUP/-EINVAL on validation failure, or -ENOTSUP when the
 * admin-queue VSI update fails.
 */
int
rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/*
	 * Check if it has been already on or off: the cached context is
	 * trusted only when the security section was previously marked valid.
	 */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
		if (on) {
			if ((vsi->info.sec_flags &
			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
				return 0; /* already on */
		} else {
			if ((vsi->info.sec_flags &
			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
				return 0; /* already off */
		}
	}

	/* Update the cached VSI context first, then push it to firmware */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (on)
		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
	else
		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
104
/*
 * Walk the VSI's VFTA bitmap and add (add != 0) or remove (add == 0)
 * a hardware VLAN filter for every VLAN id whose bit is set.
 *
 * VLAN id 0 is skipped even if its bit is set.  Stops and returns the
 * admin-queue error on the first failing add/remove; returns
 * I40E_SUCCESS otherwise.
 */
static int
i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
{
	uint32_t j, k;
	uint16_t vlan_id;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
	int ret;

	for (j = 0; j < I40E_VFTA_SIZE; j++) {
		/* Skip whole 32-bit words with no VLANs set */
		if (!vsi->vfta[j])
			continue;

		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
			if (!(vsi->vfta[j] & (1 << k)))
				continue;

			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
			if (!vlan_id)
				continue;

			vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
			if (add)
				ret = i40e_aq_add_vlan(hw, vsi->seid,
						       &vlan_data, 1, NULL);
			else
				ret = i40e_aq_remove_vlan(hw, vsi->seid,
							  &vlan_data, 1, NULL);
			if (ret != I40E_SUCCESS) {
				PMD_DRV_LOG(ERR,
					    "Failed to add/rm vlan filter");
				return ret;
			}
		}
	}

	return I40E_SUCCESS;
}
143
/*
 * Enable/disable VLAN anti-spoofing for a given VF from the host (PF).
 *
 * @port   ethdev port id of the PF
 * @vf_id  VF index relative to this PF
 * @on     non-zero to enable the VLAN check, zero to disable it
 *
 * When VLAN filtering is not already active on the VSI, all VLANs known
 * in the VFTA are installed (on) or removed (off) so the anti-spoof
 * check has filters to match against.  Returns 0 on success or when
 * already in the requested state; negative errno otherwise.
 */
int
rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Check if it has been already on or off */
	if (vsi->vlan_anti_spoof_on == on)
		return 0; /* already on or off */

	vsi->vlan_anti_spoof_on = on;
	/* Only touch the VLAN filter set if filtering is not already on */
	if (!vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, on);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
			return -ENOTSUP;
		}
	}

	/* Push the new security flags to firmware via the VSI context */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (on)
		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
	else
		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
206
/*
 * Remove every MAC (and MAC+VLAN) filter currently programmed on the VSI.
 *
 * For MACVLAN filter types, one hardware entry per known VLAN is removed
 * (vlan_num must be non-zero); for plain MAC types a single entry is
 * removed.  The software mac_list itself is NOT modified, so the filters
 * can be re-installed later by i40e_vsi_restore_mac_filter().
 *
 * Returns I40E_SUCCESS, or the first i40e error encountered.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			   filter_type == RTE_MAC_HASH_MATCH)
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* One scratch entry per VLAN, all carrying this MAC */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN ids associated with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
265
/*
 * Re-install every MAC (and MAC+VLAN) filter recorded in the VSI's
 * software mac_list — the counterpart of i40e_vsi_rm_mac_filter().
 *
 * Returns I40E_SUCCESS, or the first i40e error encountered.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* One scratch entry per VLAN, all carrying this MAC */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN ids associated with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
327
/*
 * Enable/disable TX loopback (VEB local bridging) on a single VSI.
 *
 * Requires firmware >= v5.0.  Because the switch-id flag can only be
 * changed while the VSI has no MAC/VLAN filters, the sequence is:
 * remove all filters, update the VSI context, then restore them.
 * NOTE(review): if the context update succeeds but restoring the
 * filters fails, the filters stay removed — callers see the error but
 * the VSI is left partially configured.
 *
 * Returns 0 on success or when already in the requested state;
 * -EINVAL/-ENOTSUP or an i40e error code on failure.
 */
static int
i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
{
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw;
	int ret;

	if (!vsi)
		return -EINVAL;

	hw = I40E_VSI_TO_HW(vsi);

	/* Use the FW API if FW >= v5.0 */
	if (hw->aq.fw_maj_ver < 5) {
		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
		return -ENOTSUP;
	}

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
		if (on) {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
				return 0; /* already on */
		} else {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
				return 0; /* already off */
		}
	}

	/* remove all the MAC and VLAN first */
	ret = i40e_vsi_rm_mac_filter(vsi);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
		return ret;
	}
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
			return ret;
		}
	}

	/* Flip the loopback flag in the cached context and push it */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (on)
		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
	else
		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
		return ret;
	}

	/* add all the MAC and VLAN back */
	ret = i40e_vsi_restore_mac_filter(vsi);
	if (ret)
		return ret;
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
		if (ret)
			return ret;
	}

	return ret;
}
403
404 int
405 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
406 {
407         struct rte_eth_dev *dev;
408         struct i40e_pf *pf;
409         struct i40e_pf_vf *vf;
410         struct i40e_vsi *vsi;
411         uint16_t vf_id;
412         int ret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
415
416         dev = &rte_eth_devices[port];
417
418         if (!is_i40e_supported(dev))
419                 return -ENOTSUP;
420
421         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
422
423         /* setup PF TX loopback */
424         vsi = pf->main_vsi;
425         ret = i40e_vsi_set_tx_loopback(vsi, on);
426         if (ret)
427                 return -ENOTSUP;
428
429         /* setup TX loopback for all the VFs */
430         if (!pf->vfs) {
431                 /* if no VF, do nothing. */
432                 return 0;
433         }
434
435         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
436                 vf = &pf->vfs[vf_id];
437                 vsi = vf->vsi;
438
439                 ret = i40e_vsi_set_tx_loopback(vsi, on);
440                 if (ret)
441                         return -ENOTSUP;
442         }
443
444         return ret;
445 }
446
447 int
448 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
449 {
450         struct rte_eth_dev *dev;
451         struct i40e_pf *pf;
452         struct i40e_vsi *vsi;
453         struct i40e_hw *hw;
454         int ret;
455
456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
457
458         dev = &rte_eth_devices[port];
459
460         if (!is_i40e_supported(dev))
461                 return -ENOTSUP;
462
463         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
464
465         if (vf_id >= pf->vf_num || !pf->vfs) {
466                 PMD_DRV_LOG(ERR, "Invalid argument.");
467                 return -EINVAL;
468         }
469
470         vsi = pf->vfs[vf_id].vsi;
471         if (!vsi) {
472                 PMD_DRV_LOG(ERR, "Invalid VSI.");
473                 return -EINVAL;
474         }
475
476         hw = I40E_VSI_TO_HW(vsi);
477
478         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
479                                                   on, NULL, true);
480         if (ret != I40E_SUCCESS) {
481                 ret = -ENOTSUP;
482                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
483         }
484
485         return ret;
486 }
487
488 int
489 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
490 {
491         struct rte_eth_dev *dev;
492         struct i40e_pf *pf;
493         struct i40e_vsi *vsi;
494         struct i40e_hw *hw;
495         int ret;
496
497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
498
499         dev = &rte_eth_devices[port];
500
501         if (!is_i40e_supported(dev))
502                 return -ENOTSUP;
503
504         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
505
506         if (vf_id >= pf->vf_num || !pf->vfs) {
507                 PMD_DRV_LOG(ERR, "Invalid argument.");
508                 return -EINVAL;
509         }
510
511         vsi = pf->vfs[vf_id].vsi;
512         if (!vsi) {
513                 PMD_DRV_LOG(ERR, "Invalid VSI.");
514                 return -EINVAL;
515         }
516
517         hw = I40E_VSI_TO_HW(vsi);
518
519         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
520                                                     on, NULL);
521         if (ret != I40E_SUCCESS) {
522                 ret = -ENOTSUP;
523                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
524         }
525
526         return ret;
527 }
528
/*
 * Set the default MAC address of a VF from the host (PF).
 *
 * @port      ethdev port id of the PF
 * @vf_id     VF index relative to this PF
 * @mac_addr  new unicast MAC address for the VF
 *
 * Stores the address in the VF's PF-side state and removes every MAC
 * filter currently on the VF's VSI.  No new filter is installed here;
 * presumably the stored address takes effect when the VF next resets
 * and re-queries its MAC — TODO confirm against the VF message handler.
 */
int
rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
			     struct ether_addr *mac_addr)
{
	struct i40e_mac_filter *f;
	struct rte_eth_dev *dev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	void *temp;

	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs)
		return -EINVAL;

	vf = &pf->vfs[vf_id];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	ether_addr_copy(mac_addr, &vf->mac_addr);

	/* Remove all existing mac */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
				!= I40E_SUCCESS)
			PMD_DRV_LOG(WARNING, "Delete MAC failed");

	return 0;
}
572
573 /* Set vlan strip on/off for specific VF from host */
574 int
575 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
576 {
577         struct rte_eth_dev *dev;
578         struct i40e_pf *pf;
579         struct i40e_vsi *vsi;
580         int ret;
581
582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
583
584         dev = &rte_eth_devices[port];
585
586         if (!is_i40e_supported(dev))
587                 return -ENOTSUP;
588
589         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
590
591         if (vf_id >= pf->vf_num || !pf->vfs) {
592                 PMD_DRV_LOG(ERR, "Invalid argument.");
593                 return -EINVAL;
594         }
595
596         vsi = pf->vfs[vf_id].vsi;
597
598         if (!vsi)
599                 return -EINVAL;
600
601         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
602         if (ret != I40E_SUCCESS) {
603                 ret = -ENOTSUP;
604                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
605         }
606
607         return ret;
608 }
609
/*
 * Configure port-VLAN (PVID) insertion for a VF from the host (PF).
 *
 * @port     ethdev port id of the PF
 * @vf_id    VF index relative to this PF
 * @vlan_id  VLAN to insert on TX; 0 disables insertion
 *
 * Returns 0 on success; -ENODEV/-ENOTSUP/-EINVAL on validation failure,
 * -ENOTSUP if the admin-queue VSI update fails.
 */
int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
				    uint16_t vlan_id)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi *vsi;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (vlan_id > ETHER_MAX_VLAN_ID) {
		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return -ENODEV;

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Record the PVID and toggle insertion in the cached context */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = vlan_id;
	if (vlan_id > 0)
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
	else
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
674
/*
 * Allow/disallow broadcast traffic for a VF from the host (PF) by
 * adding or removing the all-ones broadcast MAC filter on its VSI.
 *
 * @port   ethdev port id of the PF
 * @vf_id  VF index relative to this PF
 * @on     1 to add the broadcast filter, 0 to remove it
 *
 * I40E_ERR_PARAM from the add/delete is treated as success — presumably
 * the filter was already present/absent; confirm against
 * i40e_vsi_add_mac()/i40e_vsi_delete_mac() semantics.
 */
int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
				  uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_mac_filter_info filter;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (on) {
		rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		ret = i40e_vsi_add_mac(vsi, &filter);
	} else {
		ret = i40e_vsi_delete_mac(vsi, &broadcast);
	}

	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
	} else {
		ret = 0;
	}

	return ret;
}
740
/*
 * Select whether a VF's VSI treats traffic as VLAN tagged or untagged,
 * by flipping the mutually exclusive PVLAN MODE_TAGGED/MODE_UNTAGGED
 * flags in the VSI context.
 *
 * @port   ethdev port id of the PF
 * @vf_id  VF index relative to this PF
 * @on     1 for tagged mode, 0 for untagged mode
 *
 * Returns 0 on success; -ENODEV/-ENOTSUP/-EINVAL on validation failure,
 * -ENOTSUP if the admin-queue VSI update fails.
 */
int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi *vsi;
	struct i40e_vsi_context ctxt;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* TAGGED and UNTAGGED are exclusive: set one, clear the other */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	if (on) {
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
	} else {
		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
	}

	return ret;
}
808
809 static int
810 i40e_vlan_filter_count(struct i40e_vsi *vsi)
811 {
812         uint32_t j, k;
813         uint16_t vlan_id;
814         int count = 0;
815
816         for (j = 0; j < I40E_VFTA_SIZE; j++) {
817                 if (!vsi->vfta[j])
818                         continue;
819
820                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
821                         if (!(vsi->vfta[j] & (1 << k)))
822                                 continue;
823
824                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
825                         if (!vlan_id)
826                                 continue;
827
828                         count++;
829                 }
830         }
831
832         return count;
833 }
834
835 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
836                                     uint64_t vf_mask, uint8_t on)
837 {
838         struct rte_eth_dev *dev;
839         struct i40e_pf *pf;
840         struct i40e_hw *hw;
841         struct i40e_vsi *vsi;
842         uint16_t vf_idx;
843         int ret = I40E_SUCCESS;
844
845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
846
847         dev = &rte_eth_devices[port];
848
849         if (!is_i40e_supported(dev))
850                 return -ENOTSUP;
851
852         if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
853                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
854                 return -EINVAL;
855         }
856
857         if (vf_mask == 0) {
858                 PMD_DRV_LOG(ERR, "No VF.");
859                 return -EINVAL;
860         }
861
862         if (on > 1) {
863                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
864                 return -EINVAL;
865         }
866
867         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
868         hw = I40E_PF_TO_HW(pf);
869
870         /**
871          * return -ENODEV if SRIOV not enabled, VF number not configured
872          * or no queue assigned.
873          */
874         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
875             pf->vf_nb_qps == 0) {
876                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
877                 return -ENODEV;
878         }
879
880         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
881                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
882                         vsi = pf->vfs[vf_idx].vsi;
883                         if (on) {
884                                 if (!vsi->vlan_filter_on) {
885                                         vsi->vlan_filter_on = true;
886                                         i40e_aq_set_vsi_vlan_promisc(hw,
887                                                                      vsi->seid,
888                                                                      false,
889                                                                      NULL);
890                                         if (!vsi->vlan_anti_spoof_on)
891                                                 i40e_add_rm_all_vlan_filter(
892                                                         vsi, true);
893                                 }
894                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
895                         } else {
896                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
897
898                                 if (!i40e_vlan_filter_count(vsi)) {
899                                         vsi->vlan_filter_on = false;
900                                         i40e_aq_set_vsi_vlan_promisc(hw,
901                                                                      vsi->seid,
902                                                                      true,
903                                                                      NULL);
904                                 }
905                         }
906                 }
907         }
908
909         if (ret != I40E_SUCCESS) {
910                 ret = -ENOTSUP;
911                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
912         }
913
914         return ret;
915 }
916
917 int
918 rte_pmd_i40e_get_vf_stats(uint16_t port,
919                           uint16_t vf_id,
920                           struct rte_eth_stats *stats)
921 {
922         struct rte_eth_dev *dev;
923         struct i40e_pf *pf;
924         struct i40e_vsi *vsi;
925
926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
927
928         dev = &rte_eth_devices[port];
929
930         if (!is_i40e_supported(dev))
931                 return -ENOTSUP;
932
933         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
934
935         if (vf_id >= pf->vf_num || !pf->vfs) {
936                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
937                 return -EINVAL;
938         }
939
940         vsi = pf->vfs[vf_id].vsi;
941         if (!vsi) {
942                 PMD_DRV_LOG(ERR, "Invalid VSI.");
943                 return -EINVAL;
944         }
945
946         i40e_update_vsi_stats(vsi);
947
948         stats->ipackets = vsi->eth_stats.rx_unicast +
949                         vsi->eth_stats.rx_multicast +
950                         vsi->eth_stats.rx_broadcast;
951         stats->opackets = vsi->eth_stats.tx_unicast +
952                         vsi->eth_stats.tx_multicast +
953                         vsi->eth_stats.tx_broadcast;
954         stats->ibytes   = vsi->eth_stats.rx_bytes;
955         stats->obytes   = vsi->eth_stats.tx_bytes;
956         stats->ierrors  = vsi->eth_stats.rx_discards;
957         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
958
959         return 0;
960 }
961
962 int
963 rte_pmd_i40e_reset_vf_stats(uint16_t port,
964                             uint16_t vf_id)
965 {
966         struct rte_eth_dev *dev;
967         struct i40e_pf *pf;
968         struct i40e_vsi *vsi;
969
970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
971
972         dev = &rte_eth_devices[port];
973
974         if (!is_i40e_supported(dev))
975                 return -ENOTSUP;
976
977         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
978
979         if (vf_id >= pf->vf_num || !pf->vfs) {
980                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
981                 return -EINVAL;
982         }
983
984         vsi = pf->vfs[vf_id].vsi;
985         if (!vsi) {
986                 PMD_DRV_LOG(ERR, "Invalid VSI.");
987                 return -EINVAL;
988         }
989
990         vsi->offset_loaded = false;
991         i40e_update_vsi_stats(vsi);
992
993         return 0;
994 }
995
996 int
997 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
998 {
999         struct rte_eth_dev *dev;
1000         struct i40e_pf *pf;
1001         struct i40e_vsi *vsi;
1002         struct i40e_hw *hw;
1003         int ret = 0;
1004         int i;
1005
1006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1007
1008         dev = &rte_eth_devices[port];
1009
1010         if (!is_i40e_supported(dev))
1011                 return -ENOTSUP;
1012
1013         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1014
1015         if (vf_id >= pf->vf_num || !pf->vfs) {
1016                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1017                 return -EINVAL;
1018         }
1019
1020         vsi = pf->vfs[vf_id].vsi;
1021         if (!vsi) {
1022                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1023                 return -EINVAL;
1024         }
1025
1026         if (bw > I40E_QOS_BW_MAX) {
1027                 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1028                             I40E_QOS_BW_MAX);
1029                 return -EINVAL;
1030         }
1031
1032         if (bw % I40E_QOS_BW_GRANULARITY) {
1033                 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1034                             I40E_QOS_BW_GRANULARITY);
1035                 return -EINVAL;
1036         }
1037
1038         bw /= I40E_QOS_BW_GRANULARITY;
1039
1040         hw = I40E_VSI_TO_HW(vsi);
1041
1042         /* No change. */
1043         if (bw == vsi->bw_info.bw_limit) {
1044                 PMD_DRV_LOG(INFO,
1045                             "No change for VF max bandwidth. Nothing to do.");
1046                 return 0;
1047         }
1048
1049         /**
1050          * VF bandwidth limitation and TC bandwidth limitation cannot be
1051          * enabled in parallel, quit if TC bandwidth limitation is enabled.
1052          *
1053          * If bw is 0, means disable bandwidth limitation. Then no need to
1054          * check TC bandwidth limitation.
1055          */
1056         if (bw) {
1057                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1058                         if ((vsi->enabled_tc & BIT_ULL(i)) &&
1059                             vsi->bw_info.bw_ets_credits[i])
1060                                 break;
1061                 }
1062                 if (i != I40E_MAX_TRAFFIC_CLASS) {
1063                         PMD_DRV_LOG(ERR,
1064                                     "TC max bandwidth has been set on this VF,"
1065                                     " please disable it first.");
1066                         return -EINVAL;
1067                 }
1068         }
1069
1070         ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1071         if (ret) {
1072                 PMD_DRV_LOG(ERR,
1073                             "Failed to set VF %d bandwidth, err(%d).",
1074                             vf_id, ret);
1075                 return -EINVAL;
1076         }
1077
1078         /* Store the configuration. */
1079         vsi->bw_info.bw_limit = (uint16_t)bw;
1080         vsi->bw_info.bw_max = 0;
1081
1082         return 0;
1083 }
1084
1085 int
1086 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1087                                 uint8_t tc_num, uint8_t *bw_weight)
1088 {
1089         struct rte_eth_dev *dev;
1090         struct i40e_pf *pf;
1091         struct i40e_vsi *vsi;
1092         struct i40e_hw *hw;
1093         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1094         int ret = 0;
1095         int i, j;
1096         uint16_t sum;
1097         bool b_change = false;
1098
1099         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1100
1101         dev = &rte_eth_devices[port];
1102
1103         if (!is_i40e_supported(dev))
1104                 return -ENOTSUP;
1105
1106         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1107
1108         if (vf_id >= pf->vf_num || !pf->vfs) {
1109                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1110                 return -EINVAL;
1111         }
1112
1113         vsi = pf->vfs[vf_id].vsi;
1114         if (!vsi) {
1115                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1116                 return -EINVAL;
1117         }
1118
1119         if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1120                 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1121                             I40E_MAX_TRAFFIC_CLASS);
1122                 return -EINVAL;
1123         }
1124
1125         sum = 0;
1126         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1127                 if (vsi->enabled_tc & BIT_ULL(i))
1128                         sum++;
1129         }
1130         if (sum != tc_num) {
1131                 PMD_DRV_LOG(ERR,
1132                             "Weight should be set for all %d enabled TCs.",
1133                             sum);
1134                 return -EINVAL;
1135         }
1136
1137         sum = 0;
1138         for (i = 0; i < tc_num; i++) {
1139                 if (!bw_weight[i]) {
1140                         PMD_DRV_LOG(ERR,
1141                                     "The weight should be 1 at least.");
1142                         return -EINVAL;
1143                 }
1144                 sum += bw_weight[i];
1145         }
1146         if (sum != 100) {
1147                 PMD_DRV_LOG(ERR,
1148                             "The summary of the TC weight should be 100.");
1149                 return -EINVAL;
1150         }
1151
1152         /**
1153          * Create the configuration for all the TCs.
1154          */
1155         memset(&tc_bw, 0, sizeof(tc_bw));
1156         tc_bw.tc_valid_bits = vsi->enabled_tc;
1157         j = 0;
1158         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1159                 if (vsi->enabled_tc & BIT_ULL(i)) {
1160                         if (bw_weight[j] !=
1161                                 vsi->bw_info.bw_ets_share_credits[i])
1162                                 b_change = true;
1163
1164                         tc_bw.tc_bw_credits[i] = bw_weight[j];
1165                         j++;
1166                 }
1167         }
1168
1169         /* No change. */
1170         if (!b_change) {
1171                 PMD_DRV_LOG(INFO,
1172                             "No change for TC allocated bandwidth."
1173                             " Nothing to do.");
1174                 return 0;
1175         }
1176
1177         hw = I40E_VSI_TO_HW(vsi);
1178
1179         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1180         if (ret) {
1181                 PMD_DRV_LOG(ERR,
1182                             "Failed to set VF %d TC bandwidth weight, err(%d).",
1183                             vf_id, ret);
1184                 return -EINVAL;
1185         }
1186
1187         /* Store the configuration. */
1188         j = 0;
1189         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1190                 if (vsi->enabled_tc & BIT_ULL(i)) {
1191                         vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
1192                         j++;
1193                 }
1194         }
1195
1196         return 0;
1197 }
1198
1199 int
1200 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1201                               uint8_t tc_no, uint32_t bw)
1202 {
1203         struct rte_eth_dev *dev;
1204         struct i40e_pf *pf;
1205         struct i40e_vsi *vsi;
1206         struct i40e_hw *hw;
1207         struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1208         int ret = 0;
1209         int i;
1210
1211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1212
1213         dev = &rte_eth_devices[port];
1214
1215         if (!is_i40e_supported(dev))
1216                 return -ENOTSUP;
1217
1218         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1219
1220         if (vf_id >= pf->vf_num || !pf->vfs) {
1221                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1222                 return -EINVAL;
1223         }
1224
1225         vsi = pf->vfs[vf_id].vsi;
1226         if (!vsi) {
1227                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1228                 return -EINVAL;
1229         }
1230
1231         if (bw > I40E_QOS_BW_MAX) {
1232                 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1233                             I40E_QOS_BW_MAX);
1234                 return -EINVAL;
1235         }
1236
1237         if (bw % I40E_QOS_BW_GRANULARITY) {
1238                 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1239                             I40E_QOS_BW_GRANULARITY);
1240                 return -EINVAL;
1241         }
1242
1243         bw /= I40E_QOS_BW_GRANULARITY;
1244
1245         if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1246                 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1247                             I40E_MAX_TRAFFIC_CLASS);
1248                 return -EINVAL;
1249         }
1250
1251         hw = I40E_VSI_TO_HW(vsi);
1252
1253         if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1254                 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1255                             vf_id, tc_no);
1256                 return -EINVAL;
1257         }
1258
1259         /* No change. */
1260         if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1261                 PMD_DRV_LOG(INFO,
1262                             "No change for TC max bandwidth. Nothing to do.");
1263                 return 0;
1264         }
1265
1266         /**
1267          * VF bandwidth limitation and TC bandwidth limitation cannot be
1268          * enabled in parallel, disable VF bandwidth limitation if it's
1269          * enabled.
1270          * If bw is 0, means disable bandwidth limitation. Then no need to
1271          * care about VF bandwidth limitation configuration.
1272          */
1273         if (bw && vsi->bw_info.bw_limit) {
1274                 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1275                 if (ret) {
1276                         PMD_DRV_LOG(ERR,
1277                                     "Failed to disable VF(%d)"
1278                                     " bandwidth limitation, err(%d).",
1279                                     vf_id, ret);
1280                         return -EINVAL;
1281                 }
1282
1283                 PMD_DRV_LOG(INFO,
1284                             "VF max bandwidth is disabled according"
1285                             " to TC max bandwidth setting.");
1286         }
1287
1288         /**
1289          * Get all the TCs' info to create a whole picture.
1290          * Because the incremental change isn't permitted.
1291          */
1292         memset(&tc_bw, 0, sizeof(tc_bw));
1293         tc_bw.tc_valid_bits = vsi->enabled_tc;
1294         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1295                 if (vsi->enabled_tc & BIT_ULL(i)) {
1296                         tc_bw.tc_bw_credits[i] =
1297                                 rte_cpu_to_le_16(
1298                                         vsi->bw_info.bw_ets_credits[i]);
1299                 }
1300         }
1301         tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1302
1303         ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1304         if (ret) {
1305                 PMD_DRV_LOG(ERR,
1306                             "Failed to set VF %d TC %d max bandwidth, err(%d).",
1307                             vf_id, tc_no, ret);
1308                 return -EINVAL;
1309         }
1310
1311         /* Store the configuration. */
1312         vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
1313
1314         return 0;
1315 }
1316
/**
 * Enable strict-priority scheduling for the TCs set in @tc_map on the
 * VEB of the PF's main VSI.
 *
 * @param port    DPDK port id of the PF.
 * @param tc_map  Bitmap of TCs to schedule in strict priority; must be a
 *                subset of the VEB's enabled TCs. 0 disables strict
 *                priority for all TCs and restarts LLDP/DCBx.
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	/* Every requested TC must already be enabled on the VEB. */
	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/*
	 * Disable DCBx (stop LLDP) the first time strict priority is set;
	 * a stop failure is informational only, since DCBx may already be
	 * disabled.
	 */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, NULL);
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For robustness, if bandwidth is 0, use 1 instead. */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/*
	 * Pick the AQ opcode from the transition:
	 * first enable (no TC was strict before) -> enable;
	 * nonzero -> nonzero map                 -> modify;
	 * nonzero -> zero                        -> disable.
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1435
1436 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1437 #define I40E_MAX_PROFILE_NUM 16
1438
1439 static void
1440 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1441                                uint32_t track_id, uint8_t *profile_info_sec,
1442                                bool add)
1443 {
1444         struct i40e_profile_section_header *sec = NULL;
1445         struct i40e_profile_info *pinfo;
1446
1447         sec = (struct i40e_profile_section_header *)profile_info_sec;
1448         sec->tbl_size = 1;
1449         sec->data_end = sizeof(struct i40e_profile_section_header) +
1450                 sizeof(struct i40e_profile_info);
1451         sec->section.type = SECTION_TYPE_INFO;
1452         sec->section.offset = sizeof(struct i40e_profile_section_header);
1453         sec->section.size = sizeof(struct i40e_profile_info);
1454         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1455                                              sec->section.offset);
1456         pinfo->track_id = track_id;
1457         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1458         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1459         if (add)
1460                 pinfo->op = I40E_DDP_ADD_TRACKID;
1461         else
1462                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1463 }
1464
1465 static enum i40e_status_code
1466 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1467 {
1468         enum i40e_status_code status = I40E_SUCCESS;
1469         struct i40e_profile_section_header *sec;
1470         uint32_t track_id;
1471         uint32_t offset = 0;
1472         uint32_t info = 0;
1473
1474         sec = (struct i40e_profile_section_header *)profile_info_sec;
1475         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1476                                          sec->section.offset))->track_id;
1477
1478         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1479                                    track_id, &offset, &info, NULL);
1480         if (status)
1481                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1482                             "offset %d, info %d",
1483                             offset, info);
1484
1485         return status;
1486 }
1487
1488 /* Check if the profile info exists */
1489 static int
1490 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1491 {
1492         struct rte_eth_dev *dev = &rte_eth_devices[port];
1493         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         uint8_t *buff;
1495         struct rte_pmd_i40e_profile_list *p_list;
1496         struct rte_pmd_i40e_profile_info *pinfo, *p;
1497         uint32_t i;
1498         int ret;
1499         static const uint32_t group_mask = 0x00ff0000;
1500
1501         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1502                              sizeof(struct i40e_profile_section_header));
1503         if (pinfo->track_id == 0) {
1504                 PMD_DRV_LOG(INFO, "Read-only profile.");
1505                 return 0;
1506         }
1507         buff = rte_zmalloc("pinfo_list",
1508                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1509                            0);
1510         if (!buff) {
1511                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1512                 return -1;
1513         }
1514
1515         ret = i40e_aq_get_ddp_list(
1516                 hw, (void *)buff,
1517                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1518                 0, NULL);
1519         if (ret) {
1520                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1521                 rte_free(buff);
1522                 return -1;
1523         }
1524         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1525         for (i = 0; i < p_list->p_count; i++) {
1526                 p = &p_list->p_info[i];
1527                 if (pinfo->track_id == p->track_id) {
1528                         PMD_DRV_LOG(INFO, "Profile exists.");
1529                         rte_free(buff);
1530                         return 1;
1531                 }
1532         }
1533         /* profile with group id 0xff is compatible with any other profile */
1534         if ((pinfo->track_id & group_mask) == group_mask) {
1535                 rte_free(buff);
1536                 return 0;
1537         }
1538         for (i = 0; i < p_list->p_count; i++) {
1539                 p = &p_list->p_info[i];
1540                 if ((p->track_id & group_mask) == 0) {
1541                         PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1542                         rte_free(buff);
1543                         return 2;
1544                 }
1545         }
1546         for (i = 0; i < p_list->p_count; i++) {
1547                 p = &p_list->p_info[i];
1548                 if ((p->track_id & group_mask) == group_mask)
1549                         continue;
1550                 if ((pinfo->track_id & group_mask) !=
1551                     (p->track_id & group_mask)) {
1552                         PMD_DRV_LOG(INFO, "Profile of different group exists.");
1553                         rte_free(buff);
1554                         return 3;
1555                 }
1556         }
1557
1558         rte_free(buff);
1559         return 0;
1560 }
1561
/**
 * Write a DDP (Dynamic Device Personalization) package to the device.
 *
 * @param port  DPDK port id of the PF.
 * @param buff  Package image (header, metadata segment, i40e profile
 *              segment).
 * @param size  Size of @buff in bytes.
 * @param op    WR_ADD loads the profile (rejected if already present),
 *              WR_DEL rolls it back (rejected if not present),
 *              WR_ONLY writes without touching the profile-info list.
 * @return 0 on success; negative errno on validation failure; a
 *         firmware status code from the write/rollback otherwise.
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;
	/* High byte of the track id encodes the profile type. */
	static const uint32_t type_mask = 0xff000000;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Minimum size: package header + metadata segment + 2 words. */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	/* NULL buff would yield a NULL pkg_hdr; reject it here. */
	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	/* At least the metadata and the i40e profile segments. */
	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	i40e_update_customized_info(dev, buff, size);

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* force read-only track_id for type 0 */
	if ((track_id & type_mask) == 0)
		track_id = 0;

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	/* Scratch section used both for the existence check and, later,
	 * for updating the firmware's loaded-profile list.
	 */
	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/* ADD requires the profile to be absent; DEL requires an exact
	 * match (is_exist == 1). WR_ONLY skips both checks.
	 */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			if (is_exist == 1)
				PMD_DRV_LOG(ERR, "Profile already exists.");
			else if (is_exist == 2)
				PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
			else if (is_exist == 3)
				PMD_DRV_LOG(ERR, "Profile of different group already exists");
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (is_exist != 1) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	/* Read-only profiles (track_id 0) and WR_ONLY writes are not
	 * recorded in the loaded-profile list.
	 */
	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	rte_free(profile_info_sec);
	return status;
}
1721
1722 /* Get number of tvl records in the section */
1723 static unsigned int
1724 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1725 {
1726         unsigned int i, nb_rec, nb_tlv = 0;
1727         struct i40e_profile_tlv_section_record *tlv;
1728
1729         if (!sec)
1730                 return nb_tlv;
1731
1732         /* get number of records in the section */
1733         nb_rec = sec->section.size /
1734                                 sizeof(struct i40e_profile_tlv_section_record);
1735         for (i = 0; i < nb_rec; ) {
1736                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1737                 i += tlv->len;
1738                 nb_tlv++;
1739         }
1740         return nb_tlv;
1741 }
1742
1743 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1744         uint8_t *info_buff, uint32_t info_size,
1745         enum rte_pmd_i40e_package_info type)
1746 {
1747         uint32_t ret_size;
1748         struct i40e_package_header *pkg_hdr;
1749         struct i40e_generic_seg_header *i40e_seg_hdr;
1750         struct i40e_generic_seg_header *note_seg_hdr;
1751         struct i40e_generic_seg_header *metadata_seg_hdr;
1752
1753         if (!info_buff) {
1754                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1755                 return -EINVAL;
1756         }
1757
1758         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1759                 sizeof(struct i40e_metadata_segment) +
1760                 sizeof(uint32_t) * 2)) {
1761                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1762                 return -EINVAL;
1763         }
1764
1765         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1766         if (pkg_hdr->segment_count < 2) {
1767                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1768                 return -EINVAL;
1769         }
1770
1771         /* Find metadata segment */
1772         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1773                 pkg_hdr);
1774
1775         /* Find global notes segment */
1776         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1777                 pkg_hdr);
1778
1779         /* Find i40e profile segment */
1780         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1781
1782         /* get global header info */
1783         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1784                 struct rte_pmd_i40e_profile_info *info =
1785                         (struct rte_pmd_i40e_profile_info *)info_buff;
1786
1787                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1788                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1789                         return -EINVAL;
1790                 }
1791
1792                 if (!metadata_seg_hdr) {
1793                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1794                         return -EINVAL;
1795                 }
1796
1797                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1798                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1799                 info->track_id =
1800                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1801
1802                 memcpy(info->name,
1803                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1804                         I40E_DDP_NAME_SIZE);
1805                 memcpy(&info->version,
1806                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1807                         sizeof(struct i40e_ddp_version));
1808                 return I40E_SUCCESS;
1809         }
1810
1811         /* get global note size */
1812         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1813                 if (info_size < sizeof(uint32_t)) {
1814                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1815                         return -EINVAL;
1816                 }
1817                 if (note_seg_hdr == NULL)
1818                         ret_size = 0;
1819                 else
1820                         ret_size = note_seg_hdr->size;
1821                 *(uint32_t *)info_buff = ret_size;
1822                 return I40E_SUCCESS;
1823         }
1824
1825         /* get global note */
1826         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1827                 if (note_seg_hdr == NULL)
1828                         return -ENOTSUP;
1829                 if (info_size < note_seg_hdr->size) {
1830                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1831                         return -EINVAL;
1832                 }
1833                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1834                 return I40E_SUCCESS;
1835         }
1836
1837         /* get i40e segment header info */
1838         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1839                 struct rte_pmd_i40e_profile_info *info =
1840                         (struct rte_pmd_i40e_profile_info *)info_buff;
1841
1842                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1843                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1844                         return -EINVAL;
1845                 }
1846
1847                 if (!metadata_seg_hdr) {
1848                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1849                         return -EINVAL;
1850                 }
1851
1852                 if (!i40e_seg_hdr) {
1853                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1854                         return -EINVAL;
1855                 }
1856
1857                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1858                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1859                 info->track_id =
1860                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1861
1862                 memcpy(info->name,
1863                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1864                         I40E_DDP_NAME_SIZE);
1865                 memcpy(&info->version,
1866                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1867                         sizeof(struct i40e_ddp_version));
1868                 return I40E_SUCCESS;
1869         }
1870
1871         /* get number of devices */
1872         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1873                 if (info_size < sizeof(uint32_t)) {
1874                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1875                         return -EINVAL;
1876                 }
1877                 *(uint32_t *)info_buff =
1878                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1879                 return I40E_SUCCESS;
1880         }
1881
1882         /* get list of devices */
1883         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1884                 uint32_t dev_num;
1885                 dev_num =
1886                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1887                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1888                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1889                         return -EINVAL;
1890                 }
1891                 memcpy(info_buff,
1892                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1893                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1894                 return I40E_SUCCESS;
1895         }
1896
1897         /* get number of protocols */
1898         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1899                 struct i40e_profile_section_header *proto;
1900
1901                 if (info_size < sizeof(uint32_t)) {
1902                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1903                         return -EINVAL;
1904                 }
1905                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1906                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1907                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1908                 return I40E_SUCCESS;
1909         }
1910
1911         /* get list of protocols */
1912         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1913                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1914                 struct rte_pmd_i40e_proto_info *pinfo;
1915                 struct i40e_profile_section_header *proto;
1916                 struct i40e_profile_tlv_section_record *tlv;
1917
1918                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1919                 nb_proto_info = info_size /
1920                                         sizeof(struct rte_pmd_i40e_proto_info);
1921                 for (i = 0; i < nb_proto_info; i++) {
1922                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1923                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1924                 }
1925                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1926                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1927                 nb_tlv = i40e_get_tlv_section_size(proto);
1928                 if (nb_tlv == 0)
1929                         return I40E_SUCCESS;
1930                 if (nb_proto_info < nb_tlv) {
1931                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1932                         return -EINVAL;
1933                 }
1934                 /* get number of records in the section */
1935                 nb_rec = proto->section.size /
1936                                 sizeof(struct i40e_profile_tlv_section_record);
1937                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1938                 for (i = j = 0; i < nb_rec; j++) {
1939                         pinfo[j].proto_id = tlv->data[0];
1940                         snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1941                                  (const char *)&tlv->data[1]);
1942                         i += tlv->len;
1943                         tlv = &tlv[tlv->len];
1944                 }
1945                 return I40E_SUCCESS;
1946         }
1947
1948         /* get number of packet classification types */
1949         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1950                 struct i40e_profile_section_header *pctype;
1951
1952                 if (info_size < sizeof(uint32_t)) {
1953                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1954                         return -EINVAL;
1955                 }
1956                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1957                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1958                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1959                 return I40E_SUCCESS;
1960         }
1961
1962         /* get list of packet classification types */
1963         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1964                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1965                 struct rte_pmd_i40e_ptype_info *pinfo;
1966                 struct i40e_profile_section_header *pctype;
1967                 struct i40e_profile_tlv_section_record *tlv;
1968
1969                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1970                 nb_proto_info = info_size /
1971                                         sizeof(struct rte_pmd_i40e_ptype_info);
1972                 for (i = 0; i < nb_proto_info; i++)
1973                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1974                                sizeof(struct rte_pmd_i40e_ptype_info));
1975                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1976                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1977                 nb_tlv = i40e_get_tlv_section_size(pctype);
1978                 if (nb_tlv == 0)
1979                         return I40E_SUCCESS;
1980                 if (nb_proto_info < nb_tlv) {
1981                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1982                         return -EINVAL;
1983                 }
1984
1985                 /* get number of records in the section */
1986                 nb_rec = pctype->section.size /
1987                                 sizeof(struct i40e_profile_tlv_section_record);
1988                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1989                 for (i = j = 0; i < nb_rec; j++) {
1990                         memcpy(&pinfo[j], tlv->data,
1991                                sizeof(struct rte_pmd_i40e_ptype_info));
1992                         i += tlv->len;
1993                         tlv = &tlv[tlv->len];
1994                 }
1995                 return I40E_SUCCESS;
1996         }
1997
1998         /* get number of packet types */
1999         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2000                 struct i40e_profile_section_header *ptype;
2001
2002                 if (info_size < sizeof(uint32_t)) {
2003                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2004                         return -EINVAL;
2005                 }
2006                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2007                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2008                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2009                 return I40E_SUCCESS;
2010         }
2011
2012         /* get list of packet types */
2013         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2014                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2015                 struct rte_pmd_i40e_ptype_info *pinfo;
2016                 struct i40e_profile_section_header *ptype;
2017                 struct i40e_profile_tlv_section_record *tlv;
2018
2019                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2020                 nb_proto_info = info_size /
2021                                         sizeof(struct rte_pmd_i40e_ptype_info);
2022                 for (i = 0; i < nb_proto_info; i++)
2023                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2024                                sizeof(struct rte_pmd_i40e_ptype_info));
2025                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2026                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2027                 nb_tlv = i40e_get_tlv_section_size(ptype);
2028                 if (nb_tlv == 0)
2029                         return I40E_SUCCESS;
2030                 if (nb_proto_info < nb_tlv) {
2031                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2032                         return -EINVAL;
2033                 }
2034                 /* get number of records in the section */
2035                 nb_rec = ptype->section.size /
2036                                 sizeof(struct i40e_profile_tlv_section_record);
2037                 for (i = j = 0; i < nb_rec; j++) {
2038                         tlv = (struct i40e_profile_tlv_section_record *)
2039                                                                 &ptype[1 + i];
2040                         memcpy(&pinfo[j], tlv->data,
2041                                sizeof(struct rte_pmd_i40e_ptype_info));
2042                         i += tlv->len;
2043                 }
2044                 return I40E_SUCCESS;
2045         }
2046
2047         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2048         return -EINVAL;
2049 }
2050
2051 int
2052 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2053 {
2054         struct rte_eth_dev *dev;
2055         struct i40e_hw *hw;
2056         enum i40e_status_code status = I40E_SUCCESS;
2057
2058         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2059
2060         dev = &rte_eth_devices[port];
2061
2062         if (!is_i40e_supported(dev))
2063                 return -ENOTSUP;
2064
2065         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2066                 return -EINVAL;
2067
2068         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2069
2070         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2071                                       size, 0, NULL);
2072
2073         return status;
2074 }
2075
2076 static int check_invalid_pkt_type(uint32_t pkt_type)
2077 {
2078         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2079
2080         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2081         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2082         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2083         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2084         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2085         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2086         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2087
2088         if (l2 &&
2089             l2 != RTE_PTYPE_L2_ETHER &&
2090             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2091             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2092             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2093             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2094             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2095             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2096             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2097                 return -1;
2098
2099         if (l3 &&
2100             l3 != RTE_PTYPE_L3_IPV4 &&
2101             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2102             l3 != RTE_PTYPE_L3_IPV6 &&
2103             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2104             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2105             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2106                 return -1;
2107
2108         if (l4 &&
2109             l4 != RTE_PTYPE_L4_TCP &&
2110             l4 != RTE_PTYPE_L4_UDP &&
2111             l4 != RTE_PTYPE_L4_FRAG &&
2112             l4 != RTE_PTYPE_L4_SCTP &&
2113             l4 != RTE_PTYPE_L4_ICMP &&
2114             l4 != RTE_PTYPE_L4_NONFRAG)
2115                 return -1;
2116
2117         if (tnl &&
2118             tnl != RTE_PTYPE_TUNNEL_IP &&
2119             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2120             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2121             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2122             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2123             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2124             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2125             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2126             tnl != RTE_PTYPE_TUNNEL_L2TP)
2127                 return -1;
2128
2129         if (il2 &&
2130             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2131             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2132             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2133                 return -1;
2134
2135         if (il3 &&
2136             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2137             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2138             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2139             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2140             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2141             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2142                 return -1;
2143
2144         if (il4 &&
2145             il4 != RTE_PTYPE_INNER_L4_TCP &&
2146             il4 != RTE_PTYPE_INNER_L4_UDP &&
2147             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2148             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2149             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2150             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2151                 return -1;
2152
2153         return 0;
2154 }
2155
2156 static int check_invalid_ptype_mapping(
2157                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2158                 uint16_t count)
2159 {
2160         int i;
2161
2162         for (i = 0; i < count; i++) {
2163                 uint16_t ptype = mapping_table[i].hw_ptype;
2164                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2165
2166                 if (ptype >= I40E_MAX_PKT_TYPE)
2167                         return -1;
2168
2169                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2170                         continue;
2171
2172                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2173                         continue;
2174
2175                 if (check_invalid_pkt_type(pkt_type))
2176                         return -1;
2177         }
2178
2179         return 0;
2180 }
2181
2182 int
2183 rte_pmd_i40e_ptype_mapping_update(
2184                         uint16_t port,
2185                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2186                         uint16_t count,
2187                         uint8_t exclusive)
2188 {
2189         struct rte_eth_dev *dev;
2190         struct i40e_adapter *ad;
2191         int i;
2192
2193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2194
2195         dev = &rte_eth_devices[port];
2196
2197         if (!is_i40e_supported(dev))
2198                 return -ENOTSUP;
2199
2200         if (count > I40E_MAX_PKT_TYPE)
2201                 return -EINVAL;
2202
2203         if (check_invalid_ptype_mapping(mapping_items, count))
2204                 return -EINVAL;
2205
2206         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2207
2208         if (exclusive) {
2209                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2210                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2211         }
2212
2213         for (i = 0; i < count; i++)
2214                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2215                         = mapping_items[i].sw_ptype;
2216
2217         return 0;
2218 }
2219
2220 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2221 {
2222         struct rte_eth_dev *dev;
2223
2224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2225
2226         dev = &rte_eth_devices[port];
2227
2228         if (!is_i40e_supported(dev))
2229                 return -ENOTSUP;
2230
2231         i40e_set_default_ptype_table(dev);
2232
2233         return 0;
2234 }
2235
2236 int rte_pmd_i40e_ptype_mapping_get(
2237                         uint16_t port,
2238                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2239                         uint16_t size,
2240                         uint16_t *count,
2241                         uint8_t valid_only)
2242 {
2243         struct rte_eth_dev *dev;
2244         struct i40e_adapter *ad;
2245         int n = 0;
2246         uint16_t i;
2247
2248         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2249
2250         dev = &rte_eth_devices[port];
2251
2252         if (!is_i40e_supported(dev))
2253                 return -ENOTSUP;
2254
2255         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2256
2257         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2258                 if (n >= size)
2259                         break;
2260                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2261                         continue;
2262                 mapping_items[n].hw_ptype = i;
2263                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2264                 n++;
2265         }
2266
2267         *count = n;
2268         return 0;
2269 }
2270
2271 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2272                                        uint32_t target,
2273                                        uint8_t mask,
2274                                        uint32_t pkt_type)
2275 {
2276         struct rte_eth_dev *dev;
2277         struct i40e_adapter *ad;
2278         uint16_t i;
2279
2280         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2281
2282         dev = &rte_eth_devices[port];
2283
2284         if (!is_i40e_supported(dev))
2285                 return -ENOTSUP;
2286
2287         if (!mask && check_invalid_pkt_type(target))
2288                 return -EINVAL;
2289
2290         if (check_invalid_pkt_type(pkt_type))
2291                 return -EINVAL;
2292
2293         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2294
2295         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2296                 if (mask) {
2297                         if ((target | ad->ptype_tbl[i]) == target &&
2298                             (target & ad->ptype_tbl[i]))
2299                                 ad->ptype_tbl[i] = pkt_type;
2300                 } else {
2301                         if (ad->ptype_tbl[i] == target)
2302                                 ad->ptype_tbl[i] = pkt_type;
2303                 }
2304         }
2305
2306         return 0;
2307 }
2308
2309 int
2310 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2311                              struct ether_addr *mac_addr)
2312 {
2313         struct rte_eth_dev *dev;
2314         struct i40e_pf_vf *vf;
2315         struct i40e_vsi *vsi;
2316         struct i40e_pf *pf;
2317         struct i40e_mac_filter_info mac_filter;
2318         int ret;
2319
2320         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2321                 return -EINVAL;
2322
2323         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2324
2325         dev = &rte_eth_devices[port];
2326
2327         if (!is_i40e_supported(dev))
2328                 return -ENOTSUP;
2329
2330         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2331
2332         if (vf_id >= pf->vf_num || !pf->vfs)
2333                 return -EINVAL;
2334
2335         vf = &pf->vfs[vf_id];
2336         vsi = vf->vsi;
2337         if (!vsi) {
2338                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2339                 return -EINVAL;
2340         }
2341
2342         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2343         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2344         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2345         if (ret != I40E_SUCCESS) {
2346                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2347                 return -1;
2348         }
2349
2350         return 0;
2351 }
2352
2353 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2354 {
2355         struct rte_eth_dev *dev;
2356
2357         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2358
2359         dev = &rte_eth_devices[port];
2360
2361         if (!is_i40e_supported(dev))
2362                 return -ENOTSUP;
2363
2364         i40e_set_default_pctype_table(dev);
2365
2366         return 0;
2367 }
2368
2369 int rte_pmd_i40e_flow_type_mapping_get(
2370                         uint16_t port,
2371                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2372 {
2373         struct rte_eth_dev *dev;
2374         struct i40e_adapter *ad;
2375         uint16_t i;
2376
2377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2378
2379         dev = &rte_eth_devices[port];
2380
2381         if (!is_i40e_supported(dev))
2382                 return -ENOTSUP;
2383
2384         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2385
2386         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2387                 mapping_items[i].flow_type = i;
2388                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2389         }
2390
2391         return 0;
2392 }
2393
2394 int
2395 rte_pmd_i40e_flow_type_mapping_update(
2396                         uint16_t port,
2397                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2398                         uint16_t count,
2399                         uint8_t exclusive)
2400 {
2401         struct rte_eth_dev *dev;
2402         struct i40e_adapter *ad;
2403         int i;
2404
2405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2406
2407         dev = &rte_eth_devices[port];
2408
2409         if (!is_i40e_supported(dev))
2410                 return -ENOTSUP;
2411
2412         if (count > I40E_FLOW_TYPE_MAX)
2413                 return -EINVAL;
2414
2415         for (i = 0; i < count; i++)
2416                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2417                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2418                     (mapping_items[i].pctype &
2419                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2420                         return -EINVAL;
2421
2422         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2423
2424         if (exclusive) {
2425                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2426                         ad->pctypes_tbl[i] = 0ULL;
2427                 ad->flow_types_mask = 0ULL;
2428         }
2429
2430         for (i = 0; i < count; i++) {
2431                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2432                                                 mapping_items[i].pctype;
2433                 if (mapping_items[i].pctype)
2434                         ad->flow_types_mask |=
2435                                         (1ULL << mapping_items[i].flow_type);
2436                 else
2437                         ad->flow_types_mask &=
2438                                         ~(1ULL << mapping_items[i].flow_type);
2439         }
2440
2441         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2442                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2443
2444         return 0;
2445 }
2446
2447 int
2448 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2449 {
2450         struct rte_eth_dev *dev;
2451         struct ether_addr *mac;
2452         struct i40e_pf *pf;
2453         int vf_id;
2454         struct i40e_pf_vf *vf;
2455         uint16_t vf_num;
2456
2457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2458         dev = &rte_eth_devices[port];
2459
2460         if (!is_i40e_supported(dev))
2461                 return -ENOTSUP;
2462
2463         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2464         vf_num = pf->vf_num;
2465
2466         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2467                 vf = &pf->vfs[vf_id];
2468                 mac = &vf->mac_addr;
2469
2470                 if (is_same_ether_addr(mac, vf_mac))
2471                         return vf_id;
2472         }
2473
2474         return -EINVAL;
2475 }
2476
/* Push the configured queue regions into the main VSI's queue mapping.
 *
 * Builds a VSI context whose TC entries describe each configured queue
 * region (region id used as TC index), sends it to firmware via the
 * update-VSI-params AdminQ command, and on success mirrors the new
 * mapping back into the local VSI state.
 *
 * Returns 0 on success, -EINVAL when no region has been configured, or
 * the AdminQ status code on firmware failure.
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	/* nothing to map if no queue region was configured beforehand */
	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* start from cleared TC and queue mapping tables */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	/* Each region becomes one TC entry: the region id selects the TC
	 * slot; the entry packs the region's first queue index and the
	 * log2 of its queue count (queue_num is validated elsewhere to be
	 * a power of two, so rte_bsf32 gives that exponent).
	 */
	for (i = 0; i < region_info->queue_region_number; i++) {
		tc_index = region_info->region[i].region_id;
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	/* mark local sections stale so the next update revalidates them */
	vsi->info.valid_sections = 0;

	return 0;
}
2546
2547
2548 static int
2549 i40e_queue_region_set_region(struct i40e_pf *pf,
2550                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2551 {
2552         uint16_t i;
2553         struct i40e_vsi *main_vsi = pf->main_vsi;
2554         struct i40e_queue_regions *info = &pf->queue_region;
2555         int32_t ret = -EINVAL;
2556
2557         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2558                                 conf_ptr->queue_num <= 64)) {
2559                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2560                         "total number of queues do not exceed the VSI allocation");
2561                 return ret;
2562         }
2563
2564         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2565                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2566                 return ret;
2567         }
2568
2569         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2570                                         > main_vsi->nb_used_qps) {
2571                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2572                 return ret;
2573         }
2574
2575         for (i = 0; i < info->queue_region_number; i++)
2576                 if (conf_ptr->region_id == info->region[i].region_id)
2577                         break;
2578
2579         if (i == info->queue_region_number &&
2580                                 i <= I40E_REGION_MAX_INDEX) {
2581                 info->region[i].region_id = conf_ptr->region_id;
2582                 info->region[i].queue_num = conf_ptr->queue_num;
2583                 info->region[i].queue_start_index =
2584                         conf_ptr->queue_start_index;
2585                 info->queue_region_number++;
2586         } else {
2587                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2588                 return ret;
2589         }
2590
2591         return 0;
2592 }
2593
2594 static int
2595 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2596                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2597 {
2598         int32_t ret = -EINVAL;
2599         struct i40e_queue_regions *info = &pf->queue_region;
2600         uint16_t i, j;
2601         uint16_t region_index, flowtype_index;
2602
2603         /* For the pctype or hardware flowtype of packet,
2604          * the specific index for each type has been defined
2605          * in file i40e_type.h as enum i40e_filter_pctype.
2606          */
2607
2608         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2609                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2610                 return ret;
2611         }
2612
2613         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2614                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2615                 return ret;
2616         }
2617
2618
2619         for (i = 0; i < info->queue_region_number; i++)
2620                 if (rss_region_conf->region_id == info->region[i].region_id)
2621                         break;
2622
2623         if (i == info->queue_region_number) {
2624                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2625                 ret = -EINVAL;
2626                 return ret;
2627         }
2628         region_index = i;
2629
2630         for (i = 0; i < info->queue_region_number; i++) {
2631                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2632                         if (rss_region_conf->hw_flowtype ==
2633                                 info->region[i].hw_flowtype[j]) {
2634                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2635                                 return 0;
2636                         }
2637                 }
2638         }
2639
2640         flowtype_index = info->region[region_index].flowtype_num;
2641         info->region[region_index].hw_flowtype[flowtype_index] =
2642                                         rss_region_conf->hw_flowtype;
2643         info->region[region_index].flowtype_num++;
2644
2645         return 0;
2646 }
2647
/*
 * Program the PFQF_HREGION registers so that every flow type (pctype)
 * assigned to a queue region is routed to that region's id.
 *
 * Each 32-bit PFQF_HREGION register holds eight flow-type slots:
 * 'index' (hw_flowtype / 8) selects the register and the low three
 * bits of hw_flowtype select the slot, whose region id and
 * override-enable bit are set through the per-slot shift macros.
 *
 * NOTE(review): the eight branches differ only in the *_n_SHIFT
 * macros; they could be collapsed into one computed shift if the
 * macro values follow a regular stride — confirm against the i40e
 * register definitions before refactoring.
 */
static void
i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	uint8_t hw_flowtype;
	uint32_t pfqf_hregion;
	uint16_t i, j, index;
	struct i40e_queue_regions *info = &pf->queue_region;

	/* For the pctype or hardware flowtype of packet,
	 * the specific index for each type has been defined
	 * in file i40e_type.h as enum i40e_filter_pctype.
	 */

	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].flowtype_num; j++) {
			hw_flowtype = info->region[i].hw_flowtype[j];
			/* Eight flow types per register. */
			index = hw_flowtype >> 3;
			/* Read-modify-write: keep the other slots intact. */
			pfqf_hregion =
				i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));

			if ((hw_flowtype & 0x7) == 0) {
				pfqf_hregion |= info->region[i].region_id <<
					I40E_PFQF_HREGION_REGION_0_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
			} else if ((hw_flowtype & 0x7) == 1) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_1_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
			} else if ((hw_flowtype & 0x7) == 2) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_2_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
			} else if ((hw_flowtype & 0x7) == 3) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_3_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
			} else if ((hw_flowtype & 0x7) == 4) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_4_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
			} else if ((hw_flowtype & 0x7) == 5) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_5_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
			} else if ((hw_flowtype & 0x7) == 6) {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_6_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
			} else {
				pfqf_hregion |= info->region[i].region_id  <<
					I40E_PFQF_HREGION_REGION_7_SHIFT;
				pfqf_hregion |= 1 <<
					I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
			}

			i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
						pfqf_hregion);
		}
	}
}
2716
2717 static int
2718 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2719                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2720 {
2721         struct i40e_queue_regions *info = &pf->queue_region;
2722         int32_t ret = -EINVAL;
2723         uint16_t i, j, region_index;
2724
2725         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2726                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2727                 return ret;
2728         }
2729
2730         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2731                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2732                 return ret;
2733         }
2734
2735         for (i = 0; i < info->queue_region_number; i++)
2736                 if (rss_region_conf->region_id == info->region[i].region_id)
2737                         break;
2738
2739         if (i == info->queue_region_number) {
2740                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2741                 ret = -EINVAL;
2742                 return ret;
2743         }
2744
2745         region_index = i;
2746
2747         for (i = 0; i < info->queue_region_number; i++) {
2748                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2749                         if (info->region[i].user_priority[j] ==
2750                                 rss_region_conf->user_priority) {
2751                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2752                                 return 0;
2753                         }
2754                 }
2755         }
2756
2757         j = info->region[region_index].user_priority_num;
2758         info->region[region_index].user_priority[j] =
2759                                         rss_region_conf->user_priority;
2760         info->region[region_index].user_priority_num++;
2761
2762         return 0;
2763 }
2764
2765 static int
2766 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2767                                 struct i40e_pf *pf)
2768 {
2769         struct i40e_dcbx_config dcb_cfg_local;
2770         struct i40e_dcbx_config *dcb_cfg;
2771         struct i40e_queue_regions *info = &pf->queue_region;
2772         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2773         int32_t ret = -EINVAL;
2774         uint16_t i, j, prio_index, region_index;
2775         uint8_t tc_map, tc_bw, bw_lf;
2776
2777         if (!info->queue_region_number) {
2778                 PMD_DRV_LOG(ERR, "No queue region been set before");
2779                 return ret;
2780         }
2781
2782         dcb_cfg = &dcb_cfg_local;
2783         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2784
2785         /* assume each tc has the same bw */
2786         tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2787         for (i = 0; i < info->queue_region_number; i++)
2788                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2789         /* to ensure the sum of tcbw is equal to 100 */
2790         bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
2791         for (i = 0; i < bw_lf; i++)
2792                 dcb_cfg->etscfg.tcbwtable[i]++;
2793
2794         /* assume each tc has the same Transmission Selection Algorithm */
2795         for (i = 0; i < info->queue_region_number; i++)
2796                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
2797
2798         for (i = 0; i < info->queue_region_number; i++) {
2799                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2800                         prio_index = info->region[i].user_priority[j];
2801                         region_index = info->region[i].region_id;
2802                         dcb_cfg->etscfg.prioritytable[prio_index] =
2803                                                 region_index;
2804                 }
2805         }
2806
2807         /* FW needs one App to configure HW */
2808         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2809         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2810         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2811         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
2812
2813         tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2814
2815         dcb_cfg->pfc.willing = 0;
2816         dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2817         dcb_cfg->pfc.pfcenable = tc_map;
2818
2819         /* Copy the new config to the current config */
2820         *old_cfg = *dcb_cfg;
2821         old_cfg->etsrec = old_cfg->etscfg;
2822         ret = i40e_set_dcb_config(hw);
2823
2824         if (ret) {
2825                 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2826                          i40e_stat_str(hw, ret),
2827                          i40e_aq_str(hw, hw->aq.asq_last_status));
2828                 return ret;
2829         }
2830
2831         return 0;
2832 }
2833
2834 int
2835 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2836         struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2837 {
2838         int32_t ret = -EINVAL;
2839         struct i40e_queue_regions *info = &pf->queue_region;
2840         struct i40e_vsi *main_vsi = pf->main_vsi;
2841
2842         if (on) {
2843                 i40e_queue_region_pf_flowtype_conf(hw, pf);
2844
2845                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2846                 if (ret != I40E_SUCCESS) {
2847                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2848                         return ret;
2849                 }
2850
2851                 ret = i40e_queue_region_dcb_configure(hw, pf);
2852                 if (ret != I40E_SUCCESS) {
2853                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2854                         return ret;
2855                 }
2856
2857                 return 0;
2858         }
2859
2860         if (info->queue_region_number) {
2861                 info->queue_region_number = 1;
2862                 info->region[0].queue_num = main_vsi->nb_used_qps;
2863                 info->region[0].queue_start_index = 0;
2864
2865                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2866                 if (ret != I40E_SUCCESS)
2867                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2868
2869                 ret = i40e_dcb_init_configure(dev, TRUE);
2870                 if (ret != I40E_SUCCESS) {
2871                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2872                         pf->flags &= ~I40E_FLAG_DCB;
2873                 }
2874
2875                 i40e_init_queue_region_conf(dev);
2876         }
2877         return 0;
2878 }
2879
2880 static int
2881 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2882 {
2883         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2884         uint64_t hena;
2885
2886         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2887         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2888
2889         if (!hena)
2890                 return -ENOTSUP;
2891
2892         return 0;
2893 }
2894
2895 static int
2896 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2897                 struct i40e_queue_regions *regions_ptr)
2898 {
2899         struct i40e_queue_regions *info = &pf->queue_region;
2900
2901         rte_memcpy(regions_ptr, info,
2902                         sizeof(struct i40e_queue_regions));
2903
2904         return 0;
2905 }
2906
2907 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2908                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2909 {
2910         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2911         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2912         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2913         int32_t ret;
2914
2915         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2916
2917         if (!is_i40e_supported(dev))
2918                 return -ENOTSUP;
2919
2920         if (!(!i40e_queue_region_pf_check_rss(pf)))
2921                 return -ENOTSUP;
2922
2923         /* This queue region feature only support pf by now. It should
2924          * be called after dev_start, and will be clear after dev_stop.
2925          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2926          * is just an enable function which server for other configuration,
2927          * it is for all configuration about queue region from up layer,
2928          * at first will only keep in DPDK softwarestored in driver,
2929          * only after "FLUSH_ON", it commit all configuration to HW.
2930          * Because PMD had to set hardware configuration at a time, so
2931          * it will record all up layer command at first.
2932          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2933          * just clean all configuration about queue region just now,
2934          * and restore all to DPDK i40e driver default
2935          * config when start up.
2936          */
2937
2938         switch (op_type) {
2939         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2940                 ret = i40e_queue_region_set_region(pf,
2941                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2942                 break;
2943         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2944                 ret = i40e_queue_region_set_flowtype(pf,
2945                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2946                 break;
2947         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2948                 ret = i40e_queue_region_set_user_priority(pf,
2949                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2950                 break;
2951         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2952                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2953                 break;
2954         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2955                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2956                 break;
2957         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2958                 ret = i40e_queue_region_get_all_info(pf,
2959                                 (struct i40e_queue_regions *)arg);
2960                 break;
2961         default:
2962                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2963                             op_type);
2964                 ret = -EINVAL;
2965         }
2966
2967         I40E_WRITE_FLUSH(hw);
2968
2969         return ret;
2970 }
2971
2972 int rte_pmd_i40e_flow_add_del_packet_template(
2973                         uint16_t port,
2974                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2975                         uint8_t add)
2976 {
2977         struct rte_eth_dev *dev = &rte_eth_devices[port];
2978         struct i40e_fdir_filter_conf filter_conf;
2979
2980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2981
2982         if (!is_i40e_supported(dev))
2983                 return -ENOTSUP;
2984
2985         memset(&filter_conf, 0, sizeof(filter_conf));
2986         filter_conf.soft_id = conf->soft_id;
2987         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
2988         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
2989         filter_conf.input.flow.raw_flow.length = conf->input.length;
2990         filter_conf.input.flow_ext.pkt_template = true;
2991
2992         filter_conf.action.rx_queue = conf->action.rx_queue;
2993         filter_conf.action.behavior =
2994                 (enum i40e_fdir_behavior)conf->action.behavior;
2995         filter_conf.action.report_status =
2996                 (enum i40e_fdir_status)conf->action.report_status;
2997         filter_conf.action.flex_off = conf->action.flex_off;
2998
2999         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3000 }
3001
3002 int
3003 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
3004                        struct rte_pmd_i40e_inset *inset,
3005                        enum rte_pmd_i40e_inset_type inset_type)
3006 {
3007         struct rte_eth_dev *dev;
3008         struct i40e_hw *hw;
3009         uint64_t inset_reg;
3010         uint32_t mask_reg[2];
3011         int i;
3012
3013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3014
3015         dev = &rte_eth_devices[port];
3016
3017         if (!is_i40e_supported(dev))
3018                 return -ENOTSUP;
3019
3020         if (pctype > 63)
3021                 return -EINVAL;
3022
3023         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3024         memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
3025
3026         switch (inset_type) {
3027         case INSET_HASH:
3028                 /* Get input set */
3029                 inset_reg =
3030                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
3031                 inset_reg <<= I40E_32_BIT_WIDTH;
3032                 inset_reg |=
3033                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
3034                 /* Get field mask */
3035                 mask_reg[0] =
3036                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
3037                 mask_reg[1] =
3038                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
3039                 break;
3040         case INSET_FDIR:
3041                 inset_reg =
3042                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3043                 inset_reg <<= I40E_32_BIT_WIDTH;
3044                 inset_reg |=
3045                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3046                 mask_reg[0] =
3047                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3048                 mask_reg[1] =
3049                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3050                 break;
3051         case INSET_FDIR_FLX:
3052                 inset_reg =
3053                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3054                 mask_reg[0] =
3055                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3056                 mask_reg[1] =
3057                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3058                 break;
3059         default:
3060                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3061                 return -EINVAL;
3062         }
3063
3064         inset->inset = inset_reg;
3065
3066         for (i = 0; i < 2; i++) {
3067                 inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3068                 inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3069         }
3070
3071         return 0;
3072 }
3073
/*
 * Write the input set (the packet fields used by hashing or flow
 * director) and the two field masks for a packet classification type.
 *
 * @port        ethdev port id (must be an i40e device)
 * @pctype      hardware packet classification type, 0..63
 * @inset       64-bit input set plus two {field_idx, mask} pairs
 * @inset_type  which input set to program: INSET_HASH, INSET_FDIR or
 *              INSET_FDIR_FLX
 *
 * Returns 0 on success, -ENODEV/-ENOTSUP/-EINVAL on invalid port,
 * non-i40e device, multi-driver mode, bad pctype or unknown type.
 */
int
rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	/* Hardware defines pctype indices 0..63 only. */
	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* Some of the writes below go through the global-register
	 * helpers, so refuse when multiple drivers share the NIC.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
		return -ENOTSUP;
	}

	/* Pack each {field_idx, mask} pair into the 32-bit register
	 * layout: field index in bits 16+, mask in the low 16 bits.
	 */
	inset_reg = inset->inset;
	for (i = 0; i < 2; i++)
		mask_reg[i] = (inset->mask[i].field_idx << 16) |
			inset->mask[i].mask;

	switch (inset_type) {
	case INSET_HASH:
		/* 64-bit input set is split across two 32-bit registers:
		 * word 0 takes the low half, word 1 the high half.
		 */
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
					    (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
					    (uint32_t)((inset_reg >>
					     I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  mask_reg[i]);
		/* Warn that a shared global configuration was changed. */
		i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
		i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
		break;
	case INSET_FDIR:
		/* FD input set registers are per-port; the masks are
		 * global, hence the extra warning below.
		 */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				     (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
					      I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    mask_reg[i]);
		i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
		break;
	case INSET_FDIR_FLX:
		/* Flexible-payload input set and masks are per-port. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
				     (uint32_t)(inset_reg & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
					     mask_reg[i]);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	I40E_WRITE_FLUSH(hw);
	return 0;
}