9f9a6504d7efef4457670977f865594883fdf221
[dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_tailq.h>
7
8 #include "base/i40e_prototype.h"
9 #include "base/i40e_dcb.h"
10 #include "i40e_ethdev.h"
11 #include "i40e_pf.h"
12 #include "i40e_rxtx.h"
13 #include "rte_pmd_i40e.h"
14
15 int
16 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
17 {
18         struct rte_eth_dev *dev;
19         struct i40e_pf *pf;
20
21         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
22
23         dev = &rte_eth_devices[port];
24
25         if (!is_i40e_supported(dev))
26                 return -ENOTSUP;
27
28         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
29
30         if (vf >= pf->vf_num || !pf->vfs) {
31                 PMD_DRV_LOG(ERR, "Invalid argument.");
32                 return -EINVAL;
33         }
34
35         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
36
37         return 0;
38 }
39
40 int
41 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
42 {
43         struct rte_eth_dev *dev;
44         struct i40e_pf *pf;
45         struct i40e_vsi *vsi;
46         struct i40e_hw *hw;
47         struct i40e_vsi_context ctxt;
48         int ret;
49
50         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
51
52         dev = &rte_eth_devices[port];
53
54         if (!is_i40e_supported(dev))
55                 return -ENOTSUP;
56
57         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
58
59         if (vf_id >= pf->vf_num || !pf->vfs) {
60                 PMD_DRV_LOG(ERR, "Invalid argument.");
61                 return -EINVAL;
62         }
63
64         vsi = pf->vfs[vf_id].vsi;
65         if (!vsi) {
66                 PMD_DRV_LOG(ERR, "Invalid VSI.");
67                 return -EINVAL;
68         }
69
70         /* Check if it has been already on or off */
71         if (vsi->info.valid_sections &
72                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
73                 if (on) {
74                         if ((vsi->info.sec_flags &
75                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
76                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
77                                 return 0; /* already on */
78                 } else {
79                         if ((vsi->info.sec_flags &
80                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
81                                 return 0; /* already off */
82                 }
83         }
84
85         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
86         if (on)
87                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
88         else
89                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
90
91         memset(&ctxt, 0, sizeof(ctxt));
92         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
93         ctxt.seid = vsi->seid;
94
95         hw = I40E_VSI_TO_HW(vsi);
96         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
97         if (ret != I40E_SUCCESS) {
98                 ret = -ENOTSUP;
99                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
100         }
101
102         return ret;
103 }
104
105 static int
106 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
107 {
108         uint32_t j, k;
109         uint16_t vlan_id;
110         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
111         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
112         int ret;
113
114         for (j = 0; j < I40E_VFTA_SIZE; j++) {
115                 if (!vsi->vfta[j])
116                         continue;
117
118                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
119                         if (!(vsi->vfta[j] & (1 << k)))
120                                 continue;
121
122                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
123                         if (!vlan_id)
124                                 continue;
125
126                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
127                         if (add)
128                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
129                                                        &vlan_data, 1, NULL);
130                         else
131                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
132                                                           &vlan_data, 1, NULL);
133                         if (ret != I40E_SUCCESS) {
134                                 PMD_DRV_LOG(ERR,
135                                             "Failed to add/rm vlan filter");
136                                 return ret;
137                         }
138                 }
139         }
140
141         return I40E_SUCCESS;
142 }
143
144 int
145 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
146 {
147         struct rte_eth_dev *dev;
148         struct i40e_pf *pf;
149         struct i40e_vsi *vsi;
150         struct i40e_hw *hw;
151         struct i40e_vsi_context ctxt;
152         int ret;
153
154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
155
156         dev = &rte_eth_devices[port];
157
158         if (!is_i40e_supported(dev))
159                 return -ENOTSUP;
160
161         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
162
163         if (vf_id >= pf->vf_num || !pf->vfs) {
164                 PMD_DRV_LOG(ERR, "Invalid argument.");
165                 return -EINVAL;
166         }
167
168         vsi = pf->vfs[vf_id].vsi;
169         if (!vsi) {
170                 PMD_DRV_LOG(ERR, "Invalid VSI.");
171                 return -EINVAL;
172         }
173
174         /* Check if it has been already on or off */
175         if (vsi->vlan_anti_spoof_on == on)
176                 return 0; /* already on or off */
177
178         vsi->vlan_anti_spoof_on = on;
179         if (!vsi->vlan_filter_on) {
180                 ret = i40e_add_rm_all_vlan_filter(vsi, on);
181                 if (ret) {
182                         PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
183                         return -ENOTSUP;
184                 }
185         }
186
187         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
188         if (on)
189                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
190         else
191                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
192
193         memset(&ctxt, 0, sizeof(ctxt));
194         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
195         ctxt.seid = vsi->seid;
196
197         hw = I40E_VSI_TO_HW(vsi);
198         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
199         if (ret != I40E_SUCCESS) {
200                 ret = -ENOTSUP;
201                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
202         }
203
204         return ret;
205 }
206
/**
 * Remove from hardware every MACVLAN filter derived from the VSI's
 * software mac_list: for MAC+VLAN filter types, one entry per VLAN
 * paired with the MAC; for MAC-only types, a single entry.
 *
 * NOTE(review): the software mac_list appears to be left in place so
 * i40e_vsi_restore_mac_filter() can re-program the filters later —
 * confirm i40e_remove_macvlan_filters() does not prune it.
 *
 * @param vsi  VSI whose hardware filters are removed.
 * @return I40E_SUCCESS on success, an i40e error code otherwise.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			   filter_type == RTE_MAC_HASH_MATCH)
			/* MAC-only filters occupy exactly one slot */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Stage one entry per VLAN, all carrying this filter's MAC */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN IDs actually paired with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
265
/**
 * Re-program on hardware all MACVLAN filters described by the VSI's
 * software mac_list.  Counterpart of i40e_vsi_rm_mac_filter().
 *
 * @param vsi  VSI whose filters are restored.
 * @return I40E_SUCCESS on success, an i40e error code otherwise.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	/* NOTE(review): vlan_num keeps its previous value when a filter has
	 * an unexpected filter_type (first iteration would then allocate a
	 * zero-length array) — confirm only the four known types can occur.
	 */
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
			/* MAC-only filters occupy exactly one slot */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Stage one entry per VLAN, all carrying this filter's MAC */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN IDs actually paired with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
327
/**
 * Enable/disable the "allow loopback" switch flag on one VSI.
 *
 * Requires FW >= v5.0 (enforced below).  All MAC and VLAN filters are
 * removed before the VSI update and re-added afterwards.
 *
 * @param vsi  Target VSI; NULL is rejected with -EINVAL.
 * @param on   Non-zero to allow local loopback, zero to disallow.
 * @return 0 on success or when already in the requested state,
 *         negative errno or an i40e error code otherwise.
 */
static int
i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
{
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw;
	int ret;

	if (!vsi)
		return -EINVAL;

	hw = I40E_VSI_TO_HW(vsi);

	/* Use the FW API if FW >= v5.0 */
	if (hw->aq.fw_maj_ver < 5) {
		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
		return -ENOTSUP;
	}

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
		if (on) {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
				return 0; /* already on */
		} else {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
				return 0; /* already off */
		}
	}

	/* remove all the MAC and VLAN first */
	ret = i40e_vsi_rm_mac_filter(vsi);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
		return ret;
	}
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		/* VLAN filters are only programmed in these two modes */
		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
			return ret;
		}
	}

	/* Only the switch section of the VSI context is updated */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (on)
		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
	else
		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
		return ret;
	}

	/* add all the MAC and VLAN back */
	ret = i40e_vsi_restore_mac_filter(vsi);
	if (ret)
		return ret;
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
		if (ret)
			return ret;
	}

	return ret;
}
403
404 int
405 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
406 {
407         struct rte_eth_dev *dev;
408         struct i40e_pf *pf;
409         struct i40e_pf_vf *vf;
410         struct i40e_vsi *vsi;
411         uint16_t vf_id;
412         int ret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
415
416         dev = &rte_eth_devices[port];
417
418         if (!is_i40e_supported(dev))
419                 return -ENOTSUP;
420
421         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
422
423         /* setup PF TX loopback */
424         vsi = pf->main_vsi;
425         ret = i40e_vsi_set_tx_loopback(vsi, on);
426         if (ret)
427                 return -ENOTSUP;
428
429         /* setup TX loopback for all the VFs */
430         if (!pf->vfs) {
431                 /* if no VF, do nothing. */
432                 return 0;
433         }
434
435         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
436                 vf = &pf->vfs[vf_id];
437                 vsi = vf->vsi;
438
439                 ret = i40e_vsi_set_tx_loopback(vsi, on);
440                 if (ret)
441                         return -ENOTSUP;
442         }
443
444         return ret;
445 }
446
447 int
448 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
449 {
450         struct rte_eth_dev *dev;
451         struct i40e_pf *pf;
452         struct i40e_vsi *vsi;
453         struct i40e_hw *hw;
454         int ret;
455
456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
457
458         dev = &rte_eth_devices[port];
459
460         if (!is_i40e_supported(dev))
461                 return -ENOTSUP;
462
463         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
464
465         if (vf_id >= pf->vf_num || !pf->vfs) {
466                 PMD_DRV_LOG(ERR, "Invalid argument.");
467                 return -EINVAL;
468         }
469
470         vsi = pf->vfs[vf_id].vsi;
471         if (!vsi) {
472                 PMD_DRV_LOG(ERR, "Invalid VSI.");
473                 return -EINVAL;
474         }
475
476         hw = I40E_VSI_TO_HW(vsi);
477
478         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
479                                                   on, NULL, true);
480         if (ret != I40E_SUCCESS) {
481                 ret = -ENOTSUP;
482                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
483         }
484
485         return ret;
486 }
487
488 int
489 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
490 {
491         struct rte_eth_dev *dev;
492         struct i40e_pf *pf;
493         struct i40e_vsi *vsi;
494         struct i40e_hw *hw;
495         int ret;
496
497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
498
499         dev = &rte_eth_devices[port];
500
501         if (!is_i40e_supported(dev))
502                 return -ENOTSUP;
503
504         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
505
506         if (vf_id >= pf->vf_num || !pf->vfs) {
507                 PMD_DRV_LOG(ERR, "Invalid argument.");
508                 return -EINVAL;
509         }
510
511         vsi = pf->vfs[vf_id].vsi;
512         if (!vsi) {
513                 PMD_DRV_LOG(ERR, "Invalid VSI.");
514                 return -EINVAL;
515         }
516
517         hw = I40E_VSI_TO_HW(vsi);
518
519         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
520                                                     on, NULL);
521         if (ret != I40E_SUCCESS) {
522                 ret = -ENOTSUP;
523                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
524         }
525
526         return ret;
527 }
528
529 int
530 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
531                              struct ether_addr *mac_addr)
532 {
533         struct i40e_mac_filter *f;
534         struct rte_eth_dev *dev;
535         struct i40e_pf_vf *vf;
536         struct i40e_vsi *vsi;
537         struct i40e_pf *pf;
538         void *temp;
539
540         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
541                 return -EINVAL;
542
543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
544
545         dev = &rte_eth_devices[port];
546
547         if (!is_i40e_supported(dev))
548                 return -ENOTSUP;
549
550         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
551
552         if (vf_id >= pf->vf_num || !pf->vfs)
553                 return -EINVAL;
554
555         vf = &pf->vfs[vf_id];
556         vsi = vf->vsi;
557         if (!vsi) {
558                 PMD_DRV_LOG(ERR, "Invalid VSI.");
559                 return -EINVAL;
560         }
561
562         ether_addr_copy(mac_addr, &vf->mac_addr);
563
564         /* Remove all existing mac */
565         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
566                 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
567                                 != I40E_SUCCESS)
568                         PMD_DRV_LOG(WARNING, "Delete MAC failed");
569
570         return 0;
571 }
572
573 /* Set vlan strip on/off for specific VF from host */
574 int
575 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
576 {
577         struct rte_eth_dev *dev;
578         struct i40e_pf *pf;
579         struct i40e_vsi *vsi;
580         int ret;
581
582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
583
584         dev = &rte_eth_devices[port];
585
586         if (!is_i40e_supported(dev))
587                 return -ENOTSUP;
588
589         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
590
591         if (vf_id >= pf->vf_num || !pf->vfs) {
592                 PMD_DRV_LOG(ERR, "Invalid argument.");
593                 return -EINVAL;
594         }
595
596         vsi = pf->vfs[vf_id].vsi;
597
598         if (!vsi)
599                 return -EINVAL;
600
601         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
602         if (ret != I40E_SUCCESS) {
603                 ret = -ENOTSUP;
604                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
605         }
606
607         return ret;
608 }
609
610 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
611                                     uint16_t vlan_id)
612 {
613         struct rte_eth_dev *dev;
614         struct i40e_pf *pf;
615         struct i40e_hw *hw;
616         struct i40e_vsi *vsi;
617         struct i40e_vsi_context ctxt;
618         int ret;
619
620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
621
622         if (vlan_id > ETHER_MAX_VLAN_ID) {
623                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
624                 return -EINVAL;
625         }
626
627         dev = &rte_eth_devices[port];
628
629         if (!is_i40e_supported(dev))
630                 return -ENOTSUP;
631
632         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
633         hw = I40E_PF_TO_HW(pf);
634
635         /**
636          * return -ENODEV if SRIOV not enabled, VF number not configured
637          * or no queue assigned.
638          */
639         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
640             pf->vf_nb_qps == 0)
641                 return -ENODEV;
642
643         if (vf_id >= pf->vf_num || !pf->vfs) {
644                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
645                 return -EINVAL;
646         }
647
648         vsi = pf->vfs[vf_id].vsi;
649         if (!vsi) {
650                 PMD_DRV_LOG(ERR, "Invalid VSI.");
651                 return -EINVAL;
652         }
653
654         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
655         vsi->info.pvid = vlan_id;
656         if (vlan_id > 0)
657                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
658         else
659                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
660
661         memset(&ctxt, 0, sizeof(ctxt));
662         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
663         ctxt.seid = vsi->seid;
664
665         hw = I40E_VSI_TO_HW(vsi);
666         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
667         if (ret != I40E_SUCCESS) {
668                 ret = -ENOTSUP;
669                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
670         }
671
672         return ret;
673 }
674
675 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
676                                   uint8_t on)
677 {
678         struct rte_eth_dev *dev;
679         struct i40e_pf *pf;
680         struct i40e_vsi *vsi;
681         struct i40e_hw *hw;
682         struct i40e_mac_filter_info filter;
683         struct ether_addr broadcast = {
684                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
685         int ret;
686
687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
688
689         if (on > 1) {
690                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
691                 return -EINVAL;
692         }
693
694         dev = &rte_eth_devices[port];
695
696         if (!is_i40e_supported(dev))
697                 return -ENOTSUP;
698
699         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
700         hw = I40E_PF_TO_HW(pf);
701
702         if (vf_id >= pf->vf_num || !pf->vfs) {
703                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
704                 return -EINVAL;
705         }
706
707         /**
708          * return -ENODEV if SRIOV not enabled, VF number not configured
709          * or no queue assigned.
710          */
711         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
712             pf->vf_nb_qps == 0) {
713                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
714                 return -ENODEV;
715         }
716
717         vsi = pf->vfs[vf_id].vsi;
718         if (!vsi) {
719                 PMD_DRV_LOG(ERR, "Invalid VSI.");
720                 return -EINVAL;
721         }
722
723         if (on) {
724                 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
725                 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
726                 ret = i40e_vsi_add_mac(vsi, &filter);
727         } else {
728                 ret = i40e_vsi_delete_mac(vsi, &broadcast);
729         }
730
731         if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
732                 ret = -ENOTSUP;
733                 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
734         } else {
735                 ret = 0;
736         }
737
738         return ret;
739 }
740
741 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
742 {
743         struct rte_eth_dev *dev;
744         struct i40e_pf *pf;
745         struct i40e_hw *hw;
746         struct i40e_vsi *vsi;
747         struct i40e_vsi_context ctxt;
748         int ret;
749
750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
751
752         if (on > 1) {
753                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
754                 return -EINVAL;
755         }
756
757         dev = &rte_eth_devices[port];
758
759         if (!is_i40e_supported(dev))
760                 return -ENOTSUP;
761
762         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
763         hw = I40E_PF_TO_HW(pf);
764
765         /**
766          * return -ENODEV if SRIOV not enabled, VF number not configured
767          * or no queue assigned.
768          */
769         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
770             pf->vf_nb_qps == 0) {
771                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
772                 return -ENODEV;
773         }
774
775         if (vf_id >= pf->vf_num || !pf->vfs) {
776                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
777                 return -EINVAL;
778         }
779
780         vsi = pf->vfs[vf_id].vsi;
781         if (!vsi) {
782                 PMD_DRV_LOG(ERR, "Invalid VSI.");
783                 return -EINVAL;
784         }
785
786         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
787         if (on) {
788                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
789                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
790         } else {
791                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
792                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
793         }
794
795         memset(&ctxt, 0, sizeof(ctxt));
796         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
797         ctxt.seid = vsi->seid;
798
799         hw = I40E_VSI_TO_HW(vsi);
800         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
801         if (ret != I40E_SUCCESS) {
802                 ret = -ENOTSUP;
803                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
804         }
805
806         return ret;
807 }
808
809 static int
810 i40e_vlan_filter_count(struct i40e_vsi *vsi)
811 {
812         uint32_t j, k;
813         uint16_t vlan_id;
814         int count = 0;
815
816         for (j = 0; j < I40E_VFTA_SIZE; j++) {
817                 if (!vsi->vfta[j])
818                         continue;
819
820                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
821                         if (!(vsi->vfta[j] & (1 << k)))
822                                 continue;
823
824                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
825                         if (!vlan_id)
826                                 continue;
827
828                         count++;
829                 }
830         }
831
832         return count;
833 }
834
835 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
836                                     uint64_t vf_mask, uint8_t on)
837 {
838         struct rte_eth_dev *dev;
839         struct i40e_pf *pf;
840         struct i40e_hw *hw;
841         struct i40e_vsi *vsi;
842         uint16_t vf_idx;
843         int ret = I40E_SUCCESS;
844
845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
846
847         dev = &rte_eth_devices[port];
848
849         if (!is_i40e_supported(dev))
850                 return -ENOTSUP;
851
852         if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
853                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
854                 return -EINVAL;
855         }
856
857         if (vf_mask == 0) {
858                 PMD_DRV_LOG(ERR, "No VF.");
859                 return -EINVAL;
860         }
861
862         if (on > 1) {
863                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
864                 return -EINVAL;
865         }
866
867         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
868         hw = I40E_PF_TO_HW(pf);
869
870         /**
871          * return -ENODEV if SRIOV not enabled, VF number not configured
872          * or no queue assigned.
873          */
874         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
875             pf->vf_nb_qps == 0) {
876                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
877                 return -ENODEV;
878         }
879
880         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
881                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
882                         vsi = pf->vfs[vf_idx].vsi;
883                         if (on) {
884                                 if (!vsi->vlan_filter_on) {
885                                         vsi->vlan_filter_on = true;
886                                         i40e_aq_set_vsi_vlan_promisc(hw,
887                                                                      vsi->seid,
888                                                                      false,
889                                                                      NULL);
890                                         if (!vsi->vlan_anti_spoof_on)
891                                                 i40e_add_rm_all_vlan_filter(
892                                                         vsi, true);
893                                 }
894                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
895                         } else {
896                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
897
898                                 if (!i40e_vlan_filter_count(vsi)) {
899                                         vsi->vlan_filter_on = false;
900                                         i40e_aq_set_vsi_vlan_promisc(hw,
901                                                                      vsi->seid,
902                                                                      true,
903                                                                      NULL);
904                                 }
905                         }
906                 }
907         }
908
909         if (ret != I40E_SUCCESS) {
910                 ret = -ENOTSUP;
911                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
912         }
913
914         return ret;
915 }
916
917 int
918 rte_pmd_i40e_get_vf_stats(uint16_t port,
919                           uint16_t vf_id,
920                           struct rte_eth_stats *stats)
921 {
922         struct rte_eth_dev *dev;
923         struct i40e_pf *pf;
924         struct i40e_vsi *vsi;
925
926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
927
928         dev = &rte_eth_devices[port];
929
930         if (!is_i40e_supported(dev))
931                 return -ENOTSUP;
932
933         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
934
935         if (vf_id >= pf->vf_num || !pf->vfs) {
936                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
937                 return -EINVAL;
938         }
939
940         vsi = pf->vfs[vf_id].vsi;
941         if (!vsi) {
942                 PMD_DRV_LOG(ERR, "Invalid VSI.");
943                 return -EINVAL;
944         }
945
946         i40e_update_vsi_stats(vsi);
947
948         stats->ipackets = vsi->eth_stats.rx_unicast +
949                         vsi->eth_stats.rx_multicast +
950                         vsi->eth_stats.rx_broadcast;
951         stats->opackets = vsi->eth_stats.tx_unicast +
952                         vsi->eth_stats.tx_multicast +
953                         vsi->eth_stats.tx_broadcast;
954         stats->ibytes   = vsi->eth_stats.rx_bytes;
955         stats->obytes   = vsi->eth_stats.tx_bytes;
956         stats->ierrors  = vsi->eth_stats.rx_discards;
957         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
958
959         return 0;
960 }
961
962 int
963 rte_pmd_i40e_reset_vf_stats(uint16_t port,
964                             uint16_t vf_id)
965 {
966         struct rte_eth_dev *dev;
967         struct i40e_pf *pf;
968         struct i40e_vsi *vsi;
969
970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
971
972         dev = &rte_eth_devices[port];
973
974         if (!is_i40e_supported(dev))
975                 return -ENOTSUP;
976
977         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
978
979         if (vf_id >= pf->vf_num || !pf->vfs) {
980                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
981                 return -EINVAL;
982         }
983
984         vsi = pf->vfs[vf_id].vsi;
985         if (!vsi) {
986                 PMD_DRV_LOG(ERR, "Invalid VSI.");
987                 return -EINVAL;
988         }
989
990         vsi->offset_loaded = false;
991         i40e_update_vsi_stats(vsi);
992
993         return 0;
994 }
995
/* Set the maximum bandwidth limit of a VF's VSI.
 *
 * @param port   ethdev port id of the PF
 * @param vf_id  index of the VF under this PF
 * @param bw     limit in Mbps; must be a multiple of I40E_QOS_BW_GRANULARITY
 *               and no larger than I40E_QOS_BW_MAX; 0 disables the limit
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 *
 * NOTE(review): per the comment below, a VF-level limit and per-TC limits
 * are mutually exclusive in this driver; this function refuses to set a
 * non-zero limit while any TC limit is active.
 */
int
rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* Firmware takes the limit in granularity units, not raw Mbps. */
	bw /= I40E_QOS_BW_GRANULARITY;

	hw = I40E_VSI_TO_HW(vsi);

	/* No change. */
	if (bw == vsi->bw_info.bw_limit) {
		PMD_DRV_LOG(INFO,
			    "No change for VF max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, quit if TC bandwidth limitation is enabled.
	 *
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * check TC bandwidth limitation.
	 */
	if (bw) {
		/* Scan enabled TCs for an active per-TC credit limit. */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if ((vsi->enabled_tc & BIT_ULL(i)) &&
			    vsi->bw_info.bw_ets_credits[i])
				break;
		}
		if (i != I40E_MAX_TRAFFIC_CLASS) {
			PMD_DRV_LOG(ERR,
				    "TC max bandwidth has been set on this VF,"
				    " please disable it first.");
			return -EINVAL;
		}
	}

	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d bandwidth, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_limit = (uint16_t)bw;
	vsi->bw_info.bw_max = 0;

	return 0;
}
1084
/* Set the relative bandwidth weights of a VF's enabled traffic classes.
 *
 * @param port      ethdev port id of the PF
 * @param vf_id     index of the VF under this PF
 * @param tc_num    number of entries in bw_weight; must equal the number
 *                  of TCs enabled on the VF's VSI
 * @param bw_weight per-TC weights, each >= 1, summing to exactly 100
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
				uint8_t tc_num, uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
	int ret = 0;
	int i, j;
	uint16_t sum;
	bool b_change = false;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	/* Caller must supply exactly one weight per enabled TC. */
	sum = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}
	if (sum != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    sum);
		return -EINVAL;
	}

	/* Weights are percentages: each at least 1 and totalling 100. */
	sum = 0;
	for (i = 0; i < tc_num; i++) {
		if (!bw_weight[i]) {
			PMD_DRV_LOG(ERR,
				    "The weight should be 1 at least.");
			return -EINVAL;
		}
		sum += bw_weight[i];
	}
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The summary of the TC weight should be 100.");
		return -EINVAL;
	}

	/**
	 * Create the configuration for all the TCs.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	/* j walks the caller's compact weight array while i walks TC ids. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			if (bw_weight[j] !=
				vsi->bw_info.bw_ets_share_credits[i])
				b_change = true;

			tc_bw.tc_bw_credits[i] = bw_weight[j];
			j++;
		}
	}

	/* No change. */
	if (!b_change) {
		PMD_DRV_LOG(INFO,
			    "No change for TC allocated bandwidth."
			    " Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC bandwidth weight, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
			j++;
		}
	}

	return 0;
}
1198
/* Set the maximum bandwidth of one traffic class on a VF's VSI.
 *
 * @param port  ethdev port id of the PF
 * @param vf_id index of the VF under this PF
 * @param tc_no traffic class number; must be enabled on the VSI
 * @param bw    limit in Mbps; multiple of I40E_QOS_BW_GRANULARITY, at most
 *              I40E_QOS_BW_MAX; 0 disables the limit for this TC
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 *
 * Because TC limits and the VF-level limit are mutually exclusive here, an
 * active VF-level limit is disabled first when a non-zero TC limit is set.
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
			      uint8_t tc_no, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* Firmware takes the limit in granularity units, not raw Mbps. */
	bw /= I40E_QOS_BW_GRANULARITY;

	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	hw = I40E_VSI_TO_HW(vsi);

	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
			    vf_id, tc_no);
		return -EINVAL;
	}

	/* No change. */
	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
		PMD_DRV_LOG(INFO,
			    "No change for TC max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, disable VF bandwidth limitation if it's
	 * enabled.
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * care about VF bandwidth limitation configuration.
	 */
	if (bw && vsi->bw_info.bw_limit) {
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to disable VF(%d)"
				    " bandwidth limitation, err(%d).",
				    vf_id, ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "VF max bandwidth is disabled according"
			    " to TC max bandwidth setting.");
	}

	/**
	 * Get all the TCs' info to create a whole picture.
	 * Because the incremental change isn't permitted.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			tc_bw.tc_bw_credits[i] =
				rte_cpu_to_le_16(
					vsi->bw_info.bw_ets_credits[i]);
		}
	}
	/* Overwrite just the requested TC with the new limit. */
	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
			    vf_id, tc_no, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

	return 0;
}
1316
/* Enable/disable strict-priority scheduling for a set of traffic classes
 * on the PF's VEB.
 *
 * @param port   ethdev port id of the PF
 * @param tc_map bitmap of TCs to put in strict-priority mode; must be a
 *               subset of the VEB's enabled TCs; 0 disables strict priority
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 *
 * Side effects: LLDP/DCBx is stopped the first time strict priority is
 * enabled and restarted when tc_map returns to 0.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/* Disable DCBx if it's the first time to set strict priority. */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, NULL);
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For rubust, if bandwidth is 0, use 1 instead. */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/* Pick the AQ opcode: enable on first use, modify while active,
	 * disable when the map is cleared.
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1435
1436 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1437 #define I40E_MAX_PROFILE_NUM 16
1438
1439 static void
1440 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1441                                uint32_t track_id, uint8_t *profile_info_sec,
1442                                bool add)
1443 {
1444         struct i40e_profile_section_header *sec = NULL;
1445         struct i40e_profile_info *pinfo;
1446
1447         sec = (struct i40e_profile_section_header *)profile_info_sec;
1448         sec->tbl_size = 1;
1449         sec->data_end = sizeof(struct i40e_profile_section_header) +
1450                 sizeof(struct i40e_profile_info);
1451         sec->section.type = SECTION_TYPE_INFO;
1452         sec->section.offset = sizeof(struct i40e_profile_section_header);
1453         sec->section.size = sizeof(struct i40e_profile_info);
1454         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1455                                              sec->section.offset);
1456         pinfo->track_id = track_id;
1457         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1458         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1459         if (add)
1460                 pinfo->op = I40E_DDP_ADD_TRACKID;
1461         else
1462                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1463 }
1464
/* Submit a prepared profile-info section (built by
 * i40e_generate_profile_info_sec()) to the firmware via the write-DDP
 * admin queue command; the record's 'op' field selects add vs remove.
 * Returns the AQ status code.
 */
static enum i40e_status_code
i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_profile_section_header *sec;
	uint32_t track_id;
	uint32_t offset = 0;
	uint32_t info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	/* track_id is read back out of the info record after the header. */
	track_id = ((struct i40e_profile_info *)(profile_info_sec +
					 sec->section.offset))->track_id;

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);
	if (status)
		PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
			    "offset %d, info %d",
			    offset, info);

	return status;
}
1487
/* Check if the profile info exists.
 *
 * Queries the firmware's loaded-profile list and compares it against the
 * candidate profile described in profile_info_sec.
 *
 * Returns:
 *   0  - no conflict (read-only profile, same group as all loaded ones,
 *        or candidate is in the wildcard group 0xff)
 *   1  - an identical track_id is already loaded
 *   2  - a profile of group 0 is loaded (conflicts with any group)
 *   3  - a profile of a different group is loaded
 *  -1  - allocation or AQ query failure
 */
static int
i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port];
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t *buff;
	struct rte_pmd_i40e_profile_list *p_list;
	struct rte_pmd_i40e_profile_info *pinfo, *p;
	uint32_t i;
	int ret;
	static const uint32_t group_mask = 0x00ff0000;

	pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
			     sizeof(struct i40e_profile_section_header));
	/* track_id 0 marks a read-only profile: nothing to check. */
	if (pinfo->track_id == 0) {
		PMD_DRV_LOG(INFO, "Read-only profile.");
		return 0;
	}
	buff = rte_zmalloc("pinfo_list",
			   (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
			   0);
	if (!buff) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return -1;
	}

	ret = i40e_aq_get_ddp_list(
		hw, (void *)buff,
		(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
		0, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get profile info list.");
		rte_free(buff);
		return -1;
	}
	p_list = (struct rte_pmd_i40e_profile_list *)buff;
	/* Pass 1: exact track_id duplicate. */
	for (i = 0; i < p_list->p_count; i++) {
		p = &p_list->p_info[i];
		if (pinfo->track_id == p->track_id) {
			PMD_DRV_LOG(INFO, "Profile exists.");
			rte_free(buff);
			return 1;
		}
	}
	/* profile with group id 0xff is compatible with any other profile */
	if ((pinfo->track_id & group_mask) == group_mask) {
		rte_free(buff);
		return 0;
	}
	/* Pass 2: a loaded group-0 profile conflicts with everything. */
	for (i = 0; i < p_list->p_count; i++) {
		p = &p_list->p_info[i];
		if ((p->track_id & group_mask) == 0) {
			PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
			rte_free(buff);
			return 2;
		}
	}
	/* Pass 3: any loaded non-wildcard profile of a different group. */
	for (i = 0; i < p_list->p_count; i++) {
		p = &p_list->p_info[i];
		if ((p->track_id & group_mask) == group_mask)
			continue;
		if ((pinfo->track_id & group_mask) !=
		    (p->track_id & group_mask)) {
			PMD_DRV_LOG(INFO, "Profile of different group exists.");
			rte_free(buff);
			return 3;
		}
	}

	rte_free(buff);
	return 0;
}
1561
/* Load (add/write-only) or roll back (delete) a DDP package on the device.
 *
 * @param buff  package image; must contain at least a package header, a
 *              metadata segment and an i40e profile segment
 * @param size  size of buff in bytes
 * @param op    RTE_PMD_I40E_PKG_OP_WR_ADD / WR_ONLY / WR_DEL
 * @return 0 on success; negative errno on validation failure; a positive
 *         i40e status code if the firmware write/rollback itself fails
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;
	static const uint32_t type_mask = 0xff000000;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Minimum size: package header + metadata segment + two words. */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* force read-only track_id for type 0 */
	if ((track_id & type_mask) == 0)
		track_id = 0;

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/* Add requires no conflicting profile; delete requires an exact
	 * match to already be loaded (is_exist == 1).
	 */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			if (is_exist == 1)
				PMD_DRV_LOG(ERR, "Profile already exists.");
			else if (is_exist == 2)
				PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
			else if (is_exist == 3)
				PMD_DRV_LOG(ERR, "Profile of different group already exists");
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (is_exist != 1) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	/* Read-only profiles (track_id 0) and WR_ONLY ops don't touch the
	 * firmware's loaded-profile bookkeeping.
	 */
	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
	    op == RTE_PMD_I40E_PKG_OP_WR_DEL)
		i40e_update_customized_info(dev, buff, size, op);

	rte_free(profile_info_sec);
	return status;
}
1723
/* Get the number of TLV records contained in a profile section.
 *
 * Returns 0 when @sec is NULL. Walks the section record by record:
 * each TLV record reports its own length (tlv->len), so the walk
 * advances by that many slots per iteration and counts one TLV each
 * time.
 */
static unsigned int
i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
{
	unsigned int i, nb_rec, nb_tlv = 0;
	struct i40e_profile_tlv_section_record *tlv;

	if (!sec)
		return nb_tlv;

	/* get number of records in the section */
	nb_rec = sec->section.size /
				sizeof(struct i40e_profile_tlv_section_record);
	for (i = 0; i < nb_rec; ) {
		/* NOTE(review): &sec[1 + i] steps in units of the section
		 * header while tlv->len counts TLV records -- this assumes
		 * both structs have the same size (matches the base driver
		 * layout; confirm if either definition changes).
		 */
		tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
		i += tlv->len;
		nb_tlv++;
	}
	return nb_tlv;
}
1744
1745 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1746         uint8_t *info_buff, uint32_t info_size,
1747         enum rte_pmd_i40e_package_info type)
1748 {
1749         uint32_t ret_size;
1750         struct i40e_package_header *pkg_hdr;
1751         struct i40e_generic_seg_header *i40e_seg_hdr;
1752         struct i40e_generic_seg_header *note_seg_hdr;
1753         struct i40e_generic_seg_header *metadata_seg_hdr;
1754
1755         if (!info_buff) {
1756                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1757                 return -EINVAL;
1758         }
1759
1760         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1761                 sizeof(struct i40e_metadata_segment) +
1762                 sizeof(uint32_t) * 2)) {
1763                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1764                 return -EINVAL;
1765         }
1766
1767         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1768         if (pkg_hdr->segment_count < 2) {
1769                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1770                 return -EINVAL;
1771         }
1772
1773         /* Find metadata segment */
1774         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1775                 pkg_hdr);
1776
1777         /* Find global notes segment */
1778         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1779                 pkg_hdr);
1780
1781         /* Find i40e profile segment */
1782         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1783
1784         /* get global header info */
1785         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1786                 struct rte_pmd_i40e_profile_info *info =
1787                         (struct rte_pmd_i40e_profile_info *)info_buff;
1788
1789                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1790                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1791                         return -EINVAL;
1792                 }
1793
1794                 if (!metadata_seg_hdr) {
1795                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1796                         return -EINVAL;
1797                 }
1798
1799                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1800                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1801                 info->track_id =
1802                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1803
1804                 memcpy(info->name,
1805                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1806                         I40E_DDP_NAME_SIZE);
1807                 memcpy(&info->version,
1808                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1809                         sizeof(struct i40e_ddp_version));
1810                 return I40E_SUCCESS;
1811         }
1812
1813         /* get global note size */
1814         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1815                 if (info_size < sizeof(uint32_t)) {
1816                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1817                         return -EINVAL;
1818                 }
1819                 if (note_seg_hdr == NULL)
1820                         ret_size = 0;
1821                 else
1822                         ret_size = note_seg_hdr->size;
1823                 *(uint32_t *)info_buff = ret_size;
1824                 return I40E_SUCCESS;
1825         }
1826
1827         /* get global note */
1828         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1829                 if (note_seg_hdr == NULL)
1830                         return -ENOTSUP;
1831                 if (info_size < note_seg_hdr->size) {
1832                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1833                         return -EINVAL;
1834                 }
1835                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1836                 return I40E_SUCCESS;
1837         }
1838
1839         /* get i40e segment header info */
1840         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1841                 struct rte_pmd_i40e_profile_info *info =
1842                         (struct rte_pmd_i40e_profile_info *)info_buff;
1843
1844                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1845                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1846                         return -EINVAL;
1847                 }
1848
1849                 if (!metadata_seg_hdr) {
1850                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1851                         return -EINVAL;
1852                 }
1853
1854                 if (!i40e_seg_hdr) {
1855                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1856                         return -EINVAL;
1857                 }
1858
1859                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1860                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1861                 info->track_id =
1862                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1863
1864                 memcpy(info->name,
1865                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1866                         I40E_DDP_NAME_SIZE);
1867                 memcpy(&info->version,
1868                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1869                         sizeof(struct i40e_ddp_version));
1870                 return I40E_SUCCESS;
1871         }
1872
1873         /* get number of devices */
1874         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1875                 if (info_size < sizeof(uint32_t)) {
1876                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1877                         return -EINVAL;
1878                 }
1879                 *(uint32_t *)info_buff =
1880                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1881                 return I40E_SUCCESS;
1882         }
1883
1884         /* get list of devices */
1885         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1886                 uint32_t dev_num;
1887                 dev_num =
1888                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1889                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1890                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1891                         return -EINVAL;
1892                 }
1893                 memcpy(info_buff,
1894                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1895                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1896                 return I40E_SUCCESS;
1897         }
1898
1899         /* get number of protocols */
1900         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1901                 struct i40e_profile_section_header *proto;
1902
1903                 if (info_size < sizeof(uint32_t)) {
1904                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1905                         return -EINVAL;
1906                 }
1907                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1908                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1909                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1910                 return I40E_SUCCESS;
1911         }
1912
1913         /* get list of protocols */
1914         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1915                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1916                 struct rte_pmd_i40e_proto_info *pinfo;
1917                 struct i40e_profile_section_header *proto;
1918                 struct i40e_profile_tlv_section_record *tlv;
1919
1920                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1921                 nb_proto_info = info_size /
1922                                         sizeof(struct rte_pmd_i40e_proto_info);
1923                 for (i = 0; i < nb_proto_info; i++) {
1924                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1925                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1926                 }
1927                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1928                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1929                 nb_tlv = i40e_get_tlv_section_size(proto);
1930                 if (nb_tlv == 0)
1931                         return I40E_SUCCESS;
1932                 if (nb_proto_info < nb_tlv) {
1933                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1934                         return -EINVAL;
1935                 }
1936                 /* get number of records in the section */
1937                 nb_rec = proto->section.size /
1938                                 sizeof(struct i40e_profile_tlv_section_record);
1939                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1940                 for (i = j = 0; i < nb_rec; j++) {
1941                         pinfo[j].proto_id = tlv->data[0];
1942                         snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1943                                  (const char *)&tlv->data[1]);
1944                         i += tlv->len;
1945                         tlv = &tlv[tlv->len];
1946                 }
1947                 return I40E_SUCCESS;
1948         }
1949
1950         /* get number of packet classification types */
1951         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1952                 struct i40e_profile_section_header *pctype;
1953
1954                 if (info_size < sizeof(uint32_t)) {
1955                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1956                         return -EINVAL;
1957                 }
1958                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1959                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1960                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1961                 return I40E_SUCCESS;
1962         }
1963
1964         /* get list of packet classification types */
1965         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1966                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1967                 struct rte_pmd_i40e_ptype_info *pinfo;
1968                 struct i40e_profile_section_header *pctype;
1969                 struct i40e_profile_tlv_section_record *tlv;
1970
1971                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1972                 nb_proto_info = info_size /
1973                                         sizeof(struct rte_pmd_i40e_ptype_info);
1974                 for (i = 0; i < nb_proto_info; i++)
1975                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1976                                sizeof(struct rte_pmd_i40e_ptype_info));
1977                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1978                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1979                 nb_tlv = i40e_get_tlv_section_size(pctype);
1980                 if (nb_tlv == 0)
1981                         return I40E_SUCCESS;
1982                 if (nb_proto_info < nb_tlv) {
1983                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1984                         return -EINVAL;
1985                 }
1986
1987                 /* get number of records in the section */
1988                 nb_rec = pctype->section.size /
1989                                 sizeof(struct i40e_profile_tlv_section_record);
1990                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1991                 for (i = j = 0; i < nb_rec; j++) {
1992                         memcpy(&pinfo[j], tlv->data,
1993                                sizeof(struct rte_pmd_i40e_ptype_info));
1994                         i += tlv->len;
1995                         tlv = &tlv[tlv->len];
1996                 }
1997                 return I40E_SUCCESS;
1998         }
1999
2000         /* get number of packet types */
2001         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2002                 struct i40e_profile_section_header *ptype;
2003
2004                 if (info_size < sizeof(uint32_t)) {
2005                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2006                         return -EINVAL;
2007                 }
2008                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2009                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2010                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2011                 return I40E_SUCCESS;
2012         }
2013
2014         /* get list of packet types */
2015         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2016                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2017                 struct rte_pmd_i40e_ptype_info *pinfo;
2018                 struct i40e_profile_section_header *ptype;
2019                 struct i40e_profile_tlv_section_record *tlv;
2020
2021                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2022                 nb_proto_info = info_size /
2023                                         sizeof(struct rte_pmd_i40e_ptype_info);
2024                 for (i = 0; i < nb_proto_info; i++)
2025                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2026                                sizeof(struct rte_pmd_i40e_ptype_info));
2027                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2028                                 (struct i40e_profile_segment *)i40e_seg_hdr);
2029                 nb_tlv = i40e_get_tlv_section_size(ptype);
2030                 if (nb_tlv == 0)
2031                         return I40E_SUCCESS;
2032                 if (nb_proto_info < nb_tlv) {
2033                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
2034                         return -EINVAL;
2035                 }
2036                 /* get number of records in the section */
2037                 nb_rec = ptype->section.size /
2038                                 sizeof(struct i40e_profile_tlv_section_record);
2039                 for (i = j = 0; i < nb_rec; j++) {
2040                         tlv = (struct i40e_profile_tlv_section_record *)
2041                                                                 &ptype[1 + i];
2042                         memcpy(&pinfo[j], tlv->data,
2043                                sizeof(struct rte_pmd_i40e_ptype_info));
2044                         i += tlv->len;
2045                 }
2046                 return I40E_SUCCESS;
2047         }
2048
2049         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2050         return -EINVAL;
2051 }
2052
2053 int
2054 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2055 {
2056         struct rte_eth_dev *dev;
2057         struct i40e_hw *hw;
2058         enum i40e_status_code status = I40E_SUCCESS;
2059
2060         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2061
2062         dev = &rte_eth_devices[port];
2063
2064         if (!is_i40e_supported(dev))
2065                 return -ENOTSUP;
2066
2067         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2068                 return -EINVAL;
2069
2070         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071
2072         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2073                                       size, 0, NULL);
2074
2075         return status;
2076 }
2077
2078 static int check_invalid_pkt_type(uint32_t pkt_type)
2079 {
2080         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2081
2082         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2083         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2084         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2085         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2086         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2087         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2088         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2089
2090         if (l2 &&
2091             l2 != RTE_PTYPE_L2_ETHER &&
2092             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2093             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2094             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2095             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2096             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2097             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2098             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2099                 return -1;
2100
2101         if (l3 &&
2102             l3 != RTE_PTYPE_L3_IPV4 &&
2103             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2104             l3 != RTE_PTYPE_L3_IPV6 &&
2105             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2106             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2107             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2108                 return -1;
2109
2110         if (l4 &&
2111             l4 != RTE_PTYPE_L4_TCP &&
2112             l4 != RTE_PTYPE_L4_UDP &&
2113             l4 != RTE_PTYPE_L4_FRAG &&
2114             l4 != RTE_PTYPE_L4_SCTP &&
2115             l4 != RTE_PTYPE_L4_ICMP &&
2116             l4 != RTE_PTYPE_L4_NONFRAG)
2117                 return -1;
2118
2119         if (tnl &&
2120             tnl != RTE_PTYPE_TUNNEL_IP &&
2121             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2122             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2123             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2124             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2125             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2126             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2127             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2128             tnl != RTE_PTYPE_TUNNEL_L2TP)
2129                 return -1;
2130
2131         if (il2 &&
2132             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2133             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2134             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2135                 return -1;
2136
2137         if (il3 &&
2138             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2139             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2140             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2141             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2142             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2143             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2144                 return -1;
2145
2146         if (il4 &&
2147             il4 != RTE_PTYPE_INNER_L4_TCP &&
2148             il4 != RTE_PTYPE_INNER_L4_UDP &&
2149             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2150             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2151             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2152             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2153                 return -1;
2154
2155         return 0;
2156 }
2157
2158 static int check_invalid_ptype_mapping(
2159                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2160                 uint16_t count)
2161 {
2162         int i;
2163
2164         for (i = 0; i < count; i++) {
2165                 uint16_t ptype = mapping_table[i].hw_ptype;
2166                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2167
2168                 if (ptype >= I40E_MAX_PKT_TYPE)
2169                         return -1;
2170
2171                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2172                         continue;
2173
2174                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2175                         continue;
2176
2177                 if (check_invalid_pkt_type(pkt_type))
2178                         return -1;
2179         }
2180
2181         return 0;
2182 }
2183
2184 int
2185 rte_pmd_i40e_ptype_mapping_update(
2186                         uint16_t port,
2187                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2188                         uint16_t count,
2189                         uint8_t exclusive)
2190 {
2191         struct rte_eth_dev *dev;
2192         struct i40e_adapter *ad;
2193         int i;
2194
2195         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2196
2197         dev = &rte_eth_devices[port];
2198
2199         if (!is_i40e_supported(dev))
2200                 return -ENOTSUP;
2201
2202         if (count > I40E_MAX_PKT_TYPE)
2203                 return -EINVAL;
2204
2205         if (check_invalid_ptype_mapping(mapping_items, count))
2206                 return -EINVAL;
2207
2208         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2209
2210         if (exclusive) {
2211                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2212                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2213         }
2214
2215         for (i = 0; i < count; i++)
2216                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2217                         = mapping_items[i].sw_ptype;
2218
2219         return 0;
2220 }
2221
2222 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2223 {
2224         struct rte_eth_dev *dev;
2225
2226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2227
2228         dev = &rte_eth_devices[port];
2229
2230         if (!is_i40e_supported(dev))
2231                 return -ENOTSUP;
2232
2233         i40e_set_default_ptype_table(dev);
2234
2235         return 0;
2236 }
2237
2238 int rte_pmd_i40e_ptype_mapping_get(
2239                         uint16_t port,
2240                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2241                         uint16_t size,
2242                         uint16_t *count,
2243                         uint8_t valid_only)
2244 {
2245         struct rte_eth_dev *dev;
2246         struct i40e_adapter *ad;
2247         int n = 0;
2248         uint16_t i;
2249
2250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2251
2252         dev = &rte_eth_devices[port];
2253
2254         if (!is_i40e_supported(dev))
2255                 return -ENOTSUP;
2256
2257         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2258
2259         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2260                 if (n >= size)
2261                         break;
2262                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2263                         continue;
2264                 mapping_items[n].hw_ptype = i;
2265                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2266                 n++;
2267         }
2268
2269         *count = n;
2270         return 0;
2271 }
2272
2273 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2274                                        uint32_t target,
2275                                        uint8_t mask,
2276                                        uint32_t pkt_type)
2277 {
2278         struct rte_eth_dev *dev;
2279         struct i40e_adapter *ad;
2280         uint16_t i;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2283
2284         dev = &rte_eth_devices[port];
2285
2286         if (!is_i40e_supported(dev))
2287                 return -ENOTSUP;
2288
2289         if (!mask && check_invalid_pkt_type(target))
2290                 return -EINVAL;
2291
2292         if (check_invalid_pkt_type(pkt_type))
2293                 return -EINVAL;
2294
2295         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2296
2297         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2298                 if (mask) {
2299                         if ((target | ad->ptype_tbl[i]) == target &&
2300                             (target & ad->ptype_tbl[i]))
2301                                 ad->ptype_tbl[i] = pkt_type;
2302                 } else {
2303                         if (ad->ptype_tbl[i] == target)
2304                                 ad->ptype_tbl[i] = pkt_type;
2305                 }
2306         }
2307
2308         return 0;
2309 }
2310
2311 int
2312 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2313                              struct ether_addr *mac_addr)
2314 {
2315         struct rte_eth_dev *dev;
2316         struct i40e_pf_vf *vf;
2317         struct i40e_vsi *vsi;
2318         struct i40e_pf *pf;
2319         struct i40e_mac_filter_info mac_filter;
2320         int ret;
2321
2322         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2323                 return -EINVAL;
2324
2325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2326
2327         dev = &rte_eth_devices[port];
2328
2329         if (!is_i40e_supported(dev))
2330                 return -ENOTSUP;
2331
2332         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2333
2334         if (vf_id >= pf->vf_num || !pf->vfs)
2335                 return -EINVAL;
2336
2337         vf = &pf->vfs[vf_id];
2338         vsi = vf->vsi;
2339         if (!vsi) {
2340                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2341                 return -EINVAL;
2342         }
2343
2344         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2345         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2346         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2347         if (ret != I40E_SUCCESS) {
2348                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2349                 return -1;
2350         }
2351
2352         return 0;
2353 }
2354
2355 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2356 {
2357         struct rte_eth_dev *dev;
2358
2359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2360
2361         dev = &rte_eth_devices[port];
2362
2363         if (!is_i40e_supported(dev))
2364                 return -ENOTSUP;
2365
2366         i40e_set_default_pctype_table(dev);
2367
2368         return 0;
2369 }
2370
2371 int rte_pmd_i40e_flow_type_mapping_get(
2372                         uint16_t port,
2373                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2374 {
2375         struct rte_eth_dev *dev;
2376         struct i40e_adapter *ad;
2377         uint16_t i;
2378
2379         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2380
2381         dev = &rte_eth_devices[port];
2382
2383         if (!is_i40e_supported(dev))
2384                 return -ENOTSUP;
2385
2386         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2387
2388         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2389                 mapping_items[i].flow_type = i;
2390                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2391         }
2392
2393         return 0;
2394 }
2395
2396 int
2397 rte_pmd_i40e_flow_type_mapping_update(
2398                         uint16_t port,
2399                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2400                         uint16_t count,
2401                         uint8_t exclusive)
2402 {
2403         struct rte_eth_dev *dev;
2404         struct i40e_adapter *ad;
2405         int i;
2406
2407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2408
2409         dev = &rte_eth_devices[port];
2410
2411         if (!is_i40e_supported(dev))
2412                 return -ENOTSUP;
2413
2414         if (count > I40E_FLOW_TYPE_MAX)
2415                 return -EINVAL;
2416
2417         for (i = 0; i < count; i++)
2418                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2419                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2420                     (mapping_items[i].pctype &
2421                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2422                         return -EINVAL;
2423
2424         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2425
2426         if (exclusive) {
2427                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2428                         ad->pctypes_tbl[i] = 0ULL;
2429                 ad->flow_types_mask = 0ULL;
2430         }
2431
2432         for (i = 0; i < count; i++) {
2433                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2434                                                 mapping_items[i].pctype;
2435                 if (mapping_items[i].pctype)
2436                         ad->flow_types_mask |=
2437                                         (1ULL << mapping_items[i].flow_type);
2438                 else
2439                         ad->flow_types_mask &=
2440                                         ~(1ULL << mapping_items[i].flow_type);
2441         }
2442
2443         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2444                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2445
2446         return 0;
2447 }
2448
2449 int
2450 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2451 {
2452         struct rte_eth_dev *dev;
2453         struct ether_addr *mac;
2454         struct i40e_pf *pf;
2455         int vf_id;
2456         struct i40e_pf_vf *vf;
2457         uint16_t vf_num;
2458
2459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2460         dev = &rte_eth_devices[port];
2461
2462         if (!is_i40e_supported(dev))
2463                 return -ENOTSUP;
2464
2465         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2466         vf_num = pf->vf_num;
2467
2468         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2469                 vf = &pf->vfs[vf_id];
2470                 mac = &vf->mac_addr;
2471
2472                 if (is_same_ether_addr(mac, vf_mac))
2473                         return vf_id;
2474         }
2475
2476         return -EINVAL;
2477 }
2478
/* Program the main VSI's TC/queue mapping from the driver's stored queue
 * region configuration: each configured region becomes a traffic class
 * whose queue offset and size (as log2, via rte_bsf32) are packed into
 * the VSI context, which is then committed to firmware with an
 * "update VSI parameters" admin-queue command.  On success the local
 * VSI info cache is refreshed from the committed context.
 *
 * Returns 0 on success, -EINVAL if no queue region has been configured,
 * or the admin-queue error code on commit failure.
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* Start from a clean mapping; only regions filled below are valid. */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	for (i = 0; i < region_info->queue_region_number; i++) {
		tc_index = region_info->region[i].region_id;
		/* queue_num is a power of two; hardware wants its log2 */
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	/* valid_sections only flags what the next AQ update should touch */
	vsi->info.valid_sections = 0;

	return 0;
}
2548
2549
2550 static int
2551 i40e_queue_region_set_region(struct i40e_pf *pf,
2552                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2553 {
2554         uint16_t i;
2555         struct i40e_vsi *main_vsi = pf->main_vsi;
2556         struct i40e_queue_regions *info = &pf->queue_region;
2557         int32_t ret = -EINVAL;
2558
2559         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2560                                 conf_ptr->queue_num <= 64)) {
2561                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2562                         "total number of queues do not exceed the VSI allocation");
2563                 return ret;
2564         }
2565
2566         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2567                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2568                 return ret;
2569         }
2570
2571         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2572                                         > main_vsi->nb_used_qps) {
2573                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2574                 return ret;
2575         }
2576
2577         for (i = 0; i < info->queue_region_number; i++)
2578                 if (conf_ptr->region_id == info->region[i].region_id)
2579                         break;
2580
2581         if (i == info->queue_region_number &&
2582                                 i <= I40E_REGION_MAX_INDEX) {
2583                 info->region[i].region_id = conf_ptr->region_id;
2584                 info->region[i].queue_num = conf_ptr->queue_num;
2585                 info->region[i].queue_start_index =
2586                         conf_ptr->queue_start_index;
2587                 info->queue_region_number++;
2588         } else {
2589                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2590                 return ret;
2591         }
2592
2593         return 0;
2594 }
2595
2596 static int
2597 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2598                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2599 {
2600         int32_t ret = -EINVAL;
2601         struct i40e_queue_regions *info = &pf->queue_region;
2602         uint16_t i, j;
2603         uint16_t region_index, flowtype_index;
2604
2605         /* For the pctype or hardware flowtype of packet,
2606          * the specific index for each type has been defined
2607          * in file i40e_type.h as enum i40e_filter_pctype.
2608          */
2609
2610         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2611                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2612                 return ret;
2613         }
2614
2615         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2616                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2617                 return ret;
2618         }
2619
2620
2621         for (i = 0; i < info->queue_region_number; i++)
2622                 if (rss_region_conf->region_id == info->region[i].region_id)
2623                         break;
2624
2625         if (i == info->queue_region_number) {
2626                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2627                 ret = -EINVAL;
2628                 return ret;
2629         }
2630         region_index = i;
2631
2632         for (i = 0; i < info->queue_region_number; i++) {
2633                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2634                         if (rss_region_conf->hw_flowtype ==
2635                                 info->region[i].hw_flowtype[j]) {
2636                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2637                                 return 0;
2638                         }
2639                 }
2640         }
2641
2642         flowtype_index = info->region[region_index].flowtype_num;
2643         info->region[region_index].hw_flowtype[flowtype_index] =
2644                                         rss_region_conf->hw_flowtype;
2645         info->region[region_index].flowtype_num++;
2646
2647         return 0;
2648 }
2649
2650 static void
2651 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2652                                 struct i40e_pf *pf)
2653 {
2654         uint8_t hw_flowtype;
2655         uint32_t pfqf_hregion;
2656         uint16_t i, j, index;
2657         struct i40e_queue_regions *info = &pf->queue_region;
2658
2659         /* For the pctype or hardware flowtype of packet,
2660          * the specific index for each type has been defined
2661          * in file i40e_type.h as enum i40e_filter_pctype.
2662          */
2663
2664         for (i = 0; i < info->queue_region_number; i++) {
2665                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2666                         hw_flowtype = info->region[i].hw_flowtype[j];
2667                         index = hw_flowtype >> 3;
2668                         pfqf_hregion =
2669                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2670
2671                         if ((hw_flowtype & 0x7) == 0) {
2672                                 pfqf_hregion |= info->region[i].region_id <<
2673                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2674                                 pfqf_hregion |= 1 <<
2675                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2676                         } else if ((hw_flowtype & 0x7) == 1) {
2677                                 pfqf_hregion |= info->region[i].region_id  <<
2678                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2679                                 pfqf_hregion |= 1 <<
2680                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2681                         } else if ((hw_flowtype & 0x7) == 2) {
2682                                 pfqf_hregion |= info->region[i].region_id  <<
2683                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2684                                 pfqf_hregion |= 1 <<
2685                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2686                         } else if ((hw_flowtype & 0x7) == 3) {
2687                                 pfqf_hregion |= info->region[i].region_id  <<
2688                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2689                                 pfqf_hregion |= 1 <<
2690                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2691                         } else if ((hw_flowtype & 0x7) == 4) {
2692                                 pfqf_hregion |= info->region[i].region_id  <<
2693                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2694                                 pfqf_hregion |= 1 <<
2695                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2696                         } else if ((hw_flowtype & 0x7) == 5) {
2697                                 pfqf_hregion |= info->region[i].region_id  <<
2698                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2699                                 pfqf_hregion |= 1 <<
2700                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2701                         } else if ((hw_flowtype & 0x7) == 6) {
2702                                 pfqf_hregion |= info->region[i].region_id  <<
2703                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2704                                 pfqf_hregion |= 1 <<
2705                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2706                         } else {
2707                                 pfqf_hregion |= info->region[i].region_id  <<
2708                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2709                                 pfqf_hregion |= 1 <<
2710                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2711                         }
2712
2713                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2714                                                 pfqf_hregion);
2715                 }
2716         }
2717 }
2718
2719 static int
2720 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2721                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2722 {
2723         struct i40e_queue_regions *info = &pf->queue_region;
2724         int32_t ret = -EINVAL;
2725         uint16_t i, j, region_index;
2726
2727         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2728                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2729                 return ret;
2730         }
2731
2732         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2733                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2734                 return ret;
2735         }
2736
2737         for (i = 0; i < info->queue_region_number; i++)
2738                 if (rss_region_conf->region_id == info->region[i].region_id)
2739                         break;
2740
2741         if (i == info->queue_region_number) {
2742                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2743                 ret = -EINVAL;
2744                 return ret;
2745         }
2746
2747         region_index = i;
2748
2749         for (i = 0; i < info->queue_region_number; i++) {
2750                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2751                         if (info->region[i].user_priority[j] ==
2752                                 rss_region_conf->user_priority) {
2753                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2754                                 return 0;
2755                         }
2756                 }
2757         }
2758
2759         j = info->region[region_index].user_priority_num;
2760         info->region[region_index].user_priority[j] =
2761                                         rss_region_conf->user_priority;
2762         info->region[region_index].user_priority_num++;
2763
2764         return 0;
2765 }
2766
/* Build a DCB (ETS + PFC) configuration that mirrors the stored queue
 * regions: one traffic class per region with (roughly) equal bandwidth,
 * each user priority mapped to its region's TC, and PFC enabled for all
 * configured TCs.  The new config replaces hw->local_dcbx_config and is
 * committed to firmware with i40e_set_dcb_config().
 *
 * Returns 0 on success, -EINVAL if no queue region has been configured,
 * or the status from i40e_set_dcb_config() on commit failure.
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	struct i40e_dcbx_config dcb_cfg_local;
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	int32_t ret = -EINVAL;
	uint16_t i, j, prio_index, region_index;
	uint8_t tc_map, tc_bw, bw_lf;

	if (!info->queue_region_number) {
		PMD_DRV_LOG(ERR, "No queue region been set before");
		return ret;
	}

	dcb_cfg = &dcb_cfg_local;
	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

	/* assume each tc has the same bw */
	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
	/* to ensure the sum of tcbw is equal to 100 */
	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
	for (i = 0; i < bw_lf; i++)
		dcb_cfg->etscfg.tcbwtable[i]++;

	/* assume each tc has the same Transmission Selection Algorithm */
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

	/* map every user priority recorded on a region to that region's TC */
	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].user_priority_num; j++) {
			prio_index = info->region[i].user_priority[j];
			region_index = info->region[i].region_id;
			dcb_cfg->etscfg.prioritytable[prio_index] =
						region_index;
		}
	}

	/* FW needs one App to configure HW */
	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

	/* one bit per configured region/TC */
	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

	dcb_cfg->pfc.willing = 0;
	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->pfc.pfcenable = tc_map;

	/* Copy the new config to the current config */
	*old_cfg = *dcb_cfg;
	/* recommended config mirrors the operational one */
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);

	if (ret) {
		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
2835
2836 int
2837 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2838         struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2839 {
2840         int32_t ret = -EINVAL;
2841         struct i40e_queue_regions *info = &pf->queue_region;
2842         struct i40e_vsi *main_vsi = pf->main_vsi;
2843
2844         if (on) {
2845                 i40e_queue_region_pf_flowtype_conf(hw, pf);
2846
2847                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2848                 if (ret != I40E_SUCCESS) {
2849                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2850                         return ret;
2851                 }
2852
2853                 ret = i40e_queue_region_dcb_configure(hw, pf);
2854                 if (ret != I40E_SUCCESS) {
2855                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2856                         return ret;
2857                 }
2858
2859                 return 0;
2860         }
2861
2862         if (info->queue_region_number) {
2863                 info->queue_region_number = 1;
2864                 info->region[0].queue_num = main_vsi->nb_used_qps;
2865                 info->region[0].queue_start_index = 0;
2866
2867                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2868                 if (ret != I40E_SUCCESS)
2869                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2870
2871                 ret = i40e_dcb_init_configure(dev, TRUE);
2872                 if (ret != I40E_SUCCESS) {
2873                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2874                         pf->flags &= ~I40E_FLAG_DCB;
2875                 }
2876
2877                 i40e_init_queue_region_conf(dev);
2878         }
2879         return 0;
2880 }
2881
/* Check whether RSS is enabled on the PF by reading both halves of the
 * hash-enable (HENA) register pair.
 *
 * Returns 0 when at least one hash-enable bit is set, -ENOTSUP otherwise.
 */
static int
i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena_lo, hena_hi;

	hena_lo = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
	hena_hi = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));

	return (hena_lo | (hena_hi << 32)) ? 0 : -ENOTSUP;
}
2896
2897 static int
2898 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2899                 struct i40e_queue_regions *regions_ptr)
2900 {
2901         struct i40e_queue_regions *info = &pf->queue_region;
2902
2903         rte_memcpy(regions_ptr, info,
2904                         sizeof(struct i40e_queue_regions));
2905
2906         return 0;
2907 }
2908
2909 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2910                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2911 {
2912         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2913         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2914         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2915         int32_t ret;
2916
2917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2918
2919         if (!is_i40e_supported(dev))
2920                 return -ENOTSUP;
2921
2922         if (!(!i40e_queue_region_pf_check_rss(pf)))
2923                 return -ENOTSUP;
2924
2925         /* This queue region feature only support pf by now. It should
2926          * be called after dev_start, and will be clear after dev_stop.
2927          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2928          * is just an enable function which server for other configuration,
2929          * it is for all configuration about queue region from up layer,
2930          * at first will only keep in DPDK softwarestored in driver,
2931          * only after "FLUSH_ON", it commit all configuration to HW.
2932          * Because PMD had to set hardware configuration at a time, so
2933          * it will record all up layer command at first.
2934          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2935          * just clean all configuration about queue region just now,
2936          * and restore all to DPDK i40e driver default
2937          * config when start up.
2938          */
2939
2940         switch (op_type) {
2941         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2942                 ret = i40e_queue_region_set_region(pf,
2943                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2944                 break;
2945         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2946                 ret = i40e_queue_region_set_flowtype(pf,
2947                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2948                 break;
2949         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2950                 ret = i40e_queue_region_set_user_priority(pf,
2951                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2952                 break;
2953         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2954                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2955                 break;
2956         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2957                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2958                 break;
2959         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2960                 ret = i40e_queue_region_get_all_info(pf,
2961                                 (struct i40e_queue_regions *)arg);
2962                 break;
2963         default:
2964                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2965                             op_type);
2966                 ret = -EINVAL;
2967         }
2968
2969         I40E_WRITE_FLUSH(hw);
2970
2971         return ret;
2972 }
2973
2974 int rte_pmd_i40e_flow_add_del_packet_template(
2975                         uint16_t port,
2976                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2977                         uint8_t add)
2978 {
2979         struct rte_eth_dev *dev = &rte_eth_devices[port];
2980         struct i40e_fdir_filter_conf filter_conf;
2981
2982         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2983
2984         if (!is_i40e_supported(dev))
2985                 return -ENOTSUP;
2986
2987         memset(&filter_conf, 0, sizeof(filter_conf));
2988         filter_conf.soft_id = conf->soft_id;
2989         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
2990         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
2991         filter_conf.input.flow.raw_flow.length = conf->input.length;
2992         filter_conf.input.flow_ext.pkt_template = true;
2993
2994         filter_conf.action.rx_queue = conf->action.rx_queue;
2995         filter_conf.action.behavior =
2996                 (enum i40e_fdir_behavior)conf->action.behavior;
2997         filter_conf.action.report_status =
2998                 (enum i40e_fdir_status)conf->action.report_status;
2999         filter_conf.action.flex_off = conf->action.flex_off;
3000
3001         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3002 }
3003
3004 int
3005 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
3006                        struct rte_pmd_i40e_inset *inset,
3007                        enum rte_pmd_i40e_inset_type inset_type)
3008 {
3009         struct rte_eth_dev *dev;
3010         struct i40e_hw *hw;
3011         uint64_t inset_reg;
3012         uint32_t mask_reg[2];
3013         int i;
3014
3015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3016
3017         dev = &rte_eth_devices[port];
3018
3019         if (!is_i40e_supported(dev))
3020                 return -ENOTSUP;
3021
3022         if (pctype > 63)
3023                 return -EINVAL;
3024
3025         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3026         memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
3027
3028         switch (inset_type) {
3029         case INSET_HASH:
3030                 /* Get input set */
3031                 inset_reg =
3032                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
3033                 inset_reg <<= I40E_32_BIT_WIDTH;
3034                 inset_reg |=
3035                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
3036                 /* Get field mask */
3037                 mask_reg[0] =
3038                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
3039                 mask_reg[1] =
3040                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
3041                 break;
3042         case INSET_FDIR:
3043                 inset_reg =
3044                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3045                 inset_reg <<= I40E_32_BIT_WIDTH;
3046                 inset_reg |=
3047                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3048                 mask_reg[0] =
3049                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3050                 mask_reg[1] =
3051                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3052                 break;
3053         case INSET_FDIR_FLX:
3054                 inset_reg =
3055                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3056                 mask_reg[0] =
3057                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3058                 mask_reg[1] =
3059                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3060                 break;
3061         default:
3062                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3063                 return -EINVAL;
3064         }
3065
3066         inset->inset = inset_reg;
3067
3068         for (i = 0; i < 2; i++) {
3069                 inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3070                 inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3071         }
3072
3073         return 0;
3074 }
3075
/* Write the input set (field selection) and field masks for one PCTYPE
 * to hardware, for the hash, flow director or flow director flexible
 * payload stage.  The hash inset/mask and the FDIR mask registers are
 * device-global (shared across PFs), hence the multi-driver guard and
 * the global-config warnings below.
 *
 * @param port        ethdev port identifier; must name a valid i40e port
 * @param pctype      hardware packet classifier type index (0..63)
 * @param inset       64-bit input set plus two field masks to program
 * @param inset_type  which register set to write (hash / FDIR / FDIR flex)
 * @return 0 on success, negative errno on failure
 */
int
rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
		       struct rte_pmd_i40e_inset *inset,
		       enum rte_pmd_i40e_inset_type inset_type)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint64_t inset_reg;
	uint32_t mask_reg[2];
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	/* PCTYPE is a 6-bit hardware index. */
	if (pctype > 63)
		return -EINVAL;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* Global registers must not be touched when several drivers
	 * share the device.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
		return -ENOTSUP;
	}

	/* Pack field index (bits 21:16) and 16-bit mask into each word. */
	inset_reg = inset->inset;
	for (i = 0; i < 2; i++)
		mask_reg[i] = (inset->mask[i].field_idx << 16) |
			inset->mask[i].mask;

	switch (inset_type) {
	case INSET_HASH:
		/* 64-bit input set split across two 32-bit registers. */
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
					    (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
					    (uint32_t)((inset_reg >>
					     I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  mask_reg[i]);
		i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
		i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
		break;
	case INSET_FDIR:
		/* Inset registers are per-port; mask registers are global. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				     (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
					      I40E_32_BIT_WIDTH) & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    mask_reg[i]);
		i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
		break;
	case INSET_FDIR_FLX:
		/* Flexible-payload input set fits one 32-bit register. */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
				     (uint32_t)(inset_reg & UINT32_MAX));
		for (i = 0; i < 2; i++)
			i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
					     mask_reg[i]);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported input set type.");
		return -EINVAL;
	}

	I40E_WRITE_FLUSH(hw);
	return 0;
}