net/i40e: support input set configuration
[dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_tailq.h>
7
8 #include "base/i40e_prototype.h"
9 #include "base/i40e_dcb.h"
10 #include "i40e_ethdev.h"
11 #include "i40e_pf.h"
12 #include "i40e_rxtx.h"
13 #include "rte_pmd_i40e.h"
14
15 int
16 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
17 {
18         struct rte_eth_dev *dev;
19         struct i40e_pf *pf;
20
21         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
22
23         dev = &rte_eth_devices[port];
24
25         if (!is_i40e_supported(dev))
26                 return -ENOTSUP;
27
28         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
29
30         if (vf >= pf->vf_num || !pf->vfs) {
31                 PMD_DRV_LOG(ERR, "Invalid argument.");
32                 return -EINVAL;
33         }
34
35         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
36
37         return 0;
38 }
39
40 int
41 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
42 {
43         struct rte_eth_dev *dev;
44         struct i40e_pf *pf;
45         struct i40e_vsi *vsi;
46         struct i40e_hw *hw;
47         struct i40e_vsi_context ctxt;
48         int ret;
49
50         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
51
52         dev = &rte_eth_devices[port];
53
54         if (!is_i40e_supported(dev))
55                 return -ENOTSUP;
56
57         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
58
59         if (vf_id >= pf->vf_num || !pf->vfs) {
60                 PMD_DRV_LOG(ERR, "Invalid argument.");
61                 return -EINVAL;
62         }
63
64         vsi = pf->vfs[vf_id].vsi;
65         if (!vsi) {
66                 PMD_DRV_LOG(ERR, "Invalid VSI.");
67                 return -EINVAL;
68         }
69
70         /* Check if it has been already on or off */
71         if (vsi->info.valid_sections &
72                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
73                 if (on) {
74                         if ((vsi->info.sec_flags &
75                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
76                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
77                                 return 0; /* already on */
78                 } else {
79                         if ((vsi->info.sec_flags &
80                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
81                                 return 0; /* already off */
82                 }
83         }
84
85         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
86         if (on)
87                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
88         else
89                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
90
91         memset(&ctxt, 0, sizeof(ctxt));
92         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
93         ctxt.seid = vsi->seid;
94
95         hw = I40E_VSI_TO_HW(vsi);
96         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
97         if (ret != I40E_SUCCESS) {
98                 ret = -ENOTSUP;
99                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
100         }
101
102         return ret;
103 }
104
105 static int
106 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
107 {
108         uint32_t j, k;
109         uint16_t vlan_id;
110         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
111         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
112         int ret;
113
114         for (j = 0; j < I40E_VFTA_SIZE; j++) {
115                 if (!vsi->vfta[j])
116                         continue;
117
118                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
119                         if (!(vsi->vfta[j] & (1 << k)))
120                                 continue;
121
122                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
123                         if (!vlan_id)
124                                 continue;
125
126                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
127                         if (add)
128                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
129                                                        &vlan_data, 1, NULL);
130                         else
131                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
132                                                           &vlan_data, 1, NULL);
133                         if (ret != I40E_SUCCESS) {
134                                 PMD_DRV_LOG(ERR,
135                                             "Failed to add/rm vlan filter");
136                                 return ret;
137                         }
138                 }
139         }
140
141         return I40E_SUCCESS;
142 }
143
144 int
145 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
146 {
147         struct rte_eth_dev *dev;
148         struct i40e_pf *pf;
149         struct i40e_vsi *vsi;
150         struct i40e_hw *hw;
151         struct i40e_vsi_context ctxt;
152         int ret;
153
154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
155
156         dev = &rte_eth_devices[port];
157
158         if (!is_i40e_supported(dev))
159                 return -ENOTSUP;
160
161         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
162
163         if (vf_id >= pf->vf_num || !pf->vfs) {
164                 PMD_DRV_LOG(ERR, "Invalid argument.");
165                 return -EINVAL;
166         }
167
168         vsi = pf->vfs[vf_id].vsi;
169         if (!vsi) {
170                 PMD_DRV_LOG(ERR, "Invalid VSI.");
171                 return -EINVAL;
172         }
173
174         /* Check if it has been already on or off */
175         if (vsi->vlan_anti_spoof_on == on)
176                 return 0; /* already on or off */
177
178         vsi->vlan_anti_spoof_on = on;
179         if (!vsi->vlan_filter_on) {
180                 ret = i40e_add_rm_all_vlan_filter(vsi, on);
181                 if (ret) {
182                         PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
183                         return -ENOTSUP;
184                 }
185         }
186
187         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
188         if (on)
189                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
190         else
191                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
192
193         memset(&ctxt, 0, sizeof(ctxt));
194         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
195         ctxt.seid = vsi->seid;
196
197         hw = I40E_VSI_TO_HW(vsi);
198         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
199         if (ret != I40E_SUCCESS) {
200                 ret = -ENOTSUP;
201                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
202         }
203
204         return ret;
205 }
206
/**
 * Remove every MAC(/VLAN) filter of the VSI from hardware.
 *
 * The software mac_list is NOT modified here; filters are only removed
 * from HW so they can be restored later by i40e_vsi_restore_mac_filter().
 *
 * @return I40E_SUCCESS, or an i40e error code on the first failure.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* MAC+VLAN filters need at least one VLAN entry */
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			   filter_type == RTE_MAC_HASH_MATCH)
			/* MAC-only filter: a single entry suffices */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Pre-fill each entry with the filter type and MAC address */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN IDs associated with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
265
/**
 * Re-program into hardware every MAC(/VLAN) filter recorded in the VSI's
 * software mac_list (counterpart of i40e_vsi_rm_mac_filter()).
 *
 * @return I40E_SUCCESS, or an i40e error code on the first failure.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
			/* MAC-only filter: a single entry suffices */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Pre-fill each entry with the filter type and MAC address */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN IDs associated with this MAC */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
327
328 static int
329 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
330 {
331         struct i40e_vsi_context ctxt;
332         struct i40e_hw *hw;
333         int ret;
334
335         if (!vsi)
336                 return -EINVAL;
337
338         hw = I40E_VSI_TO_HW(vsi);
339
340         /* Use the FW API if FW >= v5.0 */
341         if (hw->aq.fw_maj_ver < 5) {
342                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
343                 return -ENOTSUP;
344         }
345
346         /* Check if it has been already on or off */
347         if (vsi->info.valid_sections &
348                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
349                 if (on) {
350                         if ((vsi->info.switch_id &
351                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
352                             I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
353                                 return 0; /* already on */
354                 } else {
355                         if ((vsi->info.switch_id &
356                              I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
357                                 return 0; /* already off */
358                 }
359         }
360
361         /* remove all the MAC and VLAN first */
362         ret = i40e_vsi_rm_mac_filter(vsi);
363         if (ret) {
364                 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
365                 return ret;
366         }
367         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
368                 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
369                 if (ret) {
370                         PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
371                         return ret;
372                 }
373         }
374
375         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
376         if (on)
377                 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
378         else
379                 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
380
381         memset(&ctxt, 0, sizeof(ctxt));
382         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
383         ctxt.seid = vsi->seid;
384
385         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
386         if (ret != I40E_SUCCESS) {
387                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
388                 return ret;
389         }
390
391         /* add all the MAC and VLAN back */
392         ret = i40e_vsi_restore_mac_filter(vsi);
393         if (ret)
394                 return ret;
395         if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
396                 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
397                 if (ret)
398                         return ret;
399         }
400
401         return ret;
402 }
403
404 int
405 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
406 {
407         struct rte_eth_dev *dev;
408         struct i40e_pf *pf;
409         struct i40e_pf_vf *vf;
410         struct i40e_vsi *vsi;
411         uint16_t vf_id;
412         int ret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
415
416         dev = &rte_eth_devices[port];
417
418         if (!is_i40e_supported(dev))
419                 return -ENOTSUP;
420
421         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
422
423         /* setup PF TX loopback */
424         vsi = pf->main_vsi;
425         ret = i40e_vsi_set_tx_loopback(vsi, on);
426         if (ret)
427                 return -ENOTSUP;
428
429         /* setup TX loopback for all the VFs */
430         if (!pf->vfs) {
431                 /* if no VF, do nothing. */
432                 return 0;
433         }
434
435         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
436                 vf = &pf->vfs[vf_id];
437                 vsi = vf->vsi;
438
439                 ret = i40e_vsi_set_tx_loopback(vsi, on);
440                 if (ret)
441                         return -ENOTSUP;
442         }
443
444         return ret;
445 }
446
447 int
448 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
449 {
450         struct rte_eth_dev *dev;
451         struct i40e_pf *pf;
452         struct i40e_vsi *vsi;
453         struct i40e_hw *hw;
454         int ret;
455
456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
457
458         dev = &rte_eth_devices[port];
459
460         if (!is_i40e_supported(dev))
461                 return -ENOTSUP;
462
463         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
464
465         if (vf_id >= pf->vf_num || !pf->vfs) {
466                 PMD_DRV_LOG(ERR, "Invalid argument.");
467                 return -EINVAL;
468         }
469
470         vsi = pf->vfs[vf_id].vsi;
471         if (!vsi) {
472                 PMD_DRV_LOG(ERR, "Invalid VSI.");
473                 return -EINVAL;
474         }
475
476         hw = I40E_VSI_TO_HW(vsi);
477
478         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
479                                                   on, NULL, true);
480         if (ret != I40E_SUCCESS) {
481                 ret = -ENOTSUP;
482                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
483         }
484
485         return ret;
486 }
487
488 int
489 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
490 {
491         struct rte_eth_dev *dev;
492         struct i40e_pf *pf;
493         struct i40e_vsi *vsi;
494         struct i40e_hw *hw;
495         int ret;
496
497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
498
499         dev = &rte_eth_devices[port];
500
501         if (!is_i40e_supported(dev))
502                 return -ENOTSUP;
503
504         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
505
506         if (vf_id >= pf->vf_num || !pf->vfs) {
507                 PMD_DRV_LOG(ERR, "Invalid argument.");
508                 return -EINVAL;
509         }
510
511         vsi = pf->vfs[vf_id].vsi;
512         if (!vsi) {
513                 PMD_DRV_LOG(ERR, "Invalid VSI.");
514                 return -EINVAL;
515         }
516
517         hw = I40E_VSI_TO_HW(vsi);
518
519         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
520                                                     on, NULL);
521         if (ret != I40E_SUCCESS) {
522                 ret = -ENOTSUP;
523                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
524         }
525
526         return ret;
527 }
528
529 int
530 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
531                              struct ether_addr *mac_addr)
532 {
533         struct i40e_mac_filter *f;
534         struct rte_eth_dev *dev;
535         struct i40e_pf_vf *vf;
536         struct i40e_vsi *vsi;
537         struct i40e_pf *pf;
538         void *temp;
539
540         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
541                 return -EINVAL;
542
543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
544
545         dev = &rte_eth_devices[port];
546
547         if (!is_i40e_supported(dev))
548                 return -ENOTSUP;
549
550         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
551
552         if (vf_id >= pf->vf_num || !pf->vfs)
553                 return -EINVAL;
554
555         vf = &pf->vfs[vf_id];
556         vsi = vf->vsi;
557         if (!vsi) {
558                 PMD_DRV_LOG(ERR, "Invalid VSI.");
559                 return -EINVAL;
560         }
561
562         ether_addr_copy(mac_addr, &vf->mac_addr);
563
564         /* Remove all existing mac */
565         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
566                 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
567                                 != I40E_SUCCESS)
568                         PMD_DRV_LOG(WARNING, "Delete MAC failed");
569
570         return 0;
571 }
572
573 /* Set vlan strip on/off for specific VF from host */
574 int
575 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
576 {
577         struct rte_eth_dev *dev;
578         struct i40e_pf *pf;
579         struct i40e_vsi *vsi;
580         int ret;
581
582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
583
584         dev = &rte_eth_devices[port];
585
586         if (!is_i40e_supported(dev))
587                 return -ENOTSUP;
588
589         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
590
591         if (vf_id >= pf->vf_num || !pf->vfs) {
592                 PMD_DRV_LOG(ERR, "Invalid argument.");
593                 return -EINVAL;
594         }
595
596         vsi = pf->vfs[vf_id].vsi;
597
598         if (!vsi)
599                 return -EINVAL;
600
601         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
602         if (ret != I40E_SUCCESS) {
603                 ret = -ENOTSUP;
604                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
605         }
606
607         return ret;
608 }
609
610 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
611                                     uint16_t vlan_id)
612 {
613         struct rte_eth_dev *dev;
614         struct i40e_pf *pf;
615         struct i40e_hw *hw;
616         struct i40e_vsi *vsi;
617         struct i40e_vsi_context ctxt;
618         int ret;
619
620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
621
622         if (vlan_id > ETHER_MAX_VLAN_ID) {
623                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
624                 return -EINVAL;
625         }
626
627         dev = &rte_eth_devices[port];
628
629         if (!is_i40e_supported(dev))
630                 return -ENOTSUP;
631
632         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
633         hw = I40E_PF_TO_HW(pf);
634
635         /**
636          * return -ENODEV if SRIOV not enabled, VF number not configured
637          * or no queue assigned.
638          */
639         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
640             pf->vf_nb_qps == 0)
641                 return -ENODEV;
642
643         if (vf_id >= pf->vf_num || !pf->vfs) {
644                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
645                 return -EINVAL;
646         }
647
648         vsi = pf->vfs[vf_id].vsi;
649         if (!vsi) {
650                 PMD_DRV_LOG(ERR, "Invalid VSI.");
651                 return -EINVAL;
652         }
653
654         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
655         vsi->info.pvid = vlan_id;
656         if (vlan_id > 0)
657                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
658         else
659                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
660
661         memset(&ctxt, 0, sizeof(ctxt));
662         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
663         ctxt.seid = vsi->seid;
664
665         hw = I40E_VSI_TO_HW(vsi);
666         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
667         if (ret != I40E_SUCCESS) {
668                 ret = -ENOTSUP;
669                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
670         }
671
672         return ret;
673 }
674
675 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
676                                   uint8_t on)
677 {
678         struct rte_eth_dev *dev;
679         struct i40e_pf *pf;
680         struct i40e_vsi *vsi;
681         struct i40e_hw *hw;
682         struct i40e_mac_filter_info filter;
683         struct ether_addr broadcast = {
684                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
685         int ret;
686
687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
688
689         if (on > 1) {
690                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
691                 return -EINVAL;
692         }
693
694         dev = &rte_eth_devices[port];
695
696         if (!is_i40e_supported(dev))
697                 return -ENOTSUP;
698
699         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
700         hw = I40E_PF_TO_HW(pf);
701
702         if (vf_id >= pf->vf_num || !pf->vfs) {
703                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
704                 return -EINVAL;
705         }
706
707         /**
708          * return -ENODEV if SRIOV not enabled, VF number not configured
709          * or no queue assigned.
710          */
711         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
712             pf->vf_nb_qps == 0) {
713                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
714                 return -ENODEV;
715         }
716
717         vsi = pf->vfs[vf_id].vsi;
718         if (!vsi) {
719                 PMD_DRV_LOG(ERR, "Invalid VSI.");
720                 return -EINVAL;
721         }
722
723         if (on) {
724                 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
725                 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
726                 ret = i40e_vsi_add_mac(vsi, &filter);
727         } else {
728                 ret = i40e_vsi_delete_mac(vsi, &broadcast);
729         }
730
731         if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
732                 ret = -ENOTSUP;
733                 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
734         } else {
735                 ret = 0;
736         }
737
738         return ret;
739 }
740
741 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
742 {
743         struct rte_eth_dev *dev;
744         struct i40e_pf *pf;
745         struct i40e_hw *hw;
746         struct i40e_vsi *vsi;
747         struct i40e_vsi_context ctxt;
748         int ret;
749
750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
751
752         if (on > 1) {
753                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
754                 return -EINVAL;
755         }
756
757         dev = &rte_eth_devices[port];
758
759         if (!is_i40e_supported(dev))
760                 return -ENOTSUP;
761
762         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
763         hw = I40E_PF_TO_HW(pf);
764
765         /**
766          * return -ENODEV if SRIOV not enabled, VF number not configured
767          * or no queue assigned.
768          */
769         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
770             pf->vf_nb_qps == 0) {
771                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
772                 return -ENODEV;
773         }
774
775         if (vf_id >= pf->vf_num || !pf->vfs) {
776                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
777                 return -EINVAL;
778         }
779
780         vsi = pf->vfs[vf_id].vsi;
781         if (!vsi) {
782                 PMD_DRV_LOG(ERR, "Invalid VSI.");
783                 return -EINVAL;
784         }
785
786         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
787         if (on) {
788                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
789                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
790         } else {
791                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
792                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
793         }
794
795         memset(&ctxt, 0, sizeof(ctxt));
796         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
797         ctxt.seid = vsi->seid;
798
799         hw = I40E_VSI_TO_HW(vsi);
800         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
801         if (ret != I40E_SUCCESS) {
802                 ret = -ENOTSUP;
803                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
804         }
805
806         return ret;
807 }
808
809 static int
810 i40e_vlan_filter_count(struct i40e_vsi *vsi)
811 {
812         uint32_t j, k;
813         uint16_t vlan_id;
814         int count = 0;
815
816         for (j = 0; j < I40E_VFTA_SIZE; j++) {
817                 if (!vsi->vfta[j])
818                         continue;
819
820                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
821                         if (!(vsi->vfta[j] & (1 << k)))
822                                 continue;
823
824                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
825                         if (!vlan_id)
826                                 continue;
827
828                         count++;
829                 }
830         }
831
832         return count;
833 }
834
835 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
836                                     uint64_t vf_mask, uint8_t on)
837 {
838         struct rte_eth_dev *dev;
839         struct i40e_pf *pf;
840         struct i40e_hw *hw;
841         struct i40e_vsi *vsi;
842         uint16_t vf_idx;
843         int ret = I40E_SUCCESS;
844
845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
846
847         dev = &rte_eth_devices[port];
848
849         if (!is_i40e_supported(dev))
850                 return -ENOTSUP;
851
852         if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
853                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
854                 return -EINVAL;
855         }
856
857         if (vf_mask == 0) {
858                 PMD_DRV_LOG(ERR, "No VF.");
859                 return -EINVAL;
860         }
861
862         if (on > 1) {
863                 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
864                 return -EINVAL;
865         }
866
867         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
868         hw = I40E_PF_TO_HW(pf);
869
870         /**
871          * return -ENODEV if SRIOV not enabled, VF number not configured
872          * or no queue assigned.
873          */
874         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
875             pf->vf_nb_qps == 0) {
876                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
877                 return -ENODEV;
878         }
879
880         for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
881                 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
882                         vsi = pf->vfs[vf_idx].vsi;
883                         if (on) {
884                                 if (!vsi->vlan_filter_on) {
885                                         vsi->vlan_filter_on = true;
886                                         i40e_aq_set_vsi_vlan_promisc(hw,
887                                                                      vsi->seid,
888                                                                      false,
889                                                                      NULL);
890                                         if (!vsi->vlan_anti_spoof_on)
891                                                 i40e_add_rm_all_vlan_filter(
892                                                         vsi, true);
893                                 }
894                                 ret = i40e_vsi_add_vlan(vsi, vlan_id);
895                         } else {
896                                 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
897
898                                 if (!i40e_vlan_filter_count(vsi)) {
899                                         vsi->vlan_filter_on = false;
900                                         i40e_aq_set_vsi_vlan_promisc(hw,
901                                                                      vsi->seid,
902                                                                      true,
903                                                                      NULL);
904                                 }
905                         }
906                 }
907         }
908
909         if (ret != I40E_SUCCESS) {
910                 ret = -ENOTSUP;
911                 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
912         }
913
914         return ret;
915 }
916
917 int
918 rte_pmd_i40e_get_vf_stats(uint16_t port,
919                           uint16_t vf_id,
920                           struct rte_eth_stats *stats)
921 {
922         struct rte_eth_dev *dev;
923         struct i40e_pf *pf;
924         struct i40e_vsi *vsi;
925
926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
927
928         dev = &rte_eth_devices[port];
929
930         if (!is_i40e_supported(dev))
931                 return -ENOTSUP;
932
933         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
934
935         if (vf_id >= pf->vf_num || !pf->vfs) {
936                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
937                 return -EINVAL;
938         }
939
940         vsi = pf->vfs[vf_id].vsi;
941         if (!vsi) {
942                 PMD_DRV_LOG(ERR, "Invalid VSI.");
943                 return -EINVAL;
944         }
945
946         i40e_update_vsi_stats(vsi);
947
948         stats->ipackets = vsi->eth_stats.rx_unicast +
949                         vsi->eth_stats.rx_multicast +
950                         vsi->eth_stats.rx_broadcast;
951         stats->opackets = vsi->eth_stats.tx_unicast +
952                         vsi->eth_stats.tx_multicast +
953                         vsi->eth_stats.tx_broadcast;
954         stats->ibytes   = vsi->eth_stats.rx_bytes;
955         stats->obytes   = vsi->eth_stats.tx_bytes;
956         stats->ierrors  = vsi->eth_stats.rx_discards;
957         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
958
959         return 0;
960 }
961
962 int
963 rte_pmd_i40e_reset_vf_stats(uint16_t port,
964                             uint16_t vf_id)
965 {
966         struct rte_eth_dev *dev;
967         struct i40e_pf *pf;
968         struct i40e_vsi *vsi;
969
970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
971
972         dev = &rte_eth_devices[port];
973
974         if (!is_i40e_supported(dev))
975                 return -ENOTSUP;
976
977         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
978
979         if (vf_id >= pf->vf_num || !pf->vfs) {
980                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
981                 return -EINVAL;
982         }
983
984         vsi = pf->vfs[vf_id].vsi;
985         if (!vsi) {
986                 PMD_DRV_LOG(ERR, "Invalid VSI.");
987                 return -EINVAL;
988         }
989
990         vsi->offset_loaded = false;
991         i40e_update_vsi_stats(vsi);
992
993         return 0;
994 }
995
/**
 * Set the max bandwidth (TX rate limit) of a VF.
 *
 * @param port   ethdev port id of the PF.
 * @param vf_id  VF index on that port.
 * @param bw     limit in Mbps; 0 disables the limitation. Must be no
 *               larger than I40E_QOS_BW_MAX and a multiple of
 *               I40E_QOS_BW_GRANULARITY.
 * @return 0 on success (including the no-change case);
 *         -ENODEV/-ENOTSUP/-EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* From here on, bw is expressed in units of the HW granularity. */
	bw /= I40E_QOS_BW_GRANULARITY;

	hw = I40E_VSI_TO_HW(vsi);

	/* No change. */
	if (bw == vsi->bw_info.bw_limit) {
		PMD_DRV_LOG(INFO,
			    "No change for VF max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, quit if TC bandwidth limitation is enabled.
	 *
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * check TC bandwidth limitation.
	 */
	if (bw) {
		/* Scan for any enabled TC that already has a credit set;
		 * breaking early leaves i < I40E_MAX_TRAFFIC_CLASS as the
		 * "found" marker checked just below.
		 */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if ((vsi->enabled_tc & BIT_ULL(i)) &&
			    vsi->bw_info.bw_ets_credits[i])
				break;
		}
		if (i != I40E_MAX_TRAFFIC_CLASS) {
			PMD_DRV_LOG(ERR,
				    "TC max bandwidth has been set on this VF,"
				    " please disable it first.");
			return -EINVAL;
		}
	}

	/* Program the limit via the admin queue; max_credit is left 0. */
	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d bandwidth, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_limit = (uint16_t)bw;
	vsi->bw_info.bw_max = 0;

	return 0;
}
1084
/**
 * Set the relative bandwidth allocation (ETS share weights) among the
 * enabled TCs of a VF.
 *
 * @param port       ethdev port id of the PF.
 * @param vf_id      VF index on that port.
 * @param tc_num     number of entries in @bw_weight; must equal the
 *                   number of TCs enabled on the VF's VSI.
 * @param bw_weight  one weight per enabled TC, each >= 1, summing to 100.
 * @return 0 on success (including the no-change case);
 *         -ENODEV/-ENOTSUP/-EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
				uint8_t tc_num, uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
	int ret = 0;
	int i, j;
	uint16_t sum;
	bool b_change = false;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	/* The caller must supply exactly one weight per enabled TC. */
	sum = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}
	if (sum != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    sum);
		return -EINVAL;
	}

	/* Each weight must be non-zero and they must total 100 (percent). */
	sum = 0;
	for (i = 0; i < tc_num; i++) {
		if (!bw_weight[i]) {
			PMD_DRV_LOG(ERR,
				    "The weight should be 1 at least.");
			return -EINVAL;
		}
		sum += bw_weight[i];
	}
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The summary of the TC weight should be 100.");
		return -EINVAL;
	}

	/**
	 * Create the configuration for all the TCs.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	/* j walks the caller's dense weight array while i walks the TC
	 * bitmap; also detect whether anything actually changes.
	 */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			if (bw_weight[j] !=
				vsi->bw_info.bw_ets_share_credits[i])
				b_change = true;

			tc_bw.tc_bw_credits[i] = bw_weight[j];
			j++;
		}
	}

	/* No change. */
	if (!b_change) {
		PMD_DRV_LOG(INFO,
			    "No change for TC allocated bandwidth."
			    " Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC bandwidth weight, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
			j++;
		}
	}

	return 0;
}
1198
/**
 * Set the max bandwidth of one traffic class of a VF.
 *
 * @param port   ethdev port id of the PF.
 * @param vf_id  VF index on that port.
 * @param tc_no  traffic class number, < I40E_MAX_TRAFFIC_CLASS and
 *               enabled on the VF's VSI.
 * @param bw     limit in Mbps; 0 disables the limitation. Must be no
 *               larger than I40E_QOS_BW_MAX and a multiple of
 *               I40E_QOS_BW_GRANULARITY.
 * @return 0 on success (including the no-change case);
 *         -ENODEV/-ENOTSUP/-EINVAL on failure.
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
			      uint8_t tc_no, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* From here on, bw is expressed in units of the HW granularity. */
	bw /= I40E_QOS_BW_GRANULARITY;

	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	hw = I40E_VSI_TO_HW(vsi);

	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
			    vf_id, tc_no);
		return -EINVAL;
	}

	/* No change. */
	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
		PMD_DRV_LOG(INFO,
			    "No change for TC max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, disable VF bandwidth limitation if it's
	 * enabled.
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * care about VF bandwidth limitation configuration.
	 */
	if (bw && vsi->bw_info.bw_limit) {
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to disable VF(%d)"
				    " bandwidth limitation, err(%d).",
				    vf_id, ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "VF max bandwidth is disabled according"
			    " to TC max bandwidth setting.");
	}

	/**
	 * Get all the TCs' info to create a whole picture.
	 * Because the incremental change isn't permitted.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			/* Credits are stored CPU-endian; the AQ buffer wants
			 * little-endian.
			 */
			tc_bw.tc_bw_credits[i] =
				rte_cpu_to_le_16(
					vsi->bw_info.bw_ets_credits[i]);
		}
	}
	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
			    vf_id, tc_no, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

	return 0;
}
1316
/**
 * Set which TCs of the PF's VEB run in strict-priority mode.
 *
 * Stops LLDP/DCBx the first time any strict-priority bit is set, and
 * restarts it when tc_map clears all bits again.
 *
 * @param port    ethdev port id of the PF.
 * @param tc_map  bitmap of TCs to put in strict-priority mode; must be
 *                a subset of the VEB's enabled TCs. 0 disables strict
 *                priority for all TCs.
 * @return 0 on success (including the no-change case);
 *         -ENODEV/-ENOTSUP/-EINVAL on failure.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/* Disable DCBx if it's the first time to set strict priority. */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, NULL);
		/* Failure here is tolerated: LLDP may already be stopped. */
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For robustness, if bandwidth is 0, use the minimum
			 * weight instead.
			 */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/* Pick the AQ opcode based on the transition: first enable,
	 * modify while active, or disable (tc_map == 0).
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1435
1436 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1437 #define I40E_MAX_PROFILE_NUM 16
1438
1439 static void
1440 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1441                                uint32_t track_id, uint8_t *profile_info_sec,
1442                                bool add)
1443 {
1444         struct i40e_profile_section_header *sec = NULL;
1445         struct i40e_profile_info *pinfo;
1446
1447         sec = (struct i40e_profile_section_header *)profile_info_sec;
1448         sec->tbl_size = 1;
1449         sec->data_end = sizeof(struct i40e_profile_section_header) +
1450                 sizeof(struct i40e_profile_info);
1451         sec->section.type = SECTION_TYPE_INFO;
1452         sec->section.offset = sizeof(struct i40e_profile_section_header);
1453         sec->section.size = sizeof(struct i40e_profile_info);
1454         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1455                                              sec->section.offset);
1456         pinfo->track_id = track_id;
1457         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1458         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1459         if (add)
1460                 pinfo->op = I40E_DDP_ADD_TRACKID;
1461         else
1462                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1463 }
1464
1465 static enum i40e_status_code
1466 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1467 {
1468         enum i40e_status_code status = I40E_SUCCESS;
1469         struct i40e_profile_section_header *sec;
1470         uint32_t track_id;
1471         uint32_t offset = 0;
1472         uint32_t info = 0;
1473
1474         sec = (struct i40e_profile_section_header *)profile_info_sec;
1475         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1476                                          sec->section.offset))->track_id;
1477
1478         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1479                                    track_id, &offset, &info, NULL);
1480         if (status)
1481                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1482                             "offset %d, info %d",
1483                             offset, info);
1484
1485         return status;
1486 }
1487
1488 /* Check if the profile info exists */
1489 static int
1490 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1491 {
1492         struct rte_eth_dev *dev = &rte_eth_devices[port];
1493         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         uint8_t *buff;
1495         struct rte_pmd_i40e_profile_list *p_list;
1496         struct rte_pmd_i40e_profile_info *pinfo, *p;
1497         uint32_t i;
1498         int ret;
1499
1500         buff = rte_zmalloc("pinfo_list",
1501                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1502                            0);
1503         if (!buff) {
1504                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1505                 return -1;
1506         }
1507
1508         ret = i40e_aq_get_ddp_list(
1509                 hw, (void *)buff,
1510                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1511                 0, NULL);
1512         if (ret) {
1513                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1514                 rte_free(buff);
1515                 return -1;
1516         }
1517         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1518         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1519                              sizeof(struct i40e_profile_section_header));
1520         for (i = 0; i < p_list->p_count; i++) {
1521                 p = &p_list->p_info[i];
1522                 if (pinfo->track_id == p->track_id) {
1523                         PMD_DRV_LOG(INFO, "Profile exists.");
1524                         rte_free(buff);
1525                         return 1;
1526                 }
1527         }
1528
1529         rte_free(buff);
1530         return 0;
1531 }
1532
/**
 * Load or roll back a DDP package on an i40e port.
 *
 * @param port  ethdev port id of the PF.
 * @param buff  the raw package image (header + metadata segment +
 *              i40e profile segment).
 * @param size  size of @buff in bytes.
 * @param op    RTE_PMD_I40E_PKG_OP_WR_ADD to load and register the
 *              profile, WR_DEL to roll it back and unregister it,
 *              WR_ONLY to write without touching the profile list.
 * @return 0 on success; -ENOTSUP/-ENODEV/-EINVAL/-EEXIST/-EACCES on
 *         validation failure, or a positive i40e_status_code from the
 *         admin queue on a HW-level failure.
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* A valid package is at least header + metadata segment + two
	 * 32-bit words.
	 */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	/* Need at least the metadata segment and the i40e profile segment. */
	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	i40e_update_customized_info(dev, buff, size);

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	/* Scratch buffer for the profile-info section used both for the
	 * existence check and for updating the device's profile list.
	 */
	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/* ADD must not duplicate an existing profile; DEL requires it. */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			PMD_DRV_LOG(ERR, "Profile already exists.");
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (!is_exist) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	rte_free(profile_info_sec);
	return status;
}
1682
1683 /* Get number of tvl records in the section */
1684 static unsigned int
1685 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1686 {
1687         unsigned int i, nb_rec, nb_tlv = 0;
1688         struct i40e_profile_tlv_section_record *tlv;
1689
1690         if (!sec)
1691                 return nb_tlv;
1692
1693         /* get number of records in the section */
1694         nb_rec = sec->section.size /
1695                                 sizeof(struct i40e_profile_tlv_section_record);
1696         for (i = 0; i < nb_rec; ) {
1697                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1698                 i += tlv->len;
1699                 nb_tlv++;
1700         }
1701         return nb_tlv;
1702 }
1703
1704 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1705         uint8_t *info_buff, uint32_t info_size,
1706         enum rte_pmd_i40e_package_info type)
1707 {
1708         uint32_t ret_size;
1709         struct i40e_package_header *pkg_hdr;
1710         struct i40e_generic_seg_header *i40e_seg_hdr;
1711         struct i40e_generic_seg_header *note_seg_hdr;
1712         struct i40e_generic_seg_header *metadata_seg_hdr;
1713
1714         if (!info_buff) {
1715                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1716                 return -EINVAL;
1717         }
1718
1719         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1720                 sizeof(struct i40e_metadata_segment) +
1721                 sizeof(uint32_t) * 2)) {
1722                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1723                 return -EINVAL;
1724         }
1725
1726         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1727         if (pkg_hdr->segment_count < 2) {
1728                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1729                 return -EINVAL;
1730         }
1731
1732         /* Find metadata segment */
1733         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1734                 pkg_hdr);
1735
1736         /* Find global notes segment */
1737         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1738                 pkg_hdr);
1739
1740         /* Find i40e profile segment */
1741         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1742
1743         /* get global header info */
1744         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1745                 struct rte_pmd_i40e_profile_info *info =
1746                         (struct rte_pmd_i40e_profile_info *)info_buff;
1747
1748                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1749                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1750                         return -EINVAL;
1751                 }
1752
1753                 if (!metadata_seg_hdr) {
1754                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1755                         return -EINVAL;
1756                 }
1757
1758                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1759                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1760                 info->track_id =
1761                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1762
1763                 memcpy(info->name,
1764                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1765                         I40E_DDP_NAME_SIZE);
1766                 memcpy(&info->version,
1767                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1768                         sizeof(struct i40e_ddp_version));
1769                 return I40E_SUCCESS;
1770         }
1771
1772         /* get global note size */
1773         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1774                 if (info_size < sizeof(uint32_t)) {
1775                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1776                         return -EINVAL;
1777                 }
1778                 if (note_seg_hdr == NULL)
1779                         ret_size = 0;
1780                 else
1781                         ret_size = note_seg_hdr->size;
1782                 *(uint32_t *)info_buff = ret_size;
1783                 return I40E_SUCCESS;
1784         }
1785
1786         /* get global note */
1787         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1788                 if (note_seg_hdr == NULL)
1789                         return -ENOTSUP;
1790                 if (info_size < note_seg_hdr->size) {
1791                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1792                         return -EINVAL;
1793                 }
1794                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1795                 return I40E_SUCCESS;
1796         }
1797
1798         /* get i40e segment header info */
1799         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1800                 struct rte_pmd_i40e_profile_info *info =
1801                         (struct rte_pmd_i40e_profile_info *)info_buff;
1802
1803                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1804                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1805                         return -EINVAL;
1806                 }
1807
1808                 if (!metadata_seg_hdr) {
1809                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1810                         return -EINVAL;
1811                 }
1812
1813                 if (!i40e_seg_hdr) {
1814                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1815                         return -EINVAL;
1816                 }
1817
1818                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1819                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1820                 info->track_id =
1821                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1822
1823                 memcpy(info->name,
1824                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1825                         I40E_DDP_NAME_SIZE);
1826                 memcpy(&info->version,
1827                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1828                         sizeof(struct i40e_ddp_version));
1829                 return I40E_SUCCESS;
1830         }
1831
1832         /* get number of devices */
1833         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1834                 if (info_size < sizeof(uint32_t)) {
1835                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1836                         return -EINVAL;
1837                 }
1838                 *(uint32_t *)info_buff =
1839                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1840                 return I40E_SUCCESS;
1841         }
1842
1843         /* get list of devices */
1844         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1845                 uint32_t dev_num;
1846                 dev_num =
1847                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1848                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1849                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1850                         return -EINVAL;
1851                 }
1852                 memcpy(info_buff,
1853                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1854                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1855                 return I40E_SUCCESS;
1856         }
1857
1858         /* get number of protocols */
1859         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1860                 struct i40e_profile_section_header *proto;
1861
1862                 if (info_size < sizeof(uint32_t)) {
1863                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1864                         return -EINVAL;
1865                 }
1866                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1867                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1868                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1869                 return I40E_SUCCESS;
1870         }
1871
1872         /* get list of protocols */
1873         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1874                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1875                 struct rte_pmd_i40e_proto_info *pinfo;
1876                 struct i40e_profile_section_header *proto;
1877                 struct i40e_profile_tlv_section_record *tlv;
1878
1879                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1880                 nb_proto_info = info_size /
1881                                         sizeof(struct rte_pmd_i40e_proto_info);
1882                 for (i = 0; i < nb_proto_info; i++) {
1883                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1884                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1885                 }
1886                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1887                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1888                 nb_tlv = i40e_get_tlv_section_size(proto);
1889                 if (nb_tlv == 0)
1890                         return I40E_SUCCESS;
1891                 if (nb_proto_info < nb_tlv) {
1892                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1893                         return -EINVAL;
1894                 }
1895                 /* get number of records in the section */
1896                 nb_rec = proto->section.size /
1897                                 sizeof(struct i40e_profile_tlv_section_record);
1898                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1899                 for (i = j = 0; i < nb_rec; j++) {
1900                         pinfo[j].proto_id = tlv->data[0];
1901                         snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1902                                  (const char *)&tlv->data[1]);
1903                         i += tlv->len;
1904                         tlv = &tlv[tlv->len];
1905                 }
1906                 return I40E_SUCCESS;
1907         }
1908
1909         /* get number of packet classification types */
1910         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1911                 struct i40e_profile_section_header *pctype;
1912
1913                 if (info_size < sizeof(uint32_t)) {
1914                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1915                         return -EINVAL;
1916                 }
1917                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1918                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1919                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1920                 return I40E_SUCCESS;
1921         }
1922
1923         /* get list of packet classification types */
1924         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1925                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1926                 struct rte_pmd_i40e_ptype_info *pinfo;
1927                 struct i40e_profile_section_header *pctype;
1928                 struct i40e_profile_tlv_section_record *tlv;
1929
1930                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1931                 nb_proto_info = info_size /
1932                                         sizeof(struct rte_pmd_i40e_ptype_info);
1933                 for (i = 0; i < nb_proto_info; i++)
1934                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1935                                sizeof(struct rte_pmd_i40e_ptype_info));
1936                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1937                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1938                 nb_tlv = i40e_get_tlv_section_size(pctype);
1939                 if (nb_tlv == 0)
1940                         return I40E_SUCCESS;
1941                 if (nb_proto_info < nb_tlv) {
1942                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1943                         return -EINVAL;
1944                 }
1945
1946                 /* get number of records in the section */
1947                 nb_rec = pctype->section.size /
1948                                 sizeof(struct i40e_profile_tlv_section_record);
1949                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1950                 for (i = j = 0; i < nb_rec; j++) {
1951                         memcpy(&pinfo[j], tlv->data,
1952                                sizeof(struct rte_pmd_i40e_ptype_info));
1953                         i += tlv->len;
1954                         tlv = &tlv[tlv->len];
1955                 }
1956                 return I40E_SUCCESS;
1957         }
1958
1959         /* get number of packet types */
1960         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
1961                 struct i40e_profile_section_header *ptype;
1962
1963                 if (info_size < sizeof(uint32_t)) {
1964                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1965                         return -EINVAL;
1966                 }
1967                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
1968                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1969                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
1970                 return I40E_SUCCESS;
1971         }
1972
1973         /* get list of packet types */
1974         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
1975                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1976                 struct rte_pmd_i40e_ptype_info *pinfo;
1977                 struct i40e_profile_section_header *ptype;
1978                 struct i40e_profile_tlv_section_record *tlv;
1979
1980                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1981                 nb_proto_info = info_size /
1982                                         sizeof(struct rte_pmd_i40e_ptype_info);
1983                 for (i = 0; i < nb_proto_info; i++)
1984                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1985                                sizeof(struct rte_pmd_i40e_ptype_info));
1986                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
1987                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1988                 nb_tlv = i40e_get_tlv_section_size(ptype);
1989                 if (nb_tlv == 0)
1990                         return I40E_SUCCESS;
1991                 if (nb_proto_info < nb_tlv) {
1992                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1993                         return -EINVAL;
1994                 }
1995                 /* get number of records in the section */
1996                 nb_rec = ptype->section.size /
1997                                 sizeof(struct i40e_profile_tlv_section_record);
1998                 for (i = j = 0; i < nb_rec; j++) {
1999                         tlv = (struct i40e_profile_tlv_section_record *)
2000                                                                 &ptype[1 + i];
2001                         memcpy(&pinfo[j], tlv->data,
2002                                sizeof(struct rte_pmd_i40e_ptype_info));
2003                         i += tlv->len;
2004                 }
2005                 return I40E_SUCCESS;
2006         }
2007
2008         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2009         return -EINVAL;
2010 }
2011
2012 int
2013 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2014 {
2015         struct rte_eth_dev *dev;
2016         struct i40e_hw *hw;
2017         enum i40e_status_code status = I40E_SUCCESS;
2018
2019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2020
2021         dev = &rte_eth_devices[port];
2022
2023         if (!is_i40e_supported(dev))
2024                 return -ENOTSUP;
2025
2026         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2027                 return -EINVAL;
2028
2029         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2030
2031         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2032                                       size, 0, NULL);
2033
2034         return status;
2035 }
2036
2037 static int check_invalid_pkt_type(uint32_t pkt_type)
2038 {
2039         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2040
2041         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2042         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2043         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2044         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2045         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2046         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2047         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2048
2049         if (l2 &&
2050             l2 != RTE_PTYPE_L2_ETHER &&
2051             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2052             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2053             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2054             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2055             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2056             l2 != RTE_PTYPE_L2_ETHER_QINQ)
2057                 return -1;
2058
2059         if (l3 &&
2060             l3 != RTE_PTYPE_L3_IPV4 &&
2061             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2062             l3 != RTE_PTYPE_L3_IPV6 &&
2063             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2064             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2065             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2066                 return -1;
2067
2068         if (l4 &&
2069             l4 != RTE_PTYPE_L4_TCP &&
2070             l4 != RTE_PTYPE_L4_UDP &&
2071             l4 != RTE_PTYPE_L4_FRAG &&
2072             l4 != RTE_PTYPE_L4_SCTP &&
2073             l4 != RTE_PTYPE_L4_ICMP &&
2074             l4 != RTE_PTYPE_L4_NONFRAG)
2075                 return -1;
2076
2077         if (tnl &&
2078             tnl != RTE_PTYPE_TUNNEL_IP &&
2079             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2080             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2081             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2082             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2083             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2084             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2085             tnl != RTE_PTYPE_TUNNEL_GTPU)
2086                 return -1;
2087
2088         if (il2 &&
2089             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2090             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2091             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2092                 return -1;
2093
2094         if (il3 &&
2095             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2096             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2097             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2098             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2099             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2100             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2101                 return -1;
2102
2103         if (il4 &&
2104             il4 != RTE_PTYPE_INNER_L4_TCP &&
2105             il4 != RTE_PTYPE_INNER_L4_UDP &&
2106             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2107             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2108             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2109             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2110                 return -1;
2111
2112         return 0;
2113 }
2114
2115 static int check_invalid_ptype_mapping(
2116                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2117                 uint16_t count)
2118 {
2119         int i;
2120
2121         for (i = 0; i < count; i++) {
2122                 uint16_t ptype = mapping_table[i].hw_ptype;
2123                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2124
2125                 if (ptype >= I40E_MAX_PKT_TYPE)
2126                         return -1;
2127
2128                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2129                         continue;
2130
2131                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2132                         continue;
2133
2134                 if (check_invalid_pkt_type(pkt_type))
2135                         return -1;
2136         }
2137
2138         return 0;
2139 }
2140
2141 int
2142 rte_pmd_i40e_ptype_mapping_update(
2143                         uint16_t port,
2144                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2145                         uint16_t count,
2146                         uint8_t exclusive)
2147 {
2148         struct rte_eth_dev *dev;
2149         struct i40e_adapter *ad;
2150         int i;
2151
2152         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2153
2154         dev = &rte_eth_devices[port];
2155
2156         if (!is_i40e_supported(dev))
2157                 return -ENOTSUP;
2158
2159         if (count > I40E_MAX_PKT_TYPE)
2160                 return -EINVAL;
2161
2162         if (check_invalid_ptype_mapping(mapping_items, count))
2163                 return -EINVAL;
2164
2165         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2166
2167         if (exclusive) {
2168                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2169                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2170         }
2171
2172         for (i = 0; i < count; i++)
2173                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2174                         = mapping_items[i].sw_ptype;
2175
2176         return 0;
2177 }
2178
2179 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2180 {
2181         struct rte_eth_dev *dev;
2182
2183         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2184
2185         dev = &rte_eth_devices[port];
2186
2187         if (!is_i40e_supported(dev))
2188                 return -ENOTSUP;
2189
2190         i40e_set_default_ptype_table(dev);
2191
2192         return 0;
2193 }
2194
2195 int rte_pmd_i40e_ptype_mapping_get(
2196                         uint16_t port,
2197                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2198                         uint16_t size,
2199                         uint16_t *count,
2200                         uint8_t valid_only)
2201 {
2202         struct rte_eth_dev *dev;
2203         struct i40e_adapter *ad;
2204         int n = 0;
2205         uint16_t i;
2206
2207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2208
2209         dev = &rte_eth_devices[port];
2210
2211         if (!is_i40e_supported(dev))
2212                 return -ENOTSUP;
2213
2214         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2215
2216         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2217                 if (n >= size)
2218                         break;
2219                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2220                         continue;
2221                 mapping_items[n].hw_ptype = i;
2222                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2223                 n++;
2224         }
2225
2226         *count = n;
2227         return 0;
2228 }
2229
2230 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2231                                        uint32_t target,
2232                                        uint8_t mask,
2233                                        uint32_t pkt_type)
2234 {
2235         struct rte_eth_dev *dev;
2236         struct i40e_adapter *ad;
2237         uint16_t i;
2238
2239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2240
2241         dev = &rte_eth_devices[port];
2242
2243         if (!is_i40e_supported(dev))
2244                 return -ENOTSUP;
2245
2246         if (!mask && check_invalid_pkt_type(target))
2247                 return -EINVAL;
2248
2249         if (check_invalid_pkt_type(pkt_type))
2250                 return -EINVAL;
2251
2252         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2253
2254         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2255                 if (mask) {
2256                         if ((target | ad->ptype_tbl[i]) == target &&
2257                             (target & ad->ptype_tbl[i]))
2258                                 ad->ptype_tbl[i] = pkt_type;
2259                 } else {
2260                         if (ad->ptype_tbl[i] == target)
2261                                 ad->ptype_tbl[i] = pkt_type;
2262                 }
2263         }
2264
2265         return 0;
2266 }
2267
2268 int
2269 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2270                              struct ether_addr *mac_addr)
2271 {
2272         struct rte_eth_dev *dev;
2273         struct i40e_pf_vf *vf;
2274         struct i40e_vsi *vsi;
2275         struct i40e_pf *pf;
2276         struct i40e_mac_filter_info mac_filter;
2277         int ret;
2278
2279         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2280                 return -EINVAL;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2283
2284         dev = &rte_eth_devices[port];
2285
2286         if (!is_i40e_supported(dev))
2287                 return -ENOTSUP;
2288
2289         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2290
2291         if (vf_id >= pf->vf_num || !pf->vfs)
2292                 return -EINVAL;
2293
2294         vf = &pf->vfs[vf_id];
2295         vsi = vf->vsi;
2296         if (!vsi) {
2297                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2298                 return -EINVAL;
2299         }
2300
2301         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2302         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2303         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2304         if (ret != I40E_SUCCESS) {
2305                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2306                 return -1;
2307         }
2308
2309         return 0;
2310 }
2311
2312 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2313 {
2314         struct rte_eth_dev *dev;
2315
2316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2317
2318         dev = &rte_eth_devices[port];
2319
2320         if (!is_i40e_supported(dev))
2321                 return -ENOTSUP;
2322
2323         i40e_set_default_pctype_table(dev);
2324
2325         return 0;
2326 }
2327
2328 int rte_pmd_i40e_flow_type_mapping_get(
2329                         uint16_t port,
2330                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2331 {
2332         struct rte_eth_dev *dev;
2333         struct i40e_adapter *ad;
2334         uint16_t i;
2335
2336         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2337
2338         dev = &rte_eth_devices[port];
2339
2340         if (!is_i40e_supported(dev))
2341                 return -ENOTSUP;
2342
2343         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2344
2345         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2346                 mapping_items[i].flow_type = i;
2347                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2348         }
2349
2350         return 0;
2351 }
2352
2353 int
2354 rte_pmd_i40e_flow_type_mapping_update(
2355                         uint16_t port,
2356                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2357                         uint16_t count,
2358                         uint8_t exclusive)
2359 {
2360         struct rte_eth_dev *dev;
2361         struct i40e_adapter *ad;
2362         int i;
2363
2364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2365
2366         dev = &rte_eth_devices[port];
2367
2368         if (!is_i40e_supported(dev))
2369                 return -ENOTSUP;
2370
2371         if (count > I40E_FLOW_TYPE_MAX)
2372                 return -EINVAL;
2373
2374         for (i = 0; i < count; i++)
2375                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2376                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2377                     (mapping_items[i].pctype &
2378                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2379                         return -EINVAL;
2380
2381         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2382
2383         if (exclusive) {
2384                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2385                         ad->pctypes_tbl[i] = 0ULL;
2386                 ad->flow_types_mask = 0ULL;
2387         }
2388
2389         for (i = 0; i < count; i++) {
2390                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2391                                                 mapping_items[i].pctype;
2392                 if (mapping_items[i].pctype)
2393                         ad->flow_types_mask |=
2394                                         (1ULL << mapping_items[i].flow_type);
2395                 else
2396                         ad->flow_types_mask &=
2397                                         ~(1ULL << mapping_items[i].flow_type);
2398         }
2399
2400         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2401                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2402
2403         return 0;
2404 }
2405
2406 int
2407 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2408 {
2409         struct rte_eth_dev *dev;
2410         struct ether_addr *mac;
2411         struct i40e_pf *pf;
2412         int vf_id;
2413         struct i40e_pf_vf *vf;
2414         uint16_t vf_num;
2415
2416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2417         dev = &rte_eth_devices[port];
2418
2419         if (!is_i40e_supported(dev))
2420                 return -ENOTSUP;
2421
2422         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2423         vf_num = pf->vf_num;
2424
2425         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2426                 vf = &pf->vfs[vf_id];
2427                 mac = &vf->mac_addr;
2428
2429                 if (is_same_ether_addr(mac, vf_mac))
2430                         return vf_id;
2431         }
2432
2433         return -EINVAL;
2434 }
2435
/*
 * i40e_vsi_update_queue_region_mapping - push the configured queue regions
 * into the main VSI's TC/queue mapping via an admin queue VSI update.
 *
 * Builds a VSI context from the current VSI info, rewrites the TC mapping
 * so that each configured region becomes one TC (region_id is used as the
 * TC index), sends the update to firmware, and mirrors the accepted
 * mapping back into the local VSI info.
 *
 * Return: 0 on success, -EINVAL when no region has been configured, or
 * the i40e_aq_update_vsi_params() error code on firmware failure.
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	/* nothing to map if no queue region was configured beforehand */
	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	/* start from the VSI's current properties and edit the mapping */
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* clear all 8 TC map entries and 16 queue map entries */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	for (i = 0; i < region_info->queue_region_number; i++) {
		/* region_id doubles as the TC index in the VSI mapping */
		tc_index = region_info->region[i].region_id;
		/* queue_num is a power of two (validated at region setup),
		 * so bsf = log2(queue_num) — the encoding the AQ expects
		 */
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		/* pack offset and size into the little-endian TC map word */
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	/* mapping applied; no sections left pending validation */
	vsi->info.valid_sections = 0;

	return 0;
}
2505
2506
2507 static int
2508 i40e_queue_region_set_region(struct i40e_pf *pf,
2509                                 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2510 {
2511         uint16_t i;
2512         struct i40e_vsi *main_vsi = pf->main_vsi;
2513         struct i40e_queue_regions *info = &pf->queue_region;
2514         int32_t ret = -EINVAL;
2515
2516         if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2517                                 conf_ptr->queue_num <= 64)) {
2518                 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2519                         "total number of queues do not exceed the VSI allocation");
2520                 return ret;
2521         }
2522
2523         if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2524                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2525                 return ret;
2526         }
2527
2528         if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2529                                         > main_vsi->nb_used_qps) {
2530                 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2531                 return ret;
2532         }
2533
2534         for (i = 0; i < info->queue_region_number; i++)
2535                 if (conf_ptr->region_id == info->region[i].region_id)
2536                         break;
2537
2538         if (i == info->queue_region_number &&
2539                                 i <= I40E_REGION_MAX_INDEX) {
2540                 info->region[i].region_id = conf_ptr->region_id;
2541                 info->region[i].queue_num = conf_ptr->queue_num;
2542                 info->region[i].queue_start_index =
2543                         conf_ptr->queue_start_index;
2544                 info->queue_region_number++;
2545         } else {
2546                 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2547                 return ret;
2548         }
2549
2550         return 0;
2551 }
2552
2553 static int
2554 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2555                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2556 {
2557         int32_t ret = -EINVAL;
2558         struct i40e_queue_regions *info = &pf->queue_region;
2559         uint16_t i, j;
2560         uint16_t region_index, flowtype_index;
2561
2562         /* For the pctype or hardware flowtype of packet,
2563          * the specific index for each type has been defined
2564          * in file i40e_type.h as enum i40e_filter_pctype.
2565          */
2566
2567         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2568                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2569                 return ret;
2570         }
2571
2572         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2573                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2574                 return ret;
2575         }
2576
2577
2578         for (i = 0; i < info->queue_region_number; i++)
2579                 if (rss_region_conf->region_id == info->region[i].region_id)
2580                         break;
2581
2582         if (i == info->queue_region_number) {
2583                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2584                 ret = -EINVAL;
2585                 return ret;
2586         }
2587         region_index = i;
2588
2589         for (i = 0; i < info->queue_region_number; i++) {
2590                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2591                         if (rss_region_conf->hw_flowtype ==
2592                                 info->region[i].hw_flowtype[j]) {
2593                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2594                                 return 0;
2595                         }
2596                 }
2597         }
2598
2599         flowtype_index = info->region[region_index].flowtype_num;
2600         info->region[region_index].hw_flowtype[flowtype_index] =
2601                                         rss_region_conf->hw_flowtype;
2602         info->region[region_index].flowtype_num++;
2603
2604         return 0;
2605 }
2606
2607 static void
2608 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2609                                 struct i40e_pf *pf)
2610 {
2611         uint8_t hw_flowtype;
2612         uint32_t pfqf_hregion;
2613         uint16_t i, j, index;
2614         struct i40e_queue_regions *info = &pf->queue_region;
2615
2616         /* For the pctype or hardware flowtype of packet,
2617          * the specific index for each type has been defined
2618          * in file i40e_type.h as enum i40e_filter_pctype.
2619          */
2620
2621         for (i = 0; i < info->queue_region_number; i++) {
2622                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2623                         hw_flowtype = info->region[i].hw_flowtype[j];
2624                         index = hw_flowtype >> 3;
2625                         pfqf_hregion =
2626                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2627
2628                         if ((hw_flowtype & 0x7) == 0) {
2629                                 pfqf_hregion |= info->region[i].region_id <<
2630                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2631                                 pfqf_hregion |= 1 <<
2632                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2633                         } else if ((hw_flowtype & 0x7) == 1) {
2634                                 pfqf_hregion |= info->region[i].region_id  <<
2635                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2636                                 pfqf_hregion |= 1 <<
2637                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2638                         } else if ((hw_flowtype & 0x7) == 2) {
2639                                 pfqf_hregion |= info->region[i].region_id  <<
2640                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2641                                 pfqf_hregion |= 1 <<
2642                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2643                         } else if ((hw_flowtype & 0x7) == 3) {
2644                                 pfqf_hregion |= info->region[i].region_id  <<
2645                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2646                                 pfqf_hregion |= 1 <<
2647                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2648                         } else if ((hw_flowtype & 0x7) == 4) {
2649                                 pfqf_hregion |= info->region[i].region_id  <<
2650                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2651                                 pfqf_hregion |= 1 <<
2652                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2653                         } else if ((hw_flowtype & 0x7) == 5) {
2654                                 pfqf_hregion |= info->region[i].region_id  <<
2655                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2656                                 pfqf_hregion |= 1 <<
2657                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2658                         } else if ((hw_flowtype & 0x7) == 6) {
2659                                 pfqf_hregion |= info->region[i].region_id  <<
2660                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2661                                 pfqf_hregion |= 1 <<
2662                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2663                         } else {
2664                                 pfqf_hregion |= info->region[i].region_id  <<
2665                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2666                                 pfqf_hregion |= 1 <<
2667                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2668                         }
2669
2670                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2671                                                 pfqf_hregion);
2672                 }
2673         }
2674 }
2675
2676 static int
2677 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2678                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2679 {
2680         struct i40e_queue_regions *info = &pf->queue_region;
2681         int32_t ret = -EINVAL;
2682         uint16_t i, j, region_index;
2683
2684         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2685                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2686                 return ret;
2687         }
2688
2689         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2690                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2691                 return ret;
2692         }
2693
2694         for (i = 0; i < info->queue_region_number; i++)
2695                 if (rss_region_conf->region_id == info->region[i].region_id)
2696                         break;
2697
2698         if (i == info->queue_region_number) {
2699                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2700                 ret = -EINVAL;
2701                 return ret;
2702         }
2703
2704         region_index = i;
2705
2706         for (i = 0; i < info->queue_region_number; i++) {
2707                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2708                         if (info->region[i].user_priority[j] ==
2709                                 rss_region_conf->user_priority) {
2710                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2711                                 return 0;
2712                         }
2713                 }
2714         }
2715
2716         j = info->region[region_index].user_priority_num;
2717         info->region[region_index].user_priority[j] =
2718                                         rss_region_conf->user_priority;
2719         info->region[region_index].user_priority_num++;
2720
2721         return 0;
2722 }
2723
/* Build a DCB configuration that maps each queue region to its own traffic
 * class and push it to the firmware.  Bandwidth is split evenly across the
 * regions; the region's bound user priorities select its TC.  Returns 0 on
 * success or a negative/AQ error code from i40e_set_dcb_config().
 * Requires at least one region to have been configured beforehand.
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	struct i40e_dcbx_config dcb_cfg_local;
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	int32_t ret = -EINVAL;
	uint16_t i, j, prio_index, region_index;
	uint8_t tc_map, tc_bw, bw_lf;

	if (!info->queue_region_number) {
		PMD_DRV_LOG(ERR, "No queue region been set before");
		return ret;
	}

	/* Start from an all-zero config built on the stack. */
	dcb_cfg = &dcb_cfg_local;
	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

	/* assume each tc has the same bw */
	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
	/* to ensure the sum of tcbw is equal to 100 */
	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
	for (i = 0; i < bw_lf; i++)
		dcb_cfg->etscfg.tcbwtable[i]++;

	/* assume each tc has the same Transmission Selection Algorithm */
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

	/* Route each region's user priorities to the region's TC id. */
	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].user_priority_num; j++) {
			prio_index = info->region[i].user_priority[j];
			region_index = info->region[i].region_id;
			dcb_cfg->etscfg.prioritytable[prio_index] =
						region_index;
		}
	}

	/* FW needs one App to configure HW */
	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

	/* One PFC-enable bit per configured region/TC. */
	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

	dcb_cfg->pfc.willing = 0;
	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->pfc.pfcenable = tc_map;

	/* Copy the new config to the current config */
	*old_cfg = *dcb_cfg;
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);

	if (ret) {
		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
2792
2793 int
2794 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2795         struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2796 {
2797         int32_t ret = -EINVAL;
2798         struct i40e_queue_regions *info = &pf->queue_region;
2799         struct i40e_vsi *main_vsi = pf->main_vsi;
2800
2801         if (on) {
2802                 i40e_queue_region_pf_flowtype_conf(hw, pf);
2803
2804                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2805                 if (ret != I40E_SUCCESS) {
2806                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2807                         return ret;
2808                 }
2809
2810                 ret = i40e_queue_region_dcb_configure(hw, pf);
2811                 if (ret != I40E_SUCCESS) {
2812                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2813                         return ret;
2814                 }
2815
2816                 return 0;
2817         }
2818
2819         info->queue_region_number = 1;
2820         info->region[0].queue_num = main_vsi->nb_used_qps;
2821         info->region[0].queue_start_index = 0;
2822
2823         ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2824         if (ret != I40E_SUCCESS)
2825                 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2826
2827         ret = i40e_dcb_init_configure(dev, TRUE);
2828         if (ret != I40E_SUCCESS) {
2829                 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2830                 pf->flags &= ~I40E_FLAG_DCB;
2831         }
2832
2833         i40e_init_queue_region_conf(dev);
2834
2835         return 0;
2836 }
2837
static int
i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena_lo, hena_hi;

	/* Queue regions rely on RSS: at least one hash-enable bit must be
	 * set across the two PFQF_HENA registers.
	 */
	hena_lo = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
	hena_hi = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));

	return (hena_lo | (hena_hi << 32)) ? 0 : -ENOTSUP;
}
2852
2853 static int
2854 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2855                 struct i40e_queue_regions *regions_ptr)
2856 {
2857         struct i40e_queue_regions *info = &pf->queue_region;
2858
2859         rte_memcpy(regions_ptr, info,
2860                         sizeof(struct i40e_queue_regions));
2861
2862         return 0;
2863 }
2864
2865 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2866                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2867 {
2868         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2869         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2870         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2871         int32_t ret;
2872
2873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2874
2875         if (!is_i40e_supported(dev))
2876                 return -ENOTSUP;
2877
2878         if (!(!i40e_queue_region_pf_check_rss(pf)))
2879                 return -ENOTSUP;
2880
2881         /* This queue region feature only support pf by now. It should
2882          * be called after dev_start, and will be clear after dev_stop.
2883          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2884          * is just an enable function which server for other configuration,
2885          * it is for all configuration about queue region from up layer,
2886          * at first will only keep in DPDK softwarestored in driver,
2887          * only after "FLUSH_ON", it commit all configuration to HW.
2888          * Because PMD had to set hardware configuration at a time, so
2889          * it will record all up layer command at first.
2890          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2891          * just clean all configuration about queue region just now,
2892          * and restore all to DPDK i40e driver default
2893          * config when start up.
2894          */
2895
2896         switch (op_type) {
2897         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2898                 ret = i40e_queue_region_set_region(pf,
2899                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2900                 break;
2901         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2902                 ret = i40e_queue_region_set_flowtype(pf,
2903                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2904                 break;
2905         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2906                 ret = i40e_queue_region_set_user_priority(pf,
2907                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2908                 break;
2909         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2910                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2911                 break;
2912         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2913                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2914                 break;
2915         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2916                 ret = i40e_queue_region_get_all_info(pf,
2917                                 (struct i40e_queue_regions *)arg);
2918                 break;
2919         default:
2920                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2921                             op_type);
2922                 ret = -EINVAL;
2923         }
2924
2925         I40E_WRITE_FLUSH(hw);
2926
2927         return ret;
2928 }
2929
2930 int rte_pmd_i40e_flow_add_del_packet_template(
2931                         uint16_t port,
2932                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2933                         uint8_t add)
2934 {
2935         struct rte_eth_dev *dev = &rte_eth_devices[port];
2936         struct i40e_fdir_filter_conf filter_conf;
2937
2938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2939
2940         if (!is_i40e_supported(dev))
2941                 return -ENOTSUP;
2942
2943         memset(&filter_conf, 0, sizeof(filter_conf));
2944         filter_conf.soft_id = conf->soft_id;
2945         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
2946         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
2947         filter_conf.input.flow.raw_flow.length = conf->input.length;
2948         filter_conf.input.flow_ext.pkt_template = true;
2949
2950         filter_conf.action.rx_queue = conf->action.rx_queue;
2951         filter_conf.action.behavior =
2952                 (enum i40e_fdir_behavior)conf->action.behavior;
2953         filter_conf.action.report_status =
2954                 (enum i40e_fdir_status)conf->action.report_status;
2955         filter_conf.action.flex_off = conf->action.flex_off;
2956
2957         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
2958 }
2959
2960 int
2961 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
2962                        struct rte_pmd_i40e_inset *inset,
2963                        enum rte_pmd_i40e_inset_type inset_type)
2964 {
2965         struct rte_eth_dev *dev;
2966         struct i40e_hw *hw;
2967         uint64_t inset_reg;
2968         uint32_t mask_reg[2];
2969         int i;
2970
2971         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2972
2973         dev = &rte_eth_devices[port];
2974
2975         if (!is_i40e_supported(dev))
2976                 return -ENOTSUP;
2977
2978         if (pctype > 63)
2979                 return -EINVAL;
2980
2981         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2982         memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
2983
2984         switch (inset_type) {
2985         case INSET_HASH:
2986                 /* Get input set */
2987                 inset_reg =
2988                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
2989                 inset_reg <<= I40E_32_BIT_WIDTH;
2990                 inset_reg |=
2991                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
2992                 /* Get field mask */
2993                 mask_reg[0] =
2994                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
2995                 mask_reg[1] =
2996                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
2997                 break;
2998         case INSET_FDIR:
2999                 inset_reg =
3000                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3001                 inset_reg <<= I40E_32_BIT_WIDTH;
3002                 inset_reg |=
3003                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3004                 mask_reg[0] =
3005                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3006                 mask_reg[1] =
3007                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3008                 break;
3009         case INSET_FDIR_FLX:
3010                 inset_reg =
3011                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3012                 mask_reg[0] =
3013                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3014                 mask_reg[1] =
3015                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3016                 break;
3017         default:
3018                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3019                 return -EINVAL;
3020         }
3021
3022         inset->inset = inset_reg;
3023
3024         for (i = 0; i < 2; i++) {
3025                 inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3026                 inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3027         }
3028
3029         return 0;
3030 }
3031
3032 int
3033 rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
3034                        struct rte_pmd_i40e_inset *inset,
3035                        enum rte_pmd_i40e_inset_type inset_type)
3036 {
3037         struct rte_eth_dev *dev;
3038         struct i40e_hw *hw;
3039         uint64_t inset_reg;
3040         uint32_t mask_reg[2];
3041         int i;
3042
3043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3044
3045         dev = &rte_eth_devices[port];
3046
3047         if (!is_i40e_supported(dev))
3048                 return -ENOTSUP;
3049
3050         if (pctype > 63)
3051                 return -EINVAL;
3052
3053         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3054
3055         /* Clear mask first */
3056         for (i = 0; i < 2; i++)
3057                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
3058
3059         inset_reg = inset->inset;
3060         for (i = 0; i < 2; i++)
3061                 mask_reg[i] = (inset->mask[i].field_idx << 16) |
3062                         inset->mask[i].mask;
3063
3064         switch (inset_type) {
3065         case INSET_HASH:
3066                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
3067                                      (uint32_t)(inset_reg & UINT32_MAX));
3068                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
3069                                      (uint32_t)((inset_reg >>
3070                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3071                 for (i = 0; i < 2; i++)
3072                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
3073                                              mask_reg[i]);
3074                 break;
3075         case INSET_FDIR:
3076                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
3077                                      (uint32_t)(inset_reg & UINT32_MAX));
3078                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
3079                                      (uint32_t)((inset_reg >>
3080                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3081                 for (i = 0; i < 2; i++)
3082                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
3083                                              mask_reg[i]);
3084                 break;
3085         case INSET_FDIR_FLX:
3086                 i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
3087                                      (uint32_t)(inset_reg & UINT32_MAX));
3088                 for (i = 0; i < 2; i++)
3089                         i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
3090                                              mask_reg[i]);
3091                 break;
3092         default:
3093                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3094                 return -EINVAL;
3095         }
3096
3097         I40E_WRITE_FLUSH(hw);
3098         return 0;
3099 }