net/i40e: fix failure to update packet type table
[dpdk.git] / drivers / net / i40e / rte_pmd_i40e.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_tailq.h>
7
8 #include "base/i40e_prototype.h"
9 #include "base/i40e_dcb.h"
10 #include "i40e_ethdev.h"
11 #include "i40e_pf.h"
12 #include "i40e_rxtx.h"
13 #include "rte_pmd_i40e.h"
14
15 int
16 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
17 {
18         struct rte_eth_dev *dev;
19         struct i40e_pf *pf;
20
21         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
22
23         dev = &rte_eth_devices[port];
24
25         if (!is_i40e_supported(dev))
26                 return -ENOTSUP;
27
28         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
29
30         if (vf >= pf->vf_num || !pf->vfs) {
31                 PMD_DRV_LOG(ERR, "Invalid argument.");
32                 return -EINVAL;
33         }
34
35         i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
36
37         return 0;
38 }
39
40 int
41 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
42 {
43         struct rte_eth_dev *dev;
44         struct i40e_pf *pf;
45         struct i40e_vsi *vsi;
46         struct i40e_hw *hw;
47         struct i40e_vsi_context ctxt;
48         int ret;
49
50         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
51
52         dev = &rte_eth_devices[port];
53
54         if (!is_i40e_supported(dev))
55                 return -ENOTSUP;
56
57         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
58
59         if (vf_id >= pf->vf_num || !pf->vfs) {
60                 PMD_DRV_LOG(ERR, "Invalid argument.");
61                 return -EINVAL;
62         }
63
64         vsi = pf->vfs[vf_id].vsi;
65         if (!vsi) {
66                 PMD_DRV_LOG(ERR, "Invalid VSI.");
67                 return -EINVAL;
68         }
69
70         /* Check if it has been already on or off */
71         if (vsi->info.valid_sections &
72                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
73                 if (on) {
74                         if ((vsi->info.sec_flags &
75                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
76                             I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
77                                 return 0; /* already on */
78                 } else {
79                         if ((vsi->info.sec_flags &
80                              I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
81                                 return 0; /* already off */
82                 }
83         }
84
85         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
86         if (on)
87                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
88         else
89                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
90
91         memset(&ctxt, 0, sizeof(ctxt));
92         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
93         ctxt.seid = vsi->seid;
94
95         hw = I40E_VSI_TO_HW(vsi);
96         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
97         if (ret != I40E_SUCCESS) {
98                 ret = -ENOTSUP;
99                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
100         }
101
102         return ret;
103 }
104
105 static int
106 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
107 {
108         uint32_t j, k;
109         uint16_t vlan_id;
110         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
111         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
112         int ret;
113
114         for (j = 0; j < I40E_VFTA_SIZE; j++) {
115                 if (!vsi->vfta[j])
116                         continue;
117
118                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
119                         if (!(vsi->vfta[j] & (1 << k)))
120                                 continue;
121
122                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
123                         if (!vlan_id)
124                                 continue;
125
126                         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
127                         if (add)
128                                 ret = i40e_aq_add_vlan(hw, vsi->seid,
129                                                        &vlan_data, 1, NULL);
130                         else
131                                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
132                                                           &vlan_data, 1, NULL);
133                         if (ret != I40E_SUCCESS) {
134                                 PMD_DRV_LOG(ERR,
135                                             "Failed to add/rm vlan filter");
136                                 return ret;
137                         }
138                 }
139         }
140
141         return I40E_SUCCESS;
142 }
143
144 int
145 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
146 {
147         struct rte_eth_dev *dev;
148         struct i40e_pf *pf;
149         struct i40e_vsi *vsi;
150         struct i40e_hw *hw;
151         struct i40e_vsi_context ctxt;
152         int ret;
153
154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
155
156         dev = &rte_eth_devices[port];
157
158         if (!is_i40e_supported(dev))
159                 return -ENOTSUP;
160
161         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
162
163         if (vf_id >= pf->vf_num || !pf->vfs) {
164                 PMD_DRV_LOG(ERR, "Invalid argument.");
165                 return -EINVAL;
166         }
167
168         vsi = pf->vfs[vf_id].vsi;
169         if (!vsi) {
170                 PMD_DRV_LOG(ERR, "Invalid VSI.");
171                 return -EINVAL;
172         }
173
174         /* Check if it has been already on or off */
175         if (vsi->vlan_anti_spoof_on == on)
176                 return 0; /* already on or off */
177
178         vsi->vlan_anti_spoof_on = on;
179         if (!vsi->vlan_filter_on) {
180                 ret = i40e_add_rm_all_vlan_filter(vsi, on);
181                 if (ret) {
182                         PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
183                         return -ENOTSUP;
184                 }
185         }
186
187         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
188         if (on)
189                 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
190         else
191                 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
192
193         memset(&ctxt, 0, sizeof(ctxt));
194         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
195         ctxt.seid = vsi->seid;
196
197         hw = I40E_VSI_TO_HW(vsi);
198         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
199         if (ret != I40E_SUCCESS) {
200                 ret = -ENOTSUP;
201                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
202         }
203
204         return ret;
205 }
206
/**
 * Remove every MAC (and MAC+VLAN) filter currently attached to a VSI.
 *
 * Walks vsi->mac_list; for each entry it builds a temporary
 * i40e_macvlan_filter array and removes the corresponding hardware
 * filters.  The software mac_list itself is left untouched, so the
 * filters can later be re-programmed by i40e_vsi_restore_mac_filter().
 *
 * @return I40E_SUCCESS, or an I40E_ERR_* code on the first failure.
 */
static int
i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	void *temp;

	/* remove all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		vlan_num = vsi->vlan_num;
		filter_type = f->mac_info.filter_type;
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* MAC+VLAN filters need one entry per active VLAN. */
			if (vlan_num == 0) {
				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
				return I40E_ERR_PARAM;
			}
		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			   filter_type == RTE_MAC_HASH_MATCH)
			/* MAC-only filters occupy a single entry. */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Replicate the MAC across all entries; VLAN ids follow. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}
		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN id of each entry for this MAC. */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
265
/**
 * Re-program into hardware every MAC (and MAC+VLAN) filter recorded in
 * the VSI's software mac_list.
 *
 * Counterpart of i40e_vsi_rm_mac_filter(): the list is walked and each
 * entry is expanded into a temporary i40e_macvlan_filter array and
 * re-added via the admin queue.
 *
 * @return I40E_SUCCESS, or an I40E_ERR_* code on the first failure.
 */
static int
i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;
	void *temp;

	/* restore all the MACs */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
			/**
			 * If vlan_num is 0, that's the first time to add mac,
			 * set mask for vlan_id 0.
			 */
			if (vsi->vlan_num == 0) {
				i40e_set_vlan_filter(vsi, 0, 1);
				vsi->vlan_num = 1;
			}
			vlan_num = vsi->vlan_num;
		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
			/* MAC-only filters occupy a single entry. */
			vlan_num = 1;

		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		if (!mv_f) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}

		/* Replicate the MAC across all entries; VLAN ids follow. */
		for (i = 0; i < vlan_num; i++) {
			mv_f[i].filter_type = f->mac_info.filter_type;
			rte_memcpy(&mv_f[i].macaddr,
					 &f->mac_info.mac_addr,
					 ETH_ADDR_LEN);
		}

		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
			/* Fill in the VLAN id of each entry for this MAC. */
			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
							 &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS) {
				rte_free(mv_f);
				return ret;
			}
		}

		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
		if (ret != I40E_SUCCESS) {
			rte_free(mv_f);
			return ret;
		}

		rte_free(mv_f);
		ret = I40E_SUCCESS;
	}

	return ret;
}
327
/**
 * Enable or disable TX loopback (local switching) on a single VSI.
 *
 * Because updating the switch section of the VSI context requires the
 * filters to be re-programmed, this function removes all MAC and VLAN
 * filters, updates the ALLOW_LB switch flag via the admin queue, then
 * restores the filters.  The order of these steps matters.
 *
 * @param vsi  target VSI (may be NULL -> -EINVAL).
 * @param on   non-zero to allow loopback, zero to disallow it.
 * @return 0 on success, negative errno or I40E_ERR_* on failure.
 */
static int
i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
{
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw;
	int ret;

	if (!vsi)
		return -EINVAL;

	hw = I40E_VSI_TO_HW(vsi);

	/* Use the FW API if FW >= v5.0 */
	if (hw->aq.fw_maj_ver < 5) {
		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
		return -ENOTSUP;
	}

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
		if (on) {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
				return 0; /* already on */
		} else {
			if ((vsi->info.switch_id &
			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
				return 0; /* already off */
		}
	}

	/* remove all the MAC and VLAN first */
	ret = i40e_vsi_rm_mac_filter(vsi);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
		return ret;
	}
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
			return ret;
		}
	}

	/* Flip the ALLOW_LB flag in the switch section. */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (on)
		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
	else
		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;

	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS) {
		/* NOTE(review): filters removed above are NOT restored on
		 * this error path — confirm whether that is intentional.
		 */
		PMD_DRV_LOG(ERR, "Failed to update VSI params");
		return ret;
	}

	/* add all the MAC and VLAN back */
	ret = i40e_vsi_restore_mac_filter(vsi);
	if (ret)
		return ret;
	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
		if (ret)
			return ret;
	}

	return ret;
}
403
404 int
405 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
406 {
407         struct rte_eth_dev *dev;
408         struct i40e_pf *pf;
409         struct i40e_pf_vf *vf;
410         struct i40e_vsi *vsi;
411         uint16_t vf_id;
412         int ret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
415
416         dev = &rte_eth_devices[port];
417
418         if (!is_i40e_supported(dev))
419                 return -ENOTSUP;
420
421         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
422
423         /* setup PF TX loopback */
424         vsi = pf->main_vsi;
425         ret = i40e_vsi_set_tx_loopback(vsi, on);
426         if (ret)
427                 return -ENOTSUP;
428
429         /* setup TX loopback for all the VFs */
430         if (!pf->vfs) {
431                 /* if no VF, do nothing. */
432                 return 0;
433         }
434
435         for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
436                 vf = &pf->vfs[vf_id];
437                 vsi = vf->vsi;
438
439                 ret = i40e_vsi_set_tx_loopback(vsi, on);
440                 if (ret)
441                         return -ENOTSUP;
442         }
443
444         return ret;
445 }
446
447 int
448 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
449 {
450         struct rte_eth_dev *dev;
451         struct i40e_pf *pf;
452         struct i40e_vsi *vsi;
453         struct i40e_hw *hw;
454         int ret;
455
456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
457
458         dev = &rte_eth_devices[port];
459
460         if (!is_i40e_supported(dev))
461                 return -ENOTSUP;
462
463         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
464
465         if (vf_id >= pf->vf_num || !pf->vfs) {
466                 PMD_DRV_LOG(ERR, "Invalid argument.");
467                 return -EINVAL;
468         }
469
470         vsi = pf->vfs[vf_id].vsi;
471         if (!vsi) {
472                 PMD_DRV_LOG(ERR, "Invalid VSI.");
473                 return -EINVAL;
474         }
475
476         hw = I40E_VSI_TO_HW(vsi);
477
478         ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
479                                                   on, NULL, true);
480         if (ret != I40E_SUCCESS) {
481                 ret = -ENOTSUP;
482                 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
483         }
484
485         return ret;
486 }
487
488 int
489 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
490 {
491         struct rte_eth_dev *dev;
492         struct i40e_pf *pf;
493         struct i40e_vsi *vsi;
494         struct i40e_hw *hw;
495         int ret;
496
497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
498
499         dev = &rte_eth_devices[port];
500
501         if (!is_i40e_supported(dev))
502                 return -ENOTSUP;
503
504         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
505
506         if (vf_id >= pf->vf_num || !pf->vfs) {
507                 PMD_DRV_LOG(ERR, "Invalid argument.");
508                 return -EINVAL;
509         }
510
511         vsi = pf->vfs[vf_id].vsi;
512         if (!vsi) {
513                 PMD_DRV_LOG(ERR, "Invalid VSI.");
514                 return -EINVAL;
515         }
516
517         hw = I40E_VSI_TO_HW(vsi);
518
519         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
520                                                     on, NULL);
521         if (ret != I40E_SUCCESS) {
522                 ret = -ENOTSUP;
523                 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
524         }
525
526         return ret;
527 }
528
/**
 * Set the administratively-assigned MAC address of a VF from the host.
 *
 * Validates the address, records it as the VF's default MAC, and removes
 * every MAC filter currently attached to the VF's VSI.  NOTE(review): the
 * new address is only recorded here, not programmed into the VSI —
 * presumably it takes effect when the VF next resets; confirm with callers.
 *
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on bad arguments.
 */
int
rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
			     struct ether_addr *mac_addr)
{
	struct i40e_mac_filter *f;
	struct rte_eth_dev *dev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	void *temp;

	/* Reject addresses the base code considers invalid. */
	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs)
		return -EINVAL;

	vf = &pf->vfs[vf_id];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	/* Record the new default MAC for this VF. */
	ether_addr_copy(mac_addr, &vf->mac_addr);

	/* Remove all existing mac; failures are warned about, not fatal. */
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
				!= I40E_SUCCESS)
			PMD_DRV_LOG(WARNING, "Delete MAC failed");

	return 0;
}
572
573 /* Set vlan strip on/off for specific VF from host */
574 int
575 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
576 {
577         struct rte_eth_dev *dev;
578         struct i40e_pf *pf;
579         struct i40e_vsi *vsi;
580         int ret;
581
582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
583
584         dev = &rte_eth_devices[port];
585
586         if (!is_i40e_supported(dev))
587                 return -ENOTSUP;
588
589         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
590
591         if (vf_id >= pf->vf_num || !pf->vfs) {
592                 PMD_DRV_LOG(ERR, "Invalid argument.");
593                 return -EINVAL;
594         }
595
596         vsi = pf->vfs[vf_id].vsi;
597
598         if (!vsi)
599                 return -EINVAL;
600
601         ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
602         if (ret != I40E_SUCCESS) {
603                 ret = -ENOTSUP;
604                 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
605         }
606
607         return ret;
608 }
609
610 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
611                                     uint16_t vlan_id)
612 {
613         struct rte_eth_dev *dev;
614         struct i40e_pf *pf;
615         struct i40e_hw *hw;
616         struct i40e_vsi *vsi;
617         struct i40e_vsi_context ctxt;
618         int ret;
619
620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
621
622         if (vlan_id > ETHER_MAX_VLAN_ID) {
623                 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
624                 return -EINVAL;
625         }
626
627         dev = &rte_eth_devices[port];
628
629         if (!is_i40e_supported(dev))
630                 return -ENOTSUP;
631
632         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
633         hw = I40E_PF_TO_HW(pf);
634
635         /**
636          * return -ENODEV if SRIOV not enabled, VF number not configured
637          * or no queue assigned.
638          */
639         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
640             pf->vf_nb_qps == 0)
641                 return -ENODEV;
642
643         if (vf_id >= pf->vf_num || !pf->vfs) {
644                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
645                 return -EINVAL;
646         }
647
648         vsi = pf->vfs[vf_id].vsi;
649         if (!vsi) {
650                 PMD_DRV_LOG(ERR, "Invalid VSI.");
651                 return -EINVAL;
652         }
653
654         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
655         vsi->info.pvid = vlan_id;
656         if (vlan_id > 0)
657                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
658         else
659                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
660
661         memset(&ctxt, 0, sizeof(ctxt));
662         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
663         ctxt.seid = vsi->seid;
664
665         hw = I40E_VSI_TO_HW(vsi);
666         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
667         if (ret != I40E_SUCCESS) {
668                 ret = -ENOTSUP;
669                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
670         }
671
672         return ret;
673 }
674
675 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
676                                   uint8_t on)
677 {
678         struct rte_eth_dev *dev;
679         struct i40e_pf *pf;
680         struct i40e_vsi *vsi;
681         struct i40e_hw *hw;
682         struct i40e_mac_filter_info filter;
683         struct ether_addr broadcast = {
684                 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
685         int ret;
686
687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
688
689         if (on > 1) {
690                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
691                 return -EINVAL;
692         }
693
694         dev = &rte_eth_devices[port];
695
696         if (!is_i40e_supported(dev))
697                 return -ENOTSUP;
698
699         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
700         hw = I40E_PF_TO_HW(pf);
701
702         if (vf_id >= pf->vf_num || !pf->vfs) {
703                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
704                 return -EINVAL;
705         }
706
707         /**
708          * return -ENODEV if SRIOV not enabled, VF number not configured
709          * or no queue assigned.
710          */
711         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
712             pf->vf_nb_qps == 0) {
713                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
714                 return -ENODEV;
715         }
716
717         vsi = pf->vfs[vf_id].vsi;
718         if (!vsi) {
719                 PMD_DRV_LOG(ERR, "Invalid VSI.");
720                 return -EINVAL;
721         }
722
723         if (on) {
724                 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
725                 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
726                 ret = i40e_vsi_add_mac(vsi, &filter);
727         } else {
728                 ret = i40e_vsi_delete_mac(vsi, &broadcast);
729         }
730
731         if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
732                 ret = -ENOTSUP;
733                 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
734         } else {
735                 ret = 0;
736         }
737
738         return ret;
739 }
740
741 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
742 {
743         struct rte_eth_dev *dev;
744         struct i40e_pf *pf;
745         struct i40e_hw *hw;
746         struct i40e_vsi *vsi;
747         struct i40e_vsi_context ctxt;
748         int ret;
749
750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
751
752         if (on > 1) {
753                 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
754                 return -EINVAL;
755         }
756
757         dev = &rte_eth_devices[port];
758
759         if (!is_i40e_supported(dev))
760                 return -ENOTSUP;
761
762         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
763         hw = I40E_PF_TO_HW(pf);
764
765         /**
766          * return -ENODEV if SRIOV not enabled, VF number not configured
767          * or no queue assigned.
768          */
769         if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
770             pf->vf_nb_qps == 0) {
771                 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
772                 return -ENODEV;
773         }
774
775         if (vf_id >= pf->vf_num || !pf->vfs) {
776                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
777                 return -EINVAL;
778         }
779
780         vsi = pf->vfs[vf_id].vsi;
781         if (!vsi) {
782                 PMD_DRV_LOG(ERR, "Invalid VSI.");
783                 return -EINVAL;
784         }
785
786         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
787         if (on) {
788                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
789                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
790         } else {
791                 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
792                 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
793         }
794
795         memset(&ctxt, 0, sizeof(ctxt));
796         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
797         ctxt.seid = vsi->seid;
798
799         hw = I40E_VSI_TO_HW(vsi);
800         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
801         if (ret != I40E_SUCCESS) {
802                 ret = -ENOTSUP;
803                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
804         }
805
806         return ret;
807 }
808
809 static int
810 i40e_vlan_filter_count(struct i40e_vsi *vsi)
811 {
812         uint32_t j, k;
813         uint16_t vlan_id;
814         int count = 0;
815
816         for (j = 0; j < I40E_VFTA_SIZE; j++) {
817                 if (!vsi->vfta[j])
818                         continue;
819
820                 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
821                         if (!(vsi->vfta[j] & (1 << k)))
822                                 continue;
823
824                         vlan_id = j * I40E_UINT32_BIT_SIZE + k;
825                         if (!vlan_id)
826                                 continue;
827
828                         count++;
829                 }
830         }
831
832         return count;
833 }
834
/**
 * Add or remove a VLAN filter on every VF selected by @vf_mask.
 *
 * When the first filter is added to a VF, VLAN promiscuous mode is
 * turned off and any VLANs already in the VFTA are programmed; when the
 * last filter is removed, VLAN promiscuous mode is restored.
 *
 * NOTE(review): pf->vfs and each VF's vsi are dereferenced without a
 * NULL check here (unlike the other APIs in this file) — presumably
 * vf_num > 0 guarantees pf->vfs is allocated; confirm.
 *
 * @return 0 on success, negative errno otherwise.
 */
int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
				    uint64_t vf_mask, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi *vsi;
	uint16_t vf_idx;
	int ret = I40E_SUCCESS;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	/* VLAN 0 is rejected: it is not a real filter entry. */
	if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
		return -EINVAL;
	}

	if (vf_mask == 0) {
		PMD_DRV_LOG(ERR, "No VF.");
		return -EINVAL;
	}

	if (on > 1) {
		PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
		return -EINVAL;
	}

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_PF_TO_HW(pf);

	/**
	 * return -ENODEV if SRIOV not enabled, VF number not configured
	 * or no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0) {
		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
		return -ENODEV;
	}

	/* Stop at the first VF that fails (ret != I40E_SUCCESS). */
	for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
			vsi = pf->vfs[vf_idx].vsi;
			if (on) {
				/*
				 * First filter on this VF: leave VLAN
				 * promiscuous mode and program any VLANs
				 * already recorded in the VFTA.
				 */
				if (!vsi->vlan_filter_on) {
					vsi->vlan_filter_on = true;
					i40e_aq_set_vsi_vlan_promisc(hw,
								     vsi->seid,
								     false,
								     NULL);
					if (!vsi->vlan_anti_spoof_on)
						i40e_add_rm_all_vlan_filter(
							vsi, true);
				}
				ret = i40e_vsi_add_vlan(vsi, vlan_id);
			} else {
				ret = i40e_vsi_delete_vlan(vsi, vlan_id);

				/*
				 * Last filter removed: fall back to VLAN
				 * promiscuous mode.
				 */
				if (!i40e_vlan_filter_count(vsi)) {
					vsi->vlan_filter_on = false;
					i40e_aq_set_vsi_vlan_promisc(hw,
								     vsi->seid,
								     true,
								     NULL);
				}
			}
		}
	}

	if (ret != I40E_SUCCESS) {
		ret = -ENOTSUP;
		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
	}

	return ret;
}
916
917 int
918 rte_pmd_i40e_get_vf_stats(uint16_t port,
919                           uint16_t vf_id,
920                           struct rte_eth_stats *stats)
921 {
922         struct rte_eth_dev *dev;
923         struct i40e_pf *pf;
924         struct i40e_vsi *vsi;
925
926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
927
928         dev = &rte_eth_devices[port];
929
930         if (!is_i40e_supported(dev))
931                 return -ENOTSUP;
932
933         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
934
935         if (vf_id >= pf->vf_num || !pf->vfs) {
936                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
937                 return -EINVAL;
938         }
939
940         vsi = pf->vfs[vf_id].vsi;
941         if (!vsi) {
942                 PMD_DRV_LOG(ERR, "Invalid VSI.");
943                 return -EINVAL;
944         }
945
946         i40e_update_vsi_stats(vsi);
947
948         stats->ipackets = vsi->eth_stats.rx_unicast +
949                         vsi->eth_stats.rx_multicast +
950                         vsi->eth_stats.rx_broadcast;
951         stats->opackets = vsi->eth_stats.tx_unicast +
952                         vsi->eth_stats.tx_multicast +
953                         vsi->eth_stats.tx_broadcast;
954         stats->ibytes   = vsi->eth_stats.rx_bytes;
955         stats->obytes   = vsi->eth_stats.tx_bytes;
956         stats->ierrors  = vsi->eth_stats.rx_discards;
957         stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
958
959         return 0;
960 }
961
962 int
963 rte_pmd_i40e_reset_vf_stats(uint16_t port,
964                             uint16_t vf_id)
965 {
966         struct rte_eth_dev *dev;
967         struct i40e_pf *pf;
968         struct i40e_vsi *vsi;
969
970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
971
972         dev = &rte_eth_devices[port];
973
974         if (!is_i40e_supported(dev))
975                 return -ENOTSUP;
976
977         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
978
979         if (vf_id >= pf->vf_num || !pf->vfs) {
980                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
981                 return -EINVAL;
982         }
983
984         vsi = pf->vfs[vf_id].vsi;
985         if (!vsi) {
986                 PMD_DRV_LOG(ERR, "Invalid VSI.");
987                 return -EINVAL;
988         }
989
990         vsi->offset_loaded = false;
991         i40e_update_vsi_stats(vsi);
992
993         return 0;
994 }
995
996 int
997 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
998 {
999         struct rte_eth_dev *dev;
1000         struct i40e_pf *pf;
1001         struct i40e_vsi *vsi;
1002         struct i40e_hw *hw;
1003         int ret = 0;
1004         int i;
1005
1006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1007
1008         dev = &rte_eth_devices[port];
1009
1010         if (!is_i40e_supported(dev))
1011                 return -ENOTSUP;
1012
1013         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1014
1015         if (vf_id >= pf->vf_num || !pf->vfs) {
1016                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1017                 return -EINVAL;
1018         }
1019
1020         vsi = pf->vfs[vf_id].vsi;
1021         if (!vsi) {
1022                 PMD_DRV_LOG(ERR, "Invalid VSI.");
1023                 return -EINVAL;
1024         }
1025
1026         if (bw > I40E_QOS_BW_MAX) {
1027                 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1028                             I40E_QOS_BW_MAX);
1029                 return -EINVAL;
1030         }
1031
1032         if (bw % I40E_QOS_BW_GRANULARITY) {
1033                 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1034                             I40E_QOS_BW_GRANULARITY);
1035                 return -EINVAL;
1036         }
1037
1038         bw /= I40E_QOS_BW_GRANULARITY;
1039
1040         hw = I40E_VSI_TO_HW(vsi);
1041
1042         /* No change. */
1043         if (bw == vsi->bw_info.bw_limit) {
1044                 PMD_DRV_LOG(INFO,
1045                             "No change for VF max bandwidth. Nothing to do.");
1046                 return 0;
1047         }
1048
1049         /**
1050          * VF bandwidth limitation and TC bandwidth limitation cannot be
1051          * enabled in parallel, quit if TC bandwidth limitation is enabled.
1052          *
1053          * If bw is 0, means disable bandwidth limitation. Then no need to
1054          * check TC bandwidth limitation.
1055          */
1056         if (bw) {
1057                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1058                         if ((vsi->enabled_tc & BIT_ULL(i)) &&
1059                             vsi->bw_info.bw_ets_credits[i])
1060                                 break;
1061                 }
1062                 if (i != I40E_MAX_TRAFFIC_CLASS) {
1063                         PMD_DRV_LOG(ERR,
1064                                     "TC max bandwidth has been set on this VF,"
1065                                     " please disable it first.");
1066                         return -EINVAL;
1067                 }
1068         }
1069
1070         ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1071         if (ret) {
1072                 PMD_DRV_LOG(ERR,
1073                             "Failed to set VF %d bandwidth, err(%d).",
1074                             vf_id, ret);
1075                 return -EINVAL;
1076         }
1077
1078         /* Store the configuration. */
1079         vsi->bw_info.bw_limit = (uint16_t)bw;
1080         vsi->bw_info.bw_max = 0;
1081
1082         return 0;
1083 }
1084
/**
 * Set the relative TX bandwidth allocation (ETS share weights) among the
 * enabled TCs of a VF.
 *
 * @param port      port identifier of the PF device
 * @param vf_id     index of the VF to configure
 * @param tc_num    number of weights supplied; must equal the number of
 *                  TCs enabled on the VF's VSI
 * @param bw_weight array of per-TC weights, one per enabled TC in
 *                  ascending TC order; each weight >= 1, sum must be 100
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 */
int
rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
				uint8_t tc_num, uint8_t *bw_weight)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
	int ret = 0;
	int i, j;
	uint16_t sum;
	bool b_change = false;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	/* Count the enabled TCs; the caller must supply exactly one weight
	 * for each of them.
	 */
	sum = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}
	if (sum != tc_num) {
		PMD_DRV_LOG(ERR,
			    "Weight should be set for all %d enabled TCs.",
			    sum);
		return -EINVAL;
	}

	/* Weights are percentages: each must be at least 1 and they must
	 * add up to exactly 100.
	 */
	sum = 0;
	for (i = 0; i < tc_num; i++) {
		if (!bw_weight[i]) {
			PMD_DRV_LOG(ERR,
				    "The weight should be 1 at least.");
			return -EINVAL;
		}
		sum += bw_weight[i];
	}
	if (sum != 100) {
		PMD_DRV_LOG(ERR,
			    "The summary of the TC weight should be 100.");
		return -EINVAL;
	}

	/**
	 * Create the configuration for all the TCs.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	j = 0;
	/* Map the j-th user weight onto the i-th enabled TC slot, noting
	 * whether anything actually differs from the stored credits.
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			if (bw_weight[j] !=
				vsi->bw_info.bw_ets_share_credits[i])
				b_change = true;

			tc_bw.tc_bw_credits[i] = bw_weight[j];
			j++;
		}
	}

	/* No change. */
	if (!b_change) {
		PMD_DRV_LOG(INFO,
			    "No change for TC allocated bandwidth."
			    " Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC bandwidth weight, err(%d).",
			    vf_id, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	j = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
			j++;
		}
	}

	return 0;
}
1198
/**
 * Set the maximum TX bandwidth of a single traffic class of a VF.
 *
 * @param port  port identifier of the PF device
 * @param vf_id index of the VF to configure
 * @param tc_no traffic class number; must be enabled on the VF's VSI
 * @param bw    bandwidth cap in Mbps (multiple of I40E_QOS_BW_GRANULARITY,
 *              at most I40E_QOS_BW_MAX); 0 disables the limitation
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 */
int
rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
			      uint8_t tc_no, uint32_t bw)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	int ret = 0;
	int i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (bw > I40E_QOS_BW_MAX) {
		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
			    I40E_QOS_BW_MAX);
		return -EINVAL;
	}

	if (bw % I40E_QOS_BW_GRANULARITY) {
		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
			    I40E_QOS_BW_GRANULARITY);
		return -EINVAL;
	}

	/* Hardware programs the limit in units of the granularity. */
	bw /= I40E_QOS_BW_GRANULARITY;

	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
			    I40E_MAX_TRAFFIC_CLASS);
		return -EINVAL;
	}

	hw = I40E_VSI_TO_HW(vsi);

	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
			    vf_id, tc_no);
		return -EINVAL;
	}

	/* No change. */
	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
		PMD_DRV_LOG(INFO,
			    "No change for TC max bandwidth. Nothing to do.");
		return 0;
	}

	/**
	 * VF bandwidth limitation and TC bandwidth limitation cannot be
	 * enabled in parallel, disable VF bandwidth limitation if it's
	 * enabled.
	 * If bw is 0, means disable bandwidth limitation. Then no need to
	 * care about VF bandwidth limitation configuration.
	 */
	if (bw && vsi->bw_info.bw_limit) {
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to disable VF(%d)"
				    " bandwidth limitation, err(%d).",
				    vf_id, ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "VF max bandwidth is disabled according"
			    " to TC max bandwidth setting.");
	}

	/**
	 * Get all the TCs' info to create a whole picture.
	 * Because the incremental change isn't permitted.
	 */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & BIT_ULL(i)) {
			tc_bw.tc_bw_credits[i] =
				rte_cpu_to_le_16(
					vsi->bw_info.bw_ets_credits[i]);
		}
	}
	/* Only the requested TC gets the new credit value. */
	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
			    vf_id, tc_no, ret);
		return -EINVAL;
	}

	/* Store the configuration. */
	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;

	return 0;
}
1316
/**
 * Enable strict-priority scheduling for a set of TCs on the PF's VEB.
 *
 * @param port   port identifier of the PF device
 * @param tc_map bitmap of TCs to put in strict-priority mode; must be a
 *               subset of the VEB's enabled TCs. 0 turns strict priority
 *               off for all TCs and re-enables DCBx.
 * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on failure
 *
 * NOTE(review): the first transition into strict-priority mode stops LLDP
 * (disabling DCBx) before reprogramming the switching component ETS; the
 * transition back to tc_map == 0 restarts LLDP afterwards. The ordering of
 * these admin-queue calls is deliberate.
 */
int
rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;
	struct i40e_veb *veb;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	vsi = pf->main_vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	veb = vsi->veb;
	if (!veb) {
		PMD_DRV_LOG(ERR, "Invalid VEB.");
		return -EINVAL;
	}

	/* Every requested TC must already be enabled on the VEB. */
	if ((tc_map & veb->enabled_tc) != tc_map) {
		PMD_DRV_LOG(ERR,
			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
			    veb->enabled_tc);
		return -EINVAL;
	}

	if (tc_map == veb->strict_prio_tc) {
		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
		return 0;
	}

	hw = I40E_VSI_TO_HW(vsi);

	/* Disable DCBx if it's the first time to set strict priority. */
	if (!veb->strict_prio_tc) {
		ret = i40e_aq_stop_lldp(hw, true, NULL);
		if (ret)
			PMD_DRV_LOG(INFO,
				    "Failed to disable DCBx as it's already"
				    " disabled.");
		else
			PMD_DRV_LOG(INFO,
				    "DCBx is disabled according to strict"
				    " priority setting.");
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = veb->enabled_tc;
	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
	ets_data.tc_strict_priority_flags = tc_map;
	/* Get all TCs' bandwidth. */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (veb->enabled_tc & BIT_ULL(i)) {
			/* For rubust, if bandwidth is 0, use 1 instead. */
			if (veb->bw_info.bw_ets_share_credits[i])
				ets_data.tc_bw_share_credits[i] =
					veb->bw_info.bw_ets_share_credits[i];
			else
				ets_data.tc_bw_share_credits[i] =
					I40E_QOS_BW_WEIGHT_MIN;
		}
	}

	/* Pick the AQ opcode by transition: enable on the first setting,
	 * modify while staying enabled, disable when tc_map goes to 0.
	 */
	if (!veb->strict_prio_tc)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
			NULL);
	else if (tc_map)
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
			NULL);
	else
		ret = i40e_aq_config_switch_comp_ets(
			hw, veb->uplink_seid,
			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
			NULL);

	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to set TCs' strict priority mode."
			    " err (%d)", ret);
		return -EINVAL;
	}

	veb->strict_prio_tc = tc_map;

	/* Enable DCBx again, if all the TCs' strict priority disabled. */
	if (!tc_map) {
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to enable DCBx, err(%d).", ret);
			return -EINVAL;
		}

		PMD_DRV_LOG(INFO,
			    "DCBx is enabled again according to strict"
			    " priority setting.");
	}

	return ret;
}
1435
1436 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1437 #define I40E_MAX_PROFILE_NUM 16
1438
1439 static void
1440 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1441                                uint32_t track_id, uint8_t *profile_info_sec,
1442                                bool add)
1443 {
1444         struct i40e_profile_section_header *sec = NULL;
1445         struct i40e_profile_info *pinfo;
1446
1447         sec = (struct i40e_profile_section_header *)profile_info_sec;
1448         sec->tbl_size = 1;
1449         sec->data_end = sizeof(struct i40e_profile_section_header) +
1450                 sizeof(struct i40e_profile_info);
1451         sec->section.type = SECTION_TYPE_INFO;
1452         sec->section.offset = sizeof(struct i40e_profile_section_header);
1453         sec->section.size = sizeof(struct i40e_profile_info);
1454         pinfo = (struct i40e_profile_info *)(profile_info_sec +
1455                                              sec->section.offset);
1456         pinfo->track_id = track_id;
1457         memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1458         memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1459         if (add)
1460                 pinfo->op = I40E_DDP_ADD_TRACKID;
1461         else
1462                 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1463 }
1464
1465 static enum i40e_status_code
1466 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1467 {
1468         enum i40e_status_code status = I40E_SUCCESS;
1469         struct i40e_profile_section_header *sec;
1470         uint32_t track_id;
1471         uint32_t offset = 0;
1472         uint32_t info = 0;
1473
1474         sec = (struct i40e_profile_section_header *)profile_info_sec;
1475         track_id = ((struct i40e_profile_info *)(profile_info_sec +
1476                                          sec->section.offset))->track_id;
1477
1478         status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1479                                    track_id, &offset, &info, NULL);
1480         if (status)
1481                 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1482                             "offset %d, info %d",
1483                             offset, info);
1484
1485         return status;
1486 }
1487
1488 /* Check if the profile info exists */
1489 static int
1490 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1491 {
1492         struct rte_eth_dev *dev = &rte_eth_devices[port];
1493         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         uint8_t *buff;
1495         struct rte_pmd_i40e_profile_list *p_list;
1496         struct rte_pmd_i40e_profile_info *pinfo, *p;
1497         uint32_t i;
1498         int ret;
1499
1500         buff = rte_zmalloc("pinfo_list",
1501                            (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1502                            0);
1503         if (!buff) {
1504                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1505                 return -1;
1506         }
1507
1508         ret = i40e_aq_get_ddp_list(
1509                 hw, (void *)buff,
1510                 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1511                 0, NULL);
1512         if (ret) {
1513                 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1514                 rte_free(buff);
1515                 return -1;
1516         }
1517         p_list = (struct rte_pmd_i40e_profile_list *)buff;
1518         pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1519                              sizeof(struct i40e_profile_section_header));
1520         for (i = 0; i < p_list->p_count; i++) {
1521                 p = &p_list->p_info[i];
1522                 if (pinfo->track_id == p->track_id) {
1523                         PMD_DRV_LOG(INFO, "Profile exists.");
1524                         rte_free(buff);
1525                         return 1;
1526                 }
1527         }
1528
1529         rte_free(buff);
1530         return 0;
1531 }
1532
/**
 * Load, write, or roll back a Dynamic Device Personalization (DDP)
 * package on the device.
 *
 * @param port port identifier of the PF device
 * @param buff package buffer starting with struct i40e_package_header
 * @param size size of the buffer in bytes
 * @param op   RTE_PMD_I40E_PKG_OP_WR_ADD to load a new profile,
 *             RTE_PMD_I40E_PKG_OP_WR_DEL to roll one back,
 *             RTE_PMD_I40E_PKG_OP_WR_ONLY to write without tracking
 * @return 0 on success; -ENOTSUP/-ENODEV/-EINVAL/-EEXIST/-EACCES or a
 *         positive i40e status code on failure
 */
int
rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
				 uint32_t size,
				 enum rte_pmd_i40e_package_op op)
{
	struct rte_eth_dev *dev;
	struct i40e_hw *hw;
	struct i40e_package_header *pkg_hdr;
	struct i40e_generic_seg_header *profile_seg_hdr;
	struct i40e_generic_seg_header *metadata_seg_hdr;
	uint32_t track_id;
	uint8_t *profile_info_sec;
	int is_exist;
	enum i40e_status_code status = I40E_SUCCESS;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Operation not supported.");
		return -ENOTSUP;
	}

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* A valid package must contain at least the package header, one
	 * metadata segment and the two leading 32-bit fields.
	 */
	if (size < (sizeof(struct i40e_package_header) +
		    sizeof(struct i40e_metadata_segment) +
		    sizeof(uint32_t) * 2)) {
		PMD_DRV_LOG(ERR, "Buff is invalid.");
		return -EINVAL;
	}

	pkg_hdr = (struct i40e_package_header *)buff;

	if (!pkg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
		return -EINVAL;
	}

	if (pkg_hdr->segment_count < 2) {
		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
		return -EINVAL;
	}

	/* Refresh driver-side customized info (e.g. packet type table)
	 * from the package contents.
	 */
	i40e_update_customized_info(dev, buff, size);

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);
	if (!metadata_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
		return -EINVAL;
	}
	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
	if (track_id == I40E_DDP_TRACKID_INVALID) {
		PMD_DRV_LOG(ERR, "Invalid track_id");
		return -EINVAL;
	}

	/* Find profile segment */
	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
						       pkg_hdr);
	if (!profile_seg_hdr) {
		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
		return -EINVAL;
	}

	profile_info_sec = rte_zmalloc(
		"i40e_profile_info",
		sizeof(struct i40e_profile_section_header) +
		sizeof(struct i40e_profile_info),
		0);
	if (!profile_info_sec) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -EINVAL;
	}

	/* Check if the profile already loaded */
	i40e_generate_profile_info_sec(
		((struct i40e_profile_segment *)profile_seg_hdr)->name,
		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
		track_id, profile_info_sec,
		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
	is_exist = i40e_check_profile_info(port, profile_info_sec);
	if (is_exist < 0) {
		PMD_DRV_LOG(ERR, "Failed to check profile.");
		rte_free(profile_info_sec);
		return -EINVAL;
	}

	/* Adding requires the profile to be absent; deleting requires it
	 * to be present. WR_ONLY skips both checks.
	 */
	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
		if (is_exist) {
			PMD_DRV_LOG(ERR, "Profile already exists.");
			rte_free(profile_info_sec);
			return -EEXIST;
		}
	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		if (!is_exist) {
			PMD_DRV_LOG(ERR, "Profile does not exist.");
			rte_free(profile_info_sec);
			return -EACCES;
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		status = i40e_rollback_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
			rte_free(profile_info_sec);
			return status;
		}
	} else {
		status = i40e_write_profile(
			hw,
			(struct i40e_profile_segment *)profile_seg_hdr,
			track_id);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
			else
				PMD_DRV_LOG(ERR, "Failed to write profile.");
			rte_free(profile_info_sec);
			return status;
		}
	}

	/* Record the add/remove in the firmware's loaded-profile list,
	 * except for untracked WR_ONLY writes.
	 */
	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
		/* Modify loaded profiles info list */
		status = i40e_add_rm_profile_info(hw, profile_info_sec);
		if (status) {
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
			else
				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
		}
	}

	rte_free(profile_info_sec);
	return status;
}
1682
1683 /* Get number of tvl records in the section */
1684 static unsigned int
1685 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1686 {
1687         unsigned int i, nb_rec, nb_tlv = 0;
1688         struct i40e_profile_tlv_section_record *tlv;
1689
1690         if (!sec)
1691                 return nb_tlv;
1692
1693         /* get number of records in the section */
1694         nb_rec = sec->section.size /
1695                                 sizeof(struct i40e_profile_tlv_section_record);
1696         for (i = 0; i < nb_rec; ) {
1697                 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1698                 i += tlv->len;
1699                 nb_tlv++;
1700         }
1701         return nb_tlv;
1702 }
1703
1704 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1705         uint8_t *info_buff, uint32_t info_size,
1706         enum rte_pmd_i40e_package_info type)
1707 {
1708         uint32_t ret_size;
1709         struct i40e_package_header *pkg_hdr;
1710         struct i40e_generic_seg_header *i40e_seg_hdr;
1711         struct i40e_generic_seg_header *note_seg_hdr;
1712         struct i40e_generic_seg_header *metadata_seg_hdr;
1713
1714         if (!info_buff) {
1715                 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1716                 return -EINVAL;
1717         }
1718
1719         if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1720                 sizeof(struct i40e_metadata_segment) +
1721                 sizeof(uint32_t) * 2)) {
1722                 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1723                 return -EINVAL;
1724         }
1725
1726         pkg_hdr = (struct i40e_package_header *)pkg_buff;
1727         if (pkg_hdr->segment_count < 2) {
1728                 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1729                 return -EINVAL;
1730         }
1731
1732         /* Find metadata segment */
1733         metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1734                 pkg_hdr);
1735
1736         /* Find global notes segment */
1737         note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1738                 pkg_hdr);
1739
1740         /* Find i40e profile segment */
1741         i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1742
1743         /* get global header info */
1744         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1745                 struct rte_pmd_i40e_profile_info *info =
1746                         (struct rte_pmd_i40e_profile_info *)info_buff;
1747
1748                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1749                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1750                         return -EINVAL;
1751                 }
1752
1753                 if (!metadata_seg_hdr) {
1754                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1755                         return -EINVAL;
1756                 }
1757
1758                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1759                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1760                 info->track_id =
1761                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1762
1763                 memcpy(info->name,
1764                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1765                         I40E_DDP_NAME_SIZE);
1766                 memcpy(&info->version,
1767                         &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1768                         sizeof(struct i40e_ddp_version));
1769                 return I40E_SUCCESS;
1770         }
1771
1772         /* get global note size */
1773         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1774                 if (info_size < sizeof(uint32_t)) {
1775                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1776                         return -EINVAL;
1777                 }
1778                 if (note_seg_hdr == NULL)
1779                         ret_size = 0;
1780                 else
1781                         ret_size = note_seg_hdr->size;
1782                 *(uint32_t *)info_buff = ret_size;
1783                 return I40E_SUCCESS;
1784         }
1785
1786         /* get global note */
1787         if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1788                 if (note_seg_hdr == NULL)
1789                         return -ENOTSUP;
1790                 if (info_size < note_seg_hdr->size) {
1791                         PMD_DRV_LOG(ERR, "Information buffer size is too small");
1792                         return -EINVAL;
1793                 }
1794                 memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1795                 return I40E_SUCCESS;
1796         }
1797
1798         /* get i40e segment header info */
1799         if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1800                 struct rte_pmd_i40e_profile_info *info =
1801                         (struct rte_pmd_i40e_profile_info *)info_buff;
1802
1803                 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1804                         PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1805                         return -EINVAL;
1806                 }
1807
1808                 if (!metadata_seg_hdr) {
1809                         PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1810                         return -EINVAL;
1811                 }
1812
1813                 if (!i40e_seg_hdr) {
1814                         PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1815                         return -EINVAL;
1816                 }
1817
1818                 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1819                 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1820                 info->track_id =
1821                         ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1822
1823                 memcpy(info->name,
1824                         ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1825                         I40E_DDP_NAME_SIZE);
1826                 memcpy(&info->version,
1827                         &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1828                         sizeof(struct i40e_ddp_version));
1829                 return I40E_SUCCESS;
1830         }
1831
1832         /* get number of devices */
1833         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1834                 if (info_size < sizeof(uint32_t)) {
1835                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1836                         return -EINVAL;
1837                 }
1838                 *(uint32_t *)info_buff =
1839                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1840                 return I40E_SUCCESS;
1841         }
1842
1843         /* get list of devices */
1844         if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1845                 uint32_t dev_num;
1846                 dev_num =
1847                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1848                 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1849                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1850                         return -EINVAL;
1851                 }
1852                 memcpy(info_buff,
1853                         ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1854                         sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1855                 return I40E_SUCCESS;
1856         }
1857
1858         /* get number of protocols */
1859         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1860                 struct i40e_profile_section_header *proto;
1861
1862                 if (info_size < sizeof(uint32_t)) {
1863                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1864                         return -EINVAL;
1865                 }
1866                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1867                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1868                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1869                 return I40E_SUCCESS;
1870         }
1871
1872         /* get list of protocols */
1873         if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1874                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1875                 struct rte_pmd_i40e_proto_info *pinfo;
1876                 struct i40e_profile_section_header *proto;
1877                 struct i40e_profile_tlv_section_record *tlv;
1878
1879                 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1880                 nb_proto_info = info_size /
1881                                         sizeof(struct rte_pmd_i40e_proto_info);
1882                 for (i = 0; i < nb_proto_info; i++) {
1883                         pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1884                         memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1885                 }
1886                 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1887                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1888                 nb_tlv = i40e_get_tlv_section_size(proto);
1889                 if (nb_tlv == 0)
1890                         return I40E_SUCCESS;
1891                 if (nb_proto_info < nb_tlv) {
1892                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1893                         return -EINVAL;
1894                 }
1895                 /* get number of records in the section */
1896                 nb_rec = proto->section.size /
1897                                 sizeof(struct i40e_profile_tlv_section_record);
1898                 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1899                 for (i = j = 0; i < nb_rec; j++) {
1900                         pinfo[j].proto_id = tlv->data[0];
1901                         snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
1902                                  (const char *)&tlv->data[1]);
1903                         i += tlv->len;
1904                         tlv = &tlv[tlv->len];
1905                 }
1906                 return I40E_SUCCESS;
1907         }
1908
1909         /* get number of packet classification types */
1910         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1911                 struct i40e_profile_section_header *pctype;
1912
1913                 if (info_size < sizeof(uint32_t)) {
1914                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1915                         return -EINVAL;
1916                 }
1917                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1918                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1919                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1920                 return I40E_SUCCESS;
1921         }
1922
1923         /* get list of packet classification types */
1924         if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1925                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1926                 struct rte_pmd_i40e_ptype_info *pinfo;
1927                 struct i40e_profile_section_header *pctype;
1928                 struct i40e_profile_tlv_section_record *tlv;
1929
1930                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1931                 nb_proto_info = info_size /
1932                                         sizeof(struct rte_pmd_i40e_ptype_info);
1933                 for (i = 0; i < nb_proto_info; i++)
1934                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1935                                sizeof(struct rte_pmd_i40e_ptype_info));
1936                 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1937                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1938                 nb_tlv = i40e_get_tlv_section_size(pctype);
1939                 if (nb_tlv == 0)
1940                         return I40E_SUCCESS;
1941                 if (nb_proto_info < nb_tlv) {
1942                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1943                         return -EINVAL;
1944                 }
1945
1946                 /* get number of records in the section */
1947                 nb_rec = pctype->section.size /
1948                                 sizeof(struct i40e_profile_tlv_section_record);
1949                 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1950                 for (i = j = 0; i < nb_rec; j++) {
1951                         memcpy(&pinfo[j], tlv->data,
1952                                sizeof(struct rte_pmd_i40e_ptype_info));
1953                         i += tlv->len;
1954                         tlv = &tlv[tlv->len];
1955                 }
1956                 return I40E_SUCCESS;
1957         }
1958
1959         /* get number of packet types */
1960         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
1961                 struct i40e_profile_section_header *ptype;
1962
1963                 if (info_size < sizeof(uint32_t)) {
1964                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1965                         return -EINVAL;
1966                 }
1967                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
1968                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1969                 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
1970                 return I40E_SUCCESS;
1971         }
1972
1973         /* get list of packet types */
1974         if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
1975                 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1976                 struct rte_pmd_i40e_ptype_info *pinfo;
1977                 struct i40e_profile_section_header *ptype;
1978                 struct i40e_profile_tlv_section_record *tlv;
1979
1980                 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1981                 nb_proto_info = info_size /
1982                                         sizeof(struct rte_pmd_i40e_ptype_info);
1983                 for (i = 0; i < nb_proto_info; i++)
1984                         memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1985                                sizeof(struct rte_pmd_i40e_ptype_info));
1986                 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
1987                                 (struct i40e_profile_segment *)i40e_seg_hdr);
1988                 nb_tlv = i40e_get_tlv_section_size(ptype);
1989                 if (nb_tlv == 0)
1990                         return I40E_SUCCESS;
1991                 if (nb_proto_info < nb_tlv) {
1992                         PMD_DRV_LOG(ERR, "Invalid information buffer size");
1993                         return -EINVAL;
1994                 }
1995                 /* get number of records in the section */
1996                 nb_rec = ptype->section.size /
1997                                 sizeof(struct i40e_profile_tlv_section_record);
1998                 for (i = j = 0; i < nb_rec; j++) {
1999                         tlv = (struct i40e_profile_tlv_section_record *)
2000                                                                 &ptype[1 + i];
2001                         memcpy(&pinfo[j], tlv->data,
2002                                sizeof(struct rte_pmd_i40e_ptype_info));
2003                         i += tlv->len;
2004                 }
2005                 return I40E_SUCCESS;
2006         }
2007
2008         PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2009         return -EINVAL;
2010 }
2011
2012 int
2013 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2014 {
2015         struct rte_eth_dev *dev;
2016         struct i40e_hw *hw;
2017         enum i40e_status_code status = I40E_SUCCESS;
2018
2019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2020
2021         dev = &rte_eth_devices[port];
2022
2023         if (!is_i40e_supported(dev))
2024                 return -ENOTSUP;
2025
2026         if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2027                 return -EINVAL;
2028
2029         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2030
2031         status = i40e_aq_get_ddp_list(hw, (void *)buff,
2032                                       size, 0, NULL);
2033
2034         return status;
2035 }
2036
2037 static int check_invalid_pkt_type(uint32_t pkt_type)
2038 {
2039         uint32_t l2, l3, l4, tnl, il2, il3, il4;
2040
2041         l2 = pkt_type & RTE_PTYPE_L2_MASK;
2042         l3 = pkt_type & RTE_PTYPE_L3_MASK;
2043         l4 = pkt_type & RTE_PTYPE_L4_MASK;
2044         tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2045         il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2046         il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2047         il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2048
2049         if (l2 &&
2050             l2 != RTE_PTYPE_L2_ETHER &&
2051             l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2052             l2 != RTE_PTYPE_L2_ETHER_ARP &&
2053             l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2054             l2 != RTE_PTYPE_L2_ETHER_NSH &&
2055             l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2056             l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2057             l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2058                 return -1;
2059
2060         if (l3 &&
2061             l3 != RTE_PTYPE_L3_IPV4 &&
2062             l3 != RTE_PTYPE_L3_IPV4_EXT &&
2063             l3 != RTE_PTYPE_L3_IPV6 &&
2064             l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2065             l3 != RTE_PTYPE_L3_IPV6_EXT &&
2066             l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2067                 return -1;
2068
2069         if (l4 &&
2070             l4 != RTE_PTYPE_L4_TCP &&
2071             l4 != RTE_PTYPE_L4_UDP &&
2072             l4 != RTE_PTYPE_L4_FRAG &&
2073             l4 != RTE_PTYPE_L4_SCTP &&
2074             l4 != RTE_PTYPE_L4_ICMP &&
2075             l4 != RTE_PTYPE_L4_NONFRAG)
2076                 return -1;
2077
2078         if (tnl &&
2079             tnl != RTE_PTYPE_TUNNEL_IP &&
2080             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2081             tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2082             tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2083             tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2084             tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2085             tnl != RTE_PTYPE_TUNNEL_GTPC &&
2086             tnl != RTE_PTYPE_TUNNEL_GTPU &&
2087             tnl != RTE_PTYPE_TUNNEL_L2TP)
2088                 return -1;
2089
2090         if (il2 &&
2091             il2 != RTE_PTYPE_INNER_L2_ETHER &&
2092             il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2093             il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2094                 return -1;
2095
2096         if (il3 &&
2097             il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2098             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2099             il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2100             il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2101             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2102             il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2103                 return -1;
2104
2105         if (il4 &&
2106             il4 != RTE_PTYPE_INNER_L4_TCP &&
2107             il4 != RTE_PTYPE_INNER_L4_UDP &&
2108             il4 != RTE_PTYPE_INNER_L4_FRAG &&
2109             il4 != RTE_PTYPE_INNER_L4_SCTP &&
2110             il4 != RTE_PTYPE_INNER_L4_ICMP &&
2111             il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2112                 return -1;
2113
2114         return 0;
2115 }
2116
2117 static int check_invalid_ptype_mapping(
2118                 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2119                 uint16_t count)
2120 {
2121         int i;
2122
2123         for (i = 0; i < count; i++) {
2124                 uint16_t ptype = mapping_table[i].hw_ptype;
2125                 uint32_t pkt_type = mapping_table[i].sw_ptype;
2126
2127                 if (ptype >= I40E_MAX_PKT_TYPE)
2128                         return -1;
2129
2130                 if (pkt_type == RTE_PTYPE_UNKNOWN)
2131                         continue;
2132
2133                 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2134                         continue;
2135
2136                 if (check_invalid_pkt_type(pkt_type))
2137                         return -1;
2138         }
2139
2140         return 0;
2141 }
2142
2143 int
2144 rte_pmd_i40e_ptype_mapping_update(
2145                         uint16_t port,
2146                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2147                         uint16_t count,
2148                         uint8_t exclusive)
2149 {
2150         struct rte_eth_dev *dev;
2151         struct i40e_adapter *ad;
2152         int i;
2153
2154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2155
2156         dev = &rte_eth_devices[port];
2157
2158         if (!is_i40e_supported(dev))
2159                 return -ENOTSUP;
2160
2161         if (count > I40E_MAX_PKT_TYPE)
2162                 return -EINVAL;
2163
2164         if (check_invalid_ptype_mapping(mapping_items, count))
2165                 return -EINVAL;
2166
2167         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2168
2169         if (exclusive) {
2170                 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2171                         ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2172         }
2173
2174         for (i = 0; i < count; i++)
2175                 ad->ptype_tbl[mapping_items[i].hw_ptype]
2176                         = mapping_items[i].sw_ptype;
2177
2178         return 0;
2179 }
2180
2181 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2182 {
2183         struct rte_eth_dev *dev;
2184
2185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2186
2187         dev = &rte_eth_devices[port];
2188
2189         if (!is_i40e_supported(dev))
2190                 return -ENOTSUP;
2191
2192         i40e_set_default_ptype_table(dev);
2193
2194         return 0;
2195 }
2196
2197 int rte_pmd_i40e_ptype_mapping_get(
2198                         uint16_t port,
2199                         struct rte_pmd_i40e_ptype_mapping *mapping_items,
2200                         uint16_t size,
2201                         uint16_t *count,
2202                         uint8_t valid_only)
2203 {
2204         struct rte_eth_dev *dev;
2205         struct i40e_adapter *ad;
2206         int n = 0;
2207         uint16_t i;
2208
2209         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2210
2211         dev = &rte_eth_devices[port];
2212
2213         if (!is_i40e_supported(dev))
2214                 return -ENOTSUP;
2215
2216         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2217
2218         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2219                 if (n >= size)
2220                         break;
2221                 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2222                         continue;
2223                 mapping_items[n].hw_ptype = i;
2224                 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2225                 n++;
2226         }
2227
2228         *count = n;
2229         return 0;
2230 }
2231
2232 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2233                                        uint32_t target,
2234                                        uint8_t mask,
2235                                        uint32_t pkt_type)
2236 {
2237         struct rte_eth_dev *dev;
2238         struct i40e_adapter *ad;
2239         uint16_t i;
2240
2241         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2242
2243         dev = &rte_eth_devices[port];
2244
2245         if (!is_i40e_supported(dev))
2246                 return -ENOTSUP;
2247
2248         if (!mask && check_invalid_pkt_type(target))
2249                 return -EINVAL;
2250
2251         if (check_invalid_pkt_type(pkt_type))
2252                 return -EINVAL;
2253
2254         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2255
2256         for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2257                 if (mask) {
2258                         if ((target | ad->ptype_tbl[i]) == target &&
2259                             (target & ad->ptype_tbl[i]))
2260                                 ad->ptype_tbl[i] = pkt_type;
2261                 } else {
2262                         if (ad->ptype_tbl[i] == target)
2263                                 ad->ptype_tbl[i] = pkt_type;
2264                 }
2265         }
2266
2267         return 0;
2268 }
2269
2270 int
2271 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2272                              struct ether_addr *mac_addr)
2273 {
2274         struct rte_eth_dev *dev;
2275         struct i40e_pf_vf *vf;
2276         struct i40e_vsi *vsi;
2277         struct i40e_pf *pf;
2278         struct i40e_mac_filter_info mac_filter;
2279         int ret;
2280
2281         if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2282                 return -EINVAL;
2283
2284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2285
2286         dev = &rte_eth_devices[port];
2287
2288         if (!is_i40e_supported(dev))
2289                 return -ENOTSUP;
2290
2291         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2292
2293         if (vf_id >= pf->vf_num || !pf->vfs)
2294                 return -EINVAL;
2295
2296         vf = &pf->vfs[vf_id];
2297         vsi = vf->vsi;
2298         if (!vsi) {
2299                 PMD_DRV_LOG(ERR, "Invalid VSI.");
2300                 return -EINVAL;
2301         }
2302
2303         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2304         ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2305         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2306         if (ret != I40E_SUCCESS) {
2307                 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2308                 return -1;
2309         }
2310
2311         return 0;
2312 }
2313
2314 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2315 {
2316         struct rte_eth_dev *dev;
2317
2318         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2319
2320         dev = &rte_eth_devices[port];
2321
2322         if (!is_i40e_supported(dev))
2323                 return -ENOTSUP;
2324
2325         i40e_set_default_pctype_table(dev);
2326
2327         return 0;
2328 }
2329
2330 int rte_pmd_i40e_flow_type_mapping_get(
2331                         uint16_t port,
2332                         struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2333 {
2334         struct rte_eth_dev *dev;
2335         struct i40e_adapter *ad;
2336         uint16_t i;
2337
2338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2339
2340         dev = &rte_eth_devices[port];
2341
2342         if (!is_i40e_supported(dev))
2343                 return -ENOTSUP;
2344
2345         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2346
2347         for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2348                 mapping_items[i].flow_type = i;
2349                 mapping_items[i].pctype = ad->pctypes_tbl[i];
2350         }
2351
2352         return 0;
2353 }
2354
2355 int
2356 rte_pmd_i40e_flow_type_mapping_update(
2357                         uint16_t port,
2358                         struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2359                         uint16_t count,
2360                         uint8_t exclusive)
2361 {
2362         struct rte_eth_dev *dev;
2363         struct i40e_adapter *ad;
2364         int i;
2365
2366         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2367
2368         dev = &rte_eth_devices[port];
2369
2370         if (!is_i40e_supported(dev))
2371                 return -ENOTSUP;
2372
2373         if (count > I40E_FLOW_TYPE_MAX)
2374                 return -EINVAL;
2375
2376         for (i = 0; i < count; i++)
2377                 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2378                     mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2379                     (mapping_items[i].pctype &
2380                     (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2381                         return -EINVAL;
2382
2383         ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2384
2385         if (exclusive) {
2386                 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2387                         ad->pctypes_tbl[i] = 0ULL;
2388                 ad->flow_types_mask = 0ULL;
2389         }
2390
2391         for (i = 0; i < count; i++) {
2392                 ad->pctypes_tbl[mapping_items[i].flow_type] =
2393                                                 mapping_items[i].pctype;
2394                 if (mapping_items[i].pctype)
2395                         ad->flow_types_mask |=
2396                                         (1ULL << mapping_items[i].flow_type);
2397                 else
2398                         ad->flow_types_mask &=
2399                                         ~(1ULL << mapping_items[i].flow_type);
2400         }
2401
2402         for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2403                 ad->pctypes_mask |= ad->pctypes_tbl[i];
2404
2405         return 0;
2406 }
2407
2408 int
2409 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2410 {
2411         struct rte_eth_dev *dev;
2412         struct ether_addr *mac;
2413         struct i40e_pf *pf;
2414         int vf_id;
2415         struct i40e_pf_vf *vf;
2416         uint16_t vf_num;
2417
2418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2419         dev = &rte_eth_devices[port];
2420
2421         if (!is_i40e_supported(dev))
2422                 return -ENOTSUP;
2423
2424         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2425         vf_num = pf->vf_num;
2426
2427         for (vf_id = 0; vf_id < vf_num; vf_id++) {
2428                 vf = &pf->vfs[vf_id];
2429                 mac = &vf->mac_addr;
2430
2431                 if (is_same_ether_addr(mac, vf_mac))
2432                         return vf_id;
2433         }
2434
2435         return -EINVAL;
2436 }
2437
/* Reprogram the main VSI's TC/queue mapping so that every configured
 * queue region in pf->queue_region becomes a traffic class backed by
 * its own contiguous queue range, then push the new context to the
 * firmware via the admin queue and mirror it into the local VSI info.
 *
 * Returns 0 on success, -EINVAL when no region has been configured,
 * or the admin-queue error code from i40e_aq_update_vsi_params().
 */
static int
i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
			      struct i40e_pf *pf)
{
	uint16_t i;
	struct i40e_vsi *vsi = pf->main_vsi;
	uint16_t queue_offset, bsf, tc_index;
	struct i40e_vsi_context ctxt;
	struct i40e_aqc_vsi_properties_data *vsi_info;
	struct i40e_queue_regions *region_info =
				&pf->queue_region;
	int32_t ret = -EINVAL;

	if (!region_info->queue_region_number) {
		PMD_INIT_LOG(ERR, "there is no that region id been set before");
		return ret;
	}

	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	vsi_info = &ctxt.info;

	/* Start from empty TC and queue maps; only configured regions
	 * are filled in below.
	 */
	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);

	/* Configure queue region and queue mapping parameters,
	 * for enabled queue region, allocate queues to this region.
	 */

	for (i = 0; i < region_info->queue_region_number; i++) {
		/* region_id doubles as the TC index for this region */
		tc_index = region_info->region[i].region_id;
		/* queue_num is a power of two (enforced at set time),
		 * so its log2 encodes the queue count for the TC.
		 */
		bsf = rte_bsf32(region_info->region[i].queue_num);
		queue_offset = region_info->region[i].queue_start_index;
		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	vsi_info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	vsi_info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
				hw->aq.asq_last_status);
		return ret;
	}
	/* update the local VSI info with updated queue map */
	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
					sizeof(vsi->info.tc_mapping));
	rte_memcpy(&vsi->info.queue_mapping,
			&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
	vsi->info.mapping_flags = ctxt.info.mapping_flags;
	vsi->info.valid_sections = 0;

	return 0;
}
2507
2508
/* Record a new queue region in pf->queue_region after validating the
 * request: the region size must be a power of two no larger than 64,
 * the region id must not exceed I40E_REGION_MAX_INDEX, the queue span
 * must fit inside the main VSI, and the id must not already be in use.
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int
i40e_queue_region_set_region(struct i40e_pf *pf,
				struct rte_pmd_i40e_queue_region_conf *conf_ptr)
{
	uint16_t i;
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct i40e_queue_regions *info = &pf->queue_region;
	int32_t ret = -EINVAL;

	/* Region size maps to a TC queue count, so it must be a
	 * power of two (see the bsf32 encoding in the mapping code).
	 */
	if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
				conf_ptr->queue_num <= 64)) {
		PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
			"total number of queues do not exceed the VSI allocation");
		return ret;
	}

	if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
		PMD_DRV_LOG(ERR, "the queue region max index is 7");
		return ret;
	}

	if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
					> main_vsi->nb_used_qps) {
		PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
		return ret;
	}

	/* Scan existing regions for a duplicate id; after the loop,
	 * i == queue_region_number means the id is new and i is the
	 * next free slot.
	 */
	for (i = 0; i < info->queue_region_number; i++)
		if (conf_ptr->region_id == info->region[i].region_id)
			break;

	if (i == info->queue_region_number &&
				i <= I40E_REGION_MAX_INDEX) {
		info->region[i].region_id = conf_ptr->region_id;
		info->region[i].queue_num = conf_ptr->queue_num;
		info->region[i].queue_start_index =
			conf_ptr->queue_start_index;
		info->queue_region_number++;
	} else {
		PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
		return ret;
	}

	return 0;
}
2554
2555 static int
2556 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2557                         struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2558 {
2559         int32_t ret = -EINVAL;
2560         struct i40e_queue_regions *info = &pf->queue_region;
2561         uint16_t i, j;
2562         uint16_t region_index, flowtype_index;
2563
2564         /* For the pctype or hardware flowtype of packet,
2565          * the specific index for each type has been defined
2566          * in file i40e_type.h as enum i40e_filter_pctype.
2567          */
2568
2569         if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2570                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2571                 return ret;
2572         }
2573
2574         if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2575                 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2576                 return ret;
2577         }
2578
2579
2580         for (i = 0; i < info->queue_region_number; i++)
2581                 if (rss_region_conf->region_id == info->region[i].region_id)
2582                         break;
2583
2584         if (i == info->queue_region_number) {
2585                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2586                 ret = -EINVAL;
2587                 return ret;
2588         }
2589         region_index = i;
2590
2591         for (i = 0; i < info->queue_region_number; i++) {
2592                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2593                         if (rss_region_conf->hw_flowtype ==
2594                                 info->region[i].hw_flowtype[j]) {
2595                                 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2596                                 return 0;
2597                         }
2598                 }
2599         }
2600
2601         flowtype_index = info->region[region_index].flowtype_num;
2602         info->region[region_index].hw_flowtype[flowtype_index] =
2603                                         rss_region_conf->hw_flowtype;
2604         info->region[region_index].flowtype_num++;
2605
2606         return 0;
2607 }
2608
2609 static void
2610 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2611                                 struct i40e_pf *pf)
2612 {
2613         uint8_t hw_flowtype;
2614         uint32_t pfqf_hregion;
2615         uint16_t i, j, index;
2616         struct i40e_queue_regions *info = &pf->queue_region;
2617
2618         /* For the pctype or hardware flowtype of packet,
2619          * the specific index for each type has been defined
2620          * in file i40e_type.h as enum i40e_filter_pctype.
2621          */
2622
2623         for (i = 0; i < info->queue_region_number; i++) {
2624                 for (j = 0; j < info->region[i].flowtype_num; j++) {
2625                         hw_flowtype = info->region[i].hw_flowtype[j];
2626                         index = hw_flowtype >> 3;
2627                         pfqf_hregion =
2628                                 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2629
2630                         if ((hw_flowtype & 0x7) == 0) {
2631                                 pfqf_hregion |= info->region[i].region_id <<
2632                                         I40E_PFQF_HREGION_REGION_0_SHIFT;
2633                                 pfqf_hregion |= 1 <<
2634                                         I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2635                         } else if ((hw_flowtype & 0x7) == 1) {
2636                                 pfqf_hregion |= info->region[i].region_id  <<
2637                                         I40E_PFQF_HREGION_REGION_1_SHIFT;
2638                                 pfqf_hregion |= 1 <<
2639                                         I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2640                         } else if ((hw_flowtype & 0x7) == 2) {
2641                                 pfqf_hregion |= info->region[i].region_id  <<
2642                                         I40E_PFQF_HREGION_REGION_2_SHIFT;
2643                                 pfqf_hregion |= 1 <<
2644                                         I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2645                         } else if ((hw_flowtype & 0x7) == 3) {
2646                                 pfqf_hregion |= info->region[i].region_id  <<
2647                                         I40E_PFQF_HREGION_REGION_3_SHIFT;
2648                                 pfqf_hregion |= 1 <<
2649                                         I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2650                         } else if ((hw_flowtype & 0x7) == 4) {
2651                                 pfqf_hregion |= info->region[i].region_id  <<
2652                                         I40E_PFQF_HREGION_REGION_4_SHIFT;
2653                                 pfqf_hregion |= 1 <<
2654                                         I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2655                         } else if ((hw_flowtype & 0x7) == 5) {
2656                                 pfqf_hregion |= info->region[i].region_id  <<
2657                                         I40E_PFQF_HREGION_REGION_5_SHIFT;
2658                                 pfqf_hregion |= 1 <<
2659                                         I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2660                         } else if ((hw_flowtype & 0x7) == 6) {
2661                                 pfqf_hregion |= info->region[i].region_id  <<
2662                                         I40E_PFQF_HREGION_REGION_6_SHIFT;
2663                                 pfqf_hregion |= 1 <<
2664                                         I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2665                         } else {
2666                                 pfqf_hregion |= info->region[i].region_id  <<
2667                                         I40E_PFQF_HREGION_REGION_7_SHIFT;
2668                                 pfqf_hregion |= 1 <<
2669                                         I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2670                         }
2671
2672                         i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2673                                                 pfqf_hregion);
2674                 }
2675         }
2676 }
2677
2678 static int
2679 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2680                 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2681 {
2682         struct i40e_queue_regions *info = &pf->queue_region;
2683         int32_t ret = -EINVAL;
2684         uint16_t i, j, region_index;
2685
2686         if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2687                 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2688                 return ret;
2689         }
2690
2691         if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2692                 PMD_DRV_LOG(ERR, "the region_id max index is 7");
2693                 return ret;
2694         }
2695
2696         for (i = 0; i < info->queue_region_number; i++)
2697                 if (rss_region_conf->region_id == info->region[i].region_id)
2698                         break;
2699
2700         if (i == info->queue_region_number) {
2701                 PMD_DRV_LOG(ERR, "that region id has not been set before");
2702                 ret = -EINVAL;
2703                 return ret;
2704         }
2705
2706         region_index = i;
2707
2708         for (i = 0; i < info->queue_region_number; i++) {
2709                 for (j = 0; j < info->region[i].user_priority_num; j++) {
2710                         if (info->region[i].user_priority[j] ==
2711                                 rss_region_conf->user_priority) {
2712                                 PMD_DRV_LOG(ERR, "that user priority has been set before");
2713                                 return 0;
2714                         }
2715                 }
2716         }
2717
2718         j = info->region[region_index].user_priority_num;
2719         info->region[region_index].user_priority[j] =
2720                                         rss_region_conf->user_priority;
2721         info->region[region_index].user_priority_num++;
2722
2723         return 0;
2724 }
2725
/*
 * Build a DCB configuration from the PF's software queue-region table and
 * commit it to the firmware: one traffic class per configured region, with
 * the ETS bandwidth split evenly and user priorities mapped to the region
 * ids recorded earlier by i40e_queue_region_set_user_priority().
 *
 * Returns 0 on success, -EINVAL when no region was configured, or the
 * status from i40e_set_dcb_config() on firmware failure.
 */
static int
i40e_queue_region_dcb_configure(struct i40e_hw *hw,
				struct i40e_pf *pf)
{
	struct i40e_dcbx_config dcb_cfg_local;
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
	int32_t ret = -EINVAL;
	uint16_t i, j, prio_index, region_index;
	uint8_t tc_map, tc_bw, bw_lf;

	/* Nothing to program if no region has been set up yet. */
	if (!info->queue_region_number) {
		PMD_DRV_LOG(ERR, "No queue region been set before");
		return ret;
	}

	/* Build the new config in a zeroed local copy first. */
	dcb_cfg = &dcb_cfg_local;
	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

	/* assume each tc has the same bw */
	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
	/* to ensure the sum of tcbw is equal to 100 */
	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
	for (i = 0; i < bw_lf; i++)
		dcb_cfg->etscfg.tcbwtable[i]++;

	/* assume each tc has the same Transmission Selection Algorithm */
	for (i = 0; i < info->queue_region_number; i++)
		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

	/* Map every recorded user priority to its region id, which
	 * doubles as the traffic class index.
	 */
	for (i = 0; i < info->queue_region_number; i++) {
		for (j = 0; j < info->region[i].user_priority_num; j++) {
			prio_index = info->region[i].user_priority[j];
			region_index = info->region[i].region_id;
			dcb_cfg->etscfg.prioritytable[prio_index] =
						region_index;
		}
	}

	/* FW needs one App to configure HW */
	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

	/* Enable PFC on every TC that backs a region. */
	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);

	dcb_cfg->pfc.willing = 0;
	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->pfc.pfcenable = tc_map;

	/* Copy the new config to the current config */
	*old_cfg = *dcb_cfg;
	/* etsrec mirrors etscfg; i40e_set_dcb_config() commits
	 * hw->local_dcbx_config to firmware.
	 */
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(hw);

	if (ret) {
		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
2794
2795 int
2796 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2797         struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2798 {
2799         int32_t ret = -EINVAL;
2800         struct i40e_queue_regions *info = &pf->queue_region;
2801         struct i40e_vsi *main_vsi = pf->main_vsi;
2802
2803         if (on) {
2804                 i40e_queue_region_pf_flowtype_conf(hw, pf);
2805
2806                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2807                 if (ret != I40E_SUCCESS) {
2808                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2809                         return ret;
2810                 }
2811
2812                 ret = i40e_queue_region_dcb_configure(hw, pf);
2813                 if (ret != I40E_SUCCESS) {
2814                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2815                         return ret;
2816                 }
2817
2818                 return 0;
2819         }
2820
2821         if (info->queue_region_number) {
2822                 info->queue_region_number = 1;
2823                 info->region[0].queue_num = main_vsi->nb_used_qps;
2824                 info->region[0].queue_start_index = 0;
2825
2826                 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2827                 if (ret != I40E_SUCCESS)
2828                         PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2829
2830                 ret = i40e_dcb_init_configure(dev, TRUE);
2831                 if (ret != I40E_SUCCESS) {
2832                         PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2833                         pf->flags &= ~I40E_FLAG_DCB;
2834                 }
2835
2836                 i40e_init_queue_region_conf(dev);
2837         }
2838         return 0;
2839 }
2840
2841 static int
2842 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2843 {
2844         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2845         uint64_t hena;
2846
2847         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2848         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2849
2850         if (!hena)
2851                 return -ENOTSUP;
2852
2853         return 0;
2854 }
2855
2856 static int
2857 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2858                 struct i40e_queue_regions *regions_ptr)
2859 {
2860         struct i40e_queue_regions *info = &pf->queue_region;
2861
2862         rte_memcpy(regions_ptr, info,
2863                         sizeof(struct i40e_queue_regions));
2864
2865         return 0;
2866 }
2867
2868 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2869                 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2870 {
2871         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2872         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2873         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2874         int32_t ret;
2875
2876         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2877
2878         if (!is_i40e_supported(dev))
2879                 return -ENOTSUP;
2880
2881         if (!(!i40e_queue_region_pf_check_rss(pf)))
2882                 return -ENOTSUP;
2883
2884         /* This queue region feature only support pf by now. It should
2885          * be called after dev_start, and will be clear after dev_stop.
2886          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2887          * is just an enable function which server for other configuration,
2888          * it is for all configuration about queue region from up layer,
2889          * at first will only keep in DPDK softwarestored in driver,
2890          * only after "FLUSH_ON", it commit all configuration to HW.
2891          * Because PMD had to set hardware configuration at a time, so
2892          * it will record all up layer command at first.
2893          * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2894          * just clean all configuration about queue region just now,
2895          * and restore all to DPDK i40e driver default
2896          * config when start up.
2897          */
2898
2899         switch (op_type) {
2900         case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2901                 ret = i40e_queue_region_set_region(pf,
2902                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2903                 break;
2904         case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2905                 ret = i40e_queue_region_set_flowtype(pf,
2906                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2907                 break;
2908         case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2909                 ret = i40e_queue_region_set_user_priority(pf,
2910                                 (struct rte_pmd_i40e_queue_region_conf *)arg);
2911                 break;
2912         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2913                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2914                 break;
2915         case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2916                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2917                 break;
2918         case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2919                 ret = i40e_queue_region_get_all_info(pf,
2920                                 (struct i40e_queue_regions *)arg);
2921                 break;
2922         default:
2923                 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2924                             op_type);
2925                 ret = -EINVAL;
2926         }
2927
2928         I40E_WRITE_FLUSH(hw);
2929
2930         return ret;
2931 }
2932
2933 int rte_pmd_i40e_flow_add_del_packet_template(
2934                         uint16_t port,
2935                         const struct rte_pmd_i40e_pkt_template_conf *conf,
2936                         uint8_t add)
2937 {
2938         struct rte_eth_dev *dev = &rte_eth_devices[port];
2939         struct i40e_fdir_filter_conf filter_conf;
2940
2941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2942
2943         if (!is_i40e_supported(dev))
2944                 return -ENOTSUP;
2945
2946         memset(&filter_conf, 0, sizeof(filter_conf));
2947         filter_conf.soft_id = conf->soft_id;
2948         filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
2949         filter_conf.input.flow.raw_flow.packet = conf->input.packet;
2950         filter_conf.input.flow.raw_flow.length = conf->input.length;
2951         filter_conf.input.flow_ext.pkt_template = true;
2952
2953         filter_conf.action.rx_queue = conf->action.rx_queue;
2954         filter_conf.action.behavior =
2955                 (enum i40e_fdir_behavior)conf->action.behavior;
2956         filter_conf.action.report_status =
2957                 (enum i40e_fdir_status)conf->action.report_status;
2958         filter_conf.action.flex_off = conf->action.flex_off;
2959
2960         return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
2961 }
2962
2963 int
2964 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
2965                        struct rte_pmd_i40e_inset *inset,
2966                        enum rte_pmd_i40e_inset_type inset_type)
2967 {
2968         struct rte_eth_dev *dev;
2969         struct i40e_hw *hw;
2970         uint64_t inset_reg;
2971         uint32_t mask_reg[2];
2972         int i;
2973
2974         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2975
2976         dev = &rte_eth_devices[port];
2977
2978         if (!is_i40e_supported(dev))
2979                 return -ENOTSUP;
2980
2981         if (pctype > 63)
2982                 return -EINVAL;
2983
2984         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2985         memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
2986
2987         switch (inset_type) {
2988         case INSET_HASH:
2989                 /* Get input set */
2990                 inset_reg =
2991                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
2992                 inset_reg <<= I40E_32_BIT_WIDTH;
2993                 inset_reg |=
2994                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
2995                 /* Get field mask */
2996                 mask_reg[0] =
2997                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
2998                 mask_reg[1] =
2999                         i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
3000                 break;
3001         case INSET_FDIR:
3002                 inset_reg =
3003                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3004                 inset_reg <<= I40E_32_BIT_WIDTH;
3005                 inset_reg |=
3006                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3007                 mask_reg[0] =
3008                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3009                 mask_reg[1] =
3010                         i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3011                 break;
3012         case INSET_FDIR_FLX:
3013                 inset_reg =
3014                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3015                 mask_reg[0] =
3016                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3017                 mask_reg[1] =
3018                         i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3019                 break;
3020         default:
3021                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3022                 return -EINVAL;
3023         }
3024
3025         inset->inset = inset_reg;
3026
3027         for (i = 0; i < 2; i++) {
3028                 inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3029                 inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3030         }
3031
3032         return 0;
3033 }
3034
3035 int
3036 rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
3037                        struct rte_pmd_i40e_inset *inset,
3038                        enum rte_pmd_i40e_inset_type inset_type)
3039 {
3040         struct rte_eth_dev *dev;
3041         struct i40e_hw *hw;
3042         uint64_t inset_reg;
3043         uint32_t mask_reg[2];
3044         int i;
3045
3046         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3047
3048         dev = &rte_eth_devices[port];
3049
3050         if (!is_i40e_supported(dev))
3051                 return -ENOTSUP;
3052
3053         if (pctype > 63)
3054                 return -EINVAL;
3055
3056         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3057
3058         /* Clear mask first */
3059         for (i = 0; i < 2; i++)
3060                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
3061
3062         inset_reg = inset->inset;
3063         for (i = 0; i < 2; i++)
3064                 mask_reg[i] = (inset->mask[i].field_idx << 16) |
3065                         inset->mask[i].mask;
3066
3067         switch (inset_type) {
3068         case INSET_HASH:
3069                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
3070                                      (uint32_t)(inset_reg & UINT32_MAX));
3071                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
3072                                      (uint32_t)((inset_reg >>
3073                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3074                 for (i = 0; i < 2; i++)
3075                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
3076                                              mask_reg[i]);
3077                 break;
3078         case INSET_FDIR:
3079                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
3080                                      (uint32_t)(inset_reg & UINT32_MAX));
3081                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
3082                                      (uint32_t)((inset_reg >>
3083                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
3084                 for (i = 0; i < 2; i++)
3085                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
3086                                              mask_reg[i]);
3087                 break;
3088         case INSET_FDIR_FLX:
3089                 i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
3090                                      (uint32_t)(inset_reg & UINT32_MAX));
3091                 for (i = 0; i < 2; i++)
3092                         i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
3093                                              mask_reg[i]);
3094                 break;
3095         default:
3096                 PMD_DRV_LOG(ERR, "Unsupported input set type.");
3097                 return -EINVAL;
3098         }
3099
3100         I40E_WRITE_FLUSH(hw);
3101         return 0;
3102 }