/*
 * app/test-pmd/config.c (dpdk.git)
 * Commit subject: "app/testpmd: fix creating E-Tag and NVGRE flow rules"
 */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   Copyright 2013-2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <stdarg.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <string.h>
39 #include <stdarg.h>
40 #include <stdint.h>
41 #include <inttypes.h>
42
43 #include <sys/queue.h>
44
45 #include <rte_common.h>
46 #include <rte_byteorder.h>
47 #include <rte_debug.h>
48 #include <rte_log.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_launch.h>
53 #include <rte_eal.h>
54 #include <rte_per_lcore.h>
55 #include <rte_lcore.h>
56 #include <rte_atomic.h>
57 #include <rte_branch_prediction.h>
58 #include <rte_mempool.h>
59 #include <rte_mbuf.h>
60 #include <rte_interrupts.h>
61 #include <rte_pci.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_string_fns.h>
65 #include <rte_cycles.h>
66 #include <rte_flow.h>
67 #include <rte_errno.h>
68 #ifdef RTE_LIBRTE_IXGBE_PMD
69 #include <rte_pmd_ixgbe.h>
70 #endif
71
72 #include "testpmd.h"
73
/* Forward declaration; the definition appears later in this file. */
static char *flowtype_to_str(uint16_t flow_type);
75
/* Human-readable names for the TX packet split policies. */
static const struct {
        enum tx_pkt_split split;
        const char *name;
} tx_split_name[] = {
        {
                .split = TX_PKT_SPLIT_OFF,
                .name = "off",
        },
        {
                .split = TX_PKT_SPLIT_ON,
                .name = "on",
        },
        {
                .split = TX_PKT_SPLIT_RND,
                .name = "rand",
        },
};
93
/* Associates an RSS hash type name with its ETH_RSS_* flag value. */
struct rss_type_info {
        char str[32];
        uint64_t rss_type;
};
98
/* Lookup table mapping RSS type names (as typed by the user) to flags. */
static const struct rss_type_info rss_type_table[] = {
        { "ipv4", ETH_RSS_IPV4 },
        { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
        { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
        { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
        { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
        { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
        { "ipv6", ETH_RSS_IPV6 },
        { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
        { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
        { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
        { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
        { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
        { "l2-payload", ETH_RSS_L2_PAYLOAD },
        { "ipv6-ex", ETH_RSS_IPV6_EX },
        { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
        { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
        { "port", ETH_RSS_PORT },
        { "vxlan", ETH_RSS_VXLAN },
        { "geneve", ETH_RSS_GENEVE },
        { "nvgre", ETH_RSS_NVGRE },

};
122
123 static void
124 print_ethaddr(const char *name, struct ether_addr *eth_addr)
125 {
126         char buf[ETHER_ADDR_FMT_SIZE];
127         ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
128         printf("%s%s", name, buf);
129 }
130
/*
 * Display basic statistics (packets/bytes/errors/nombuf) for @port_id,
 * plus an Rx/Tx packet-rate estimate computed from the TSC delta since
 * the previous call for the same port.
 */
void
nic_stats_display(portid_t port_id)
{
        /* Per-port snapshots from the previous call, used for the rate. */
        static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
        static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
        static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
        uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
        /* Despite the names, these hold packets/second (pkts*hz/cycles). */
        uint64_t mpps_rx, mpps_tx;
        struct rte_eth_stats stats;
        struct rte_port *port = &ports[port_id];
        uint8_t i;
        portid_t pid;

        static const char *nic_stats_border = "########################";

        if (port_id_is_invalid(port_id, ENABLED_WARN)) {
                printf("Valid port range is [0");
                RTE_ETH_FOREACH_DEV(pid)
                        printf(", %d", pid);
                printf("]\n");
                return;
        }
        rte_eth_stats_get(port_id, &stats);
        printf("\n  %s NIC statistics for port %-2d %s\n",
               nic_stats_border, port_id, nic_stats_border);

        /* Compact layout when no per-queue stats mapping is configured. */
        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
                       "%-"PRIu64"\n",
                       stats.ipackets, stats.imissed, stats.ibytes);
                printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
                printf("  RX-nombuf:  %-10"PRIu64"\n",
                       stats.rx_nombuf);
                printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
                       "%-"PRIu64"\n",
                       stats.opackets, stats.oerrors, stats.obytes);
        }
        else {
                /*
                 * NOTE(review): ierrors is printed both on the summary line
                 * and on the dedicated RX-errors line below -- looks
                 * duplicated; confirm this is intended.
                 */
                printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
                       "    RX-bytes: %10"PRIu64"\n",
                       stats.ipackets, stats.ierrors, stats.ibytes);
                printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
                printf("  RX-nombuf:               %10"PRIu64"\n",
                       stats.rx_nombuf);
                printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
                       "    TX-bytes: %10"PRIu64"\n",
                       stats.opackets, stats.oerrors, stats.obytes);
        }

        /* Per-stats-register counters, when queue->register mapping is on. */
        if (port->rx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d RX-packets: %10"PRIu64
                               "    RX-errors: %10"PRIu64
                               "    RX-bytes: %10"PRIu64"\n",
                               i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
                }
        }
        if (port->tx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d TX-packets: %10"PRIu64
                               "                             TX-bytes: %10"PRIu64"\n",
                               i, stats.q_opackets[i], stats.q_obytes[i]);
                }
        }

        /*
         * TSC cycles elapsed since the previous call; on the very first
         * call prev_cycles[] is 0, so the rates below print as 0.
         */
        diff_cycles = prev_cycles[port_id];
        prev_cycles[port_id] = rte_rdtsc();
        if (diff_cycles > 0)
                diff_cycles = prev_cycles[port_id] - diff_cycles;

        diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
        diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
        prev_pkts_rx[port_id] = stats.ipackets;
        prev_pkts_tx[port_id] = stats.opackets;
        mpps_rx = diff_cycles > 0 ?
                diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
        mpps_tx = diff_cycles > 0 ?
                diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
        printf("\n  Throughput (since last show)\n");
        printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
                        mpps_rx, mpps_tx);

        printf("  %s############################%s\n",
               nic_stats_border, nic_stats_border);
}
218
219 void
220 nic_stats_clear(portid_t port_id)
221 {
222         portid_t pid;
223
224         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
225                 printf("Valid port range is [0");
226                 RTE_ETH_FOREACH_DEV(pid)
227                         printf(", %d", pid);
228                 printf("]\n");
229                 return;
230         }
231         rte_eth_stats_reset(port_id);
232         printf("\n  NIC statistics for port %d cleared\n", port_id);
233 }
234
235 void
236 nic_xstats_display(portid_t port_id)
237 {
238         struct rte_eth_xstat *xstats;
239         int cnt_xstats, idx_xstat;
240         struct rte_eth_xstat_name *xstats_names;
241
242         printf("###### NIC extended statistics for port %-2d\n", port_id);
243         if (!rte_eth_dev_is_valid_port(port_id)) {
244                 printf("Error: Invalid port number %i\n", port_id);
245                 return;
246         }
247
248         /* Get count */
249         cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
250         if (cnt_xstats  < 0) {
251                 printf("Error: Cannot get count of xstats\n");
252                 return;
253         }
254
255         /* Get id-name lookup table */
256         xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
257         if (xstats_names == NULL) {
258                 printf("Cannot allocate memory for xstats lookup\n");
259                 return;
260         }
261         if (cnt_xstats != rte_eth_xstats_get_names(
262                         port_id, xstats_names, cnt_xstats)) {
263                 printf("Error: Cannot get xstats lookup\n");
264                 free(xstats_names);
265                 return;
266         }
267
268         /* Get stats themselves */
269         xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
270         if (xstats == NULL) {
271                 printf("Cannot allocate memory for xstats\n");
272                 free(xstats_names);
273                 return;
274         }
275         if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
276                 printf("Error: Unable to get xstats\n");
277                 free(xstats_names);
278                 free(xstats);
279                 return;
280         }
281
282         /* Display xstats */
283         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
284                 printf("%s: %"PRIu64"\n",
285                         xstats_names[idx_xstat].name,
286                         xstats[idx_xstat].value);
287         free(xstats_names);
288         free(xstats);
289 }
290
291 void
292 nic_xstats_clear(portid_t port_id)
293 {
294         rte_eth_xstats_reset(port_id);
295 }
296
/*
 * Display the queue->stats-register mappings configured for @port_id,
 * for both RX and TX queues, from the global mapping tables.
 */
void
nic_stats_mapping_display(portid_t port_id)
{
        struct rte_port *port = &ports[port_id];
        uint16_t i;
        portid_t pid;

        static const char *nic_stats_mapping_border = "########################";

        if (port_id_is_invalid(port_id, ENABLED_WARN)) {
                printf("Valid port range is [0");
                RTE_ETH_FOREACH_DEV(pid)
                        printf(", %d", pid);
                printf("]\n");
                return;
        }

        /* Nothing to show unless at least one direction has a mapping. */
        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("Port id %d - either does not support queue statistic mapping or"
                       " no queue statistic mapping set\n", port_id);
                return;
        }

        printf("\n  %s NIC statistics mapping for port %-2d %s\n",
               nic_stats_mapping_border, port_id, nic_stats_mapping_border);

        /* The global tables hold mappings for all ports; filter by port_id. */
        if (port->rx_queue_stats_mapping_enabled) {
                for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
                        if (rx_queue_stats_mappings[i].port_id == port_id) {
                                printf("  RX-queue %2d mapped to Stats Reg %2d\n",
                                       rx_queue_stats_mappings[i].queue_id,
                                       rx_queue_stats_mappings[i].stats_counter_id);
                        }
                }
                printf("\n");
        }


        if (port->tx_queue_stats_mapping_enabled) {
                for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
                        if (tx_queue_stats_mappings[i].port_id == port_id) {
                                printf("  TX-queue %2d mapped to Stats Reg %2d\n",
                                       tx_queue_stats_mappings[i].queue_id,
                                       tx_queue_stats_mappings[i].stats_counter_id);
                        }
                }
        }

        printf("  %s####################################%s\n",
               nic_stats_mapping_border, nic_stats_mapping_border);
}
348
/*
 * Display the configuration of RX queue @queue_id of @port_id, as
 * reported by rte_eth_rx_queue_info_get().
 */
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;
        int32_t rc;
        static const char *info_border = "*********************";

        rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
        if (rc != 0) {
                /* rc is a negative errno value on failure. */
                printf("Failed to retrieve information for port: %hhu, "
                        "RX queue: %hu\nerror desc: %s(%d)\n",
                        port_id, queue_id, strerror(-rc), rc);
                return;
        }

        printf("\n%s Infos for port %-2u, RX queue %-2u %s",
               info_border, port_id, queue_id, info_border);

        printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
        printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
        printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
        printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
        printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
        printf("\nRX drop packets: %s",
                (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
        printf("\nRX deferred start: %s",
                (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
        printf("\nRX scattered packets: %s",
                (qinfo.scattered_rx != 0) ? "on" : "off");
        printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
        printf("\n");
}
381
/*
 * Display the configuration of TX queue @queue_id of @port_id, as
 * reported by rte_eth_tx_queue_info_get().
 */
void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
        struct rte_eth_txq_info qinfo;
        int32_t rc;
        static const char *info_border = "*********************";

        rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
        if (rc != 0) {
                /* rc is a negative errno value on failure. */
                printf("Failed to retrieve information for port: %hhu, "
                        "TX queue: %hu\nerror desc: %s(%d)\n",
                        port_id, queue_id, strerror(-rc), rc);
                return;
        }

        printf("\n%s Infos for port %-2u, TX queue %-2u %s",
               info_border, port_id, queue_id, info_border);

        printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
        printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
        printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
        printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
        printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
        printf("\nTX flags: %#x", qinfo.conf.txq_flags);
        printf("\nTX deferred start: %s",
                (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
        printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
        printf("\n");
}
411
/*
 * Print a summary of @port_id: MAC address, driver, NUMA socket, link
 * state, MTU, promiscuous/allmulticast modes, VLAN offload state, RSS
 * capabilities and RX/TX queue-descriptor limits.
 */
void
port_infos_display(portid_t port_id)
{
        struct rte_port *port;
        struct ether_addr mac_addr;
        struct rte_eth_link link;
        struct rte_eth_dev_info dev_info;
        int vlan_offload;
        struct rte_mempool * mp;
        static const char *info_border = "*********************";
        portid_t pid;
        uint16_t mtu;

        if (port_id_is_invalid(port_id, ENABLED_WARN)) {
                printf("Valid port range is [0");
                RTE_ETH_FOREACH_DEV(pid)
                        printf(", %d", pid);
                printf("]\n");
                return;
        }
        port = &ports[port_id];
        rte_eth_link_get_nowait(port_id, &link);
        memset(&dev_info, 0, sizeof(dev_info));
        rte_eth_dev_info_get(port_id, &dev_info);
        printf("\n%s Infos for port %-2d %s\n",
               info_border, port_id, info_border);
        rte_eth_macaddr_get(port_id, &mac_addr);
        print_ethaddr("MAC address: ", &mac_addr);
        printf("\nDriver name: %s", dev_info.driver_name);
        printf("\nConnect to socket: %u", port->socket_id);

        /* Report the socket the port's mbuf pool was allocated on. */
        if (port_numa[port_id] != NUMA_NO_CONFIG) {
                mp = mbuf_pool_find(port_numa[port_id]);
                if (mp)
                        printf("\nmemory allocation on the socket: %d",
                                                        port_numa[port_id]);
        } else
                printf("\nmemory allocation on the socket: %u",port->socket_id);

        printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
        printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
        printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
               ("full-duplex") : ("half-duplex"));

        /* rte_eth_dev_get_mtu() returns 0 on success. */
        if (!rte_eth_dev_get_mtu(port_id, &mtu))
                printf("MTU: %u\n", mtu);

        printf("Promiscuous mode: %s\n",
               rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
        printf("Allmulticast mode: %s\n",
               rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
        printf("Maximum number of MAC addresses: %u\n",
               (unsigned int)(port->dev_info.max_mac_addrs));
        printf("Maximum number of MAC addresses of hash filtering: %u\n",
               (unsigned int)(port->dev_info.max_hash_mac_addrs));

        /* A negative return means VLAN offload state is not reported. */
        vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
        if (vlan_offload >= 0){
                printf("VLAN offload: \n");
                if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
                        printf("  strip on \n");
                else
                        printf("  strip off \n");

                if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
                        printf("  filter on \n");
                else
                        printf("  filter off \n");

                if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
                        printf("  qinq(extend) on \n");
                else
                        printf("  qinq(extend) off \n");
        }

        if (dev_info.hash_key_size > 0)
                printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
        if (dev_info.reta_size > 0)
                printf("Redirection table size: %u\n", dev_info.reta_size);
        if (!dev_info.flow_type_rss_offloads)
                printf("No flow type is supported.\n");
        else {
                uint16_t i;
                char *p;

                /* Walk the RSS offload bitmask; name every supported type. */
                printf("Supported flow types:\n");
                for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
                                                                i++) {
                        if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
                                continue;
                        p = flowtype_to_str(i);
                        printf("  %s\n", (p ? p : "unknown"));
                }
        }

        printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
        printf("Max possible number of RXDs per queue: %hu\n",
                dev_info.rx_desc_lim.nb_max);
        printf("Min possible number of RXDs per queue: %hu\n",
                dev_info.rx_desc_lim.nb_min);
        printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

        printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
        printf("Max possible number of TXDs per queue: %hu\n",
                dev_info.tx_desc_lim.nb_max);
        printf("Min possible number of TXDs per queue: %hu\n",
                dev_info.tx_desc_lim.nb_min);
        printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
521
522 void
523 port_offload_cap_display(portid_t port_id)
524 {
525         struct rte_eth_dev *dev;
526         struct rte_eth_dev_info dev_info;
527         static const char *info_border = "************";
528
529         if (port_id_is_invalid(port_id, ENABLED_WARN))
530                 return;
531
532         dev = &rte_eth_devices[port_id];
533         rte_eth_dev_info_get(port_id, &dev_info);
534
535         printf("\n%s Port %d supported offload features: %s\n",
536                 info_border, port_id, info_border);
537
538         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
539                 printf("VLAN stripped:                 ");
540                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
541                         printf("on\n");
542                 else
543                         printf("off\n");
544         }
545
546         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
547                 printf("Double VLANs stripped:         ");
548                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
549                         printf("on\n");
550                 else
551                         printf("off\n");
552         }
553
554         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
555                 printf("RX IPv4 checksum:              ");
556                 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
557                         printf("on\n");
558                 else
559                         printf("off\n");
560         }
561
562         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
563                 printf("RX UDP checksum:               ");
564                 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
565                         printf("on\n");
566                 else
567                         printf("off\n");
568         }
569
570         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
571                 printf("RX TCP checksum:               ");
572                 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
573                         printf("on\n");
574                 else
575                         printf("off\n");
576         }
577
578         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
579                 printf("RX Outer IPv4 checksum:        on");
580
581         if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
582                 printf("Large receive offload:         ");
583                 if (dev->data->dev_conf.rxmode.enable_lro)
584                         printf("on\n");
585                 else
586                         printf("off\n");
587         }
588
589         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
590                 printf("VLAN insert:                   ");
591                 if (ports[port_id].tx_ol_flags &
592                     TESTPMD_TX_OFFLOAD_INSERT_VLAN)
593                         printf("on\n");
594                 else
595                         printf("off\n");
596         }
597
598         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
599                 printf("Double VLANs insert:           ");
600                 if (ports[port_id].tx_ol_flags &
601                     TESTPMD_TX_OFFLOAD_INSERT_QINQ)
602                         printf("on\n");
603                 else
604                         printf("off\n");
605         }
606
607         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
608                 printf("TX IPv4 checksum:              ");
609                 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
610                         printf("on\n");
611                 else
612                         printf("off\n");
613         }
614
615         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
616                 printf("TX UDP checksum:               ");
617                 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
618                         printf("on\n");
619                 else
620                         printf("off\n");
621         }
622
623         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
624                 printf("TX TCP checksum:               ");
625                 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
626                         printf("on\n");
627                 else
628                         printf("off\n");
629         }
630
631         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
632                 printf("TX SCTP checksum:              ");
633                 if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM)
634                         printf("on\n");
635                 else
636                         printf("off\n");
637         }
638
639         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
640                 printf("TX Outer IPv4 checksum:        ");
641                 if (ports[port_id].tx_ol_flags &
642                     TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
643                         printf("on\n");
644                 else
645                         printf("off\n");
646         }
647
648         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
649                 printf("TX TCP segmentation:           ");
650                 if (ports[port_id].tso_segsz != 0)
651                         printf("on\n");
652                 else
653                         printf("off\n");
654         }
655
656         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
657                 printf("TX UDP segmentation:           ");
658                 if (ports[port_id].tso_segsz != 0)
659                         printf("on\n");
660                 else
661                         printf("off\n");
662         }
663
664         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
665                 printf("TSO for VXLAN tunnel packet:   ");
666                 if (ports[port_id].tunnel_tso_segsz)
667                         printf("on\n");
668                 else
669                         printf("off\n");
670         }
671
672         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
673                 printf("TSO for GRE tunnel packet:     ");
674                 if (ports[port_id].tunnel_tso_segsz)
675                         printf("on\n");
676                 else
677                         printf("off\n");
678         }
679
680         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
681                 printf("TSO for IPIP tunnel packet:    ");
682                 if (ports[port_id].tunnel_tso_segsz)
683                         printf("on\n");
684                 else
685                         printf("off\n");
686         }
687
688         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
689                 printf("TSO for GENEVE tunnel packet:  ");
690                 if (ports[port_id].tunnel_tso_segsz)
691                         printf("on\n");
692                 else
693                         printf("off\n");
694         }
695
696 }
697
698 int
699 port_id_is_invalid(portid_t port_id, enum print_warning warning)
700 {
701         if (port_id == (portid_t)RTE_PORT_ALL)
702                 return 0;
703
704         if (rte_eth_dev_is_valid_port(port_id))
705                 return 0;
706
707         if (warning == ENABLED_WARN)
708                 printf("Invalid port %d\n", port_id);
709
710         return 1;
711 }
712
/* Return 1 (with a diagnostic) if @vlan_id is not a 12-bit VLAN id. */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
        if (vlan_id >= 4096) {
                printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
                return 1;
        }
        return 0;
}
721
/*
 * Check that @reg_off is 4-byte aligned and lies within the port's first
 * PCI memory resource; return 1 (with a diagnostic) when it does not,
 * 0 when the offset is acceptable.
 */
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
        uint64_t pci_len;

        /* Registers are 32-bit, so the offset must be 4-byte aligned. */
        if (reg_off & 0x3) {
                printf("Port register offset 0x%X not aligned on a 4-byte "
                       "boundary\n",
                       (unsigned)reg_off);
                return 1;
        }
        pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
        if (reg_off >= pci_len) {
                printf("Port %d: register offset %u (0x%X) out of port PCI "
                       "resource (length=%"PRIu64")\n",
                       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
                return 1;
        }
        return 0;
}
742
/* Return 1 (with a diagnostic) unless @bit_pos indexes a 32-bit register. */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
        if (bit_pos > 31) {
                printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
                return 1;
        }
        return 0;
}
751
/* Print "port <id> PCI register at offset 0x<off>: " without a newline. */
#define display_port_and_reg_off(port_id, reg_off) \
        printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
754
/* Print the register value in both hexadecimal and decimal forms. */
static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
        display_port_and_reg_off(port_id, (unsigned)reg_off);
        printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
761
/* Display bit @bit_x of the PCI register at @reg_off of @port_id. */
void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
        uint32_t reg_v;


        if (port_id_is_invalid(port_id, ENABLED_WARN))
                return;
        if (port_reg_off_is_invalid(port_id, reg_off))
                return;
        if (reg_bit_pos_is_invalid(bit_x))
                return;
        reg_v = port_id_pci_reg_read(port_id, reg_off);
        display_port_and_reg_off(port_id, (unsigned)reg_off);
        printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}
778
/*
 * Display the bit field delimited by @bit1_pos and @bit2_pos (in either
 * order) of the PCI register at @reg_off of @port_id.
 */
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
                           uint8_t bit1_pos, uint8_t bit2_pos)
{
        uint32_t reg_v;
        uint8_t  l_bit;
        uint8_t  h_bit;

        if (port_id_is_invalid(port_id, ENABLED_WARN))
                return;
        if (port_reg_off_is_invalid(port_id, reg_off))
                return;
        if (reg_bit_pos_is_invalid(bit1_pos))
                return;
        if (reg_bit_pos_is_invalid(bit2_pos))
                return;
        /* Normalize so l_bit <= h_bit regardless of argument order. */
        if (bit1_pos > bit2_pos)
                l_bit = bit2_pos, h_bit = bit1_pos;
        else
                l_bit = bit1_pos, h_bit = bit2_pos;

        reg_v = port_id_pci_reg_read(port_id, reg_off);
        reg_v >>= l_bit;
        /*
         * When h_bit == 31 no bits remain above the field after the shift,
         * and the mask would require a 32-bit shift (undefined behavior),
         * so masking is skipped in that case.
         */
        if (h_bit < 31)
                reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
        display_port_and_reg_off(port_id, (unsigned)reg_off);
        /* Field width in hex digits: one per 4 bits, rounded up. */
        printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
               ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
808
809 void
810 port_reg_display(portid_t port_id, uint32_t reg_off)
811 {
812         uint32_t reg_v;
813
814         if (port_id_is_invalid(port_id, ENABLED_WARN))
815                 return;
816         if (port_reg_off_is_invalid(port_id, reg_off))
817                 return;
818         reg_v = port_id_pci_reg_read(port_id, reg_off);
819         display_port_reg_value(port_id, reg_off, reg_v);
820 }
821
822 void
823 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
824                  uint8_t bit_v)
825 {
826         uint32_t reg_v;
827
828         if (port_id_is_invalid(port_id, ENABLED_WARN))
829                 return;
830         if (port_reg_off_is_invalid(port_id, reg_off))
831                 return;
832         if (reg_bit_pos_is_invalid(bit_pos))
833                 return;
834         if (bit_v > 1) {
835                 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
836                 return;
837         }
838         reg_v = port_id_pci_reg_read(port_id, reg_off);
839         if (bit_v == 0)
840                 reg_v &= ~(1 << bit_pos);
841         else
842                 reg_v |= (1 << bit_pos);
843         port_id_pci_reg_write(port_id, reg_off, reg_v);
844         display_port_reg_value(port_id, reg_off, reg_v);
845 }
846
847 void
848 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
849                        uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
850 {
851         uint32_t max_v;
852         uint32_t reg_v;
853         uint8_t  l_bit;
854         uint8_t  h_bit;
855
856         if (port_id_is_invalid(port_id, ENABLED_WARN))
857                 return;
858         if (port_reg_off_is_invalid(port_id, reg_off))
859                 return;
860         if (reg_bit_pos_is_invalid(bit1_pos))
861                 return;
862         if (reg_bit_pos_is_invalid(bit2_pos))
863                 return;
864         if (bit1_pos > bit2_pos)
865                 l_bit = bit2_pos, h_bit = bit1_pos;
866         else
867                 l_bit = bit1_pos, h_bit = bit2_pos;
868
869         if ((h_bit - l_bit) < 31)
870                 max_v = (1 << (h_bit - l_bit + 1)) - 1;
871         else
872                 max_v = 0xFFFFFFFF;
873
874         if (value > max_v) {
875                 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
876                                 (unsigned)value, (unsigned)value,
877                                 (unsigned)max_v, (unsigned)max_v);
878                 return;
879         }
880         reg_v = port_id_pci_reg_read(port_id, reg_off);
881         reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
882         reg_v |= (value << l_bit); /* Set changed bits */
883         port_id_pci_reg_write(port_id, reg_off, reg_v);
884         display_port_reg_value(port_id, reg_off, reg_v);
885 }
886
887 void
888 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
889 {
890         if (port_id_is_invalid(port_id, ENABLED_WARN))
891                 return;
892         if (port_reg_off_is_invalid(port_id, reg_off))
893                 return;
894         port_id_pci_reg_write(port_id, reg_off, reg_v);
895         display_port_reg_value(port_id, reg_off, reg_v);
896 }
897
898 void
899 port_mtu_set(portid_t port_id, uint16_t mtu)
900 {
901         int diag;
902
903         if (port_id_is_invalid(port_id, ENABLED_WARN))
904                 return;
905         diag = rte_eth_dev_set_mtu(port_id, mtu);
906         if (diag == 0)
907                 return;
908         printf("Set MTU failed. diag=%d\n", diag);
909 }
910
911 /* Generic flow management functions. */
912
913 /** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items.
 *
 * Indexed by rte_flow item type; .name is displayed when listing rules
 * and a NULL entry marks an unsupported type. .size is the size of the
 * corresponding rte_flow_item_* structure (0 for items that carry no
 * specification data).
 */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
};
947
948 /** Compute storage space needed by item specification. */
949 static void
950 flow_item_spec_size(const struct rte_flow_item *item,
951                     size_t *size, size_t *pad)
952 {
953         if (!item->spec)
954                 goto empty;
955         switch (item->type) {
956                 union {
957                         const struct rte_flow_item_raw *raw;
958                 } spec;
959
960         case RTE_FLOW_ITEM_TYPE_RAW:
961                 spec.raw = item->spec;
962                 *size = offsetof(struct rte_flow_item_raw, pattern) +
963                         spec.raw->length * sizeof(*spec.raw->pattern);
964                 break;
965         default:
966 empty:
967                 *size = 0;
968                 break;
969         }
970         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
971 }
972
973 /** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions.
 *
 * Indexed by rte_flow action type; .name is displayed when listing
 * rules and a NULL entry marks an unsupported type. .size is the size
 * of the corresponding rte_flow_action_* configuration structure (0
 * for actions without configuration).
 */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};
998
999 /** Compute storage space needed by action configuration. */
1000 static void
1001 flow_action_conf_size(const struct rte_flow_action *action,
1002                       size_t *size, size_t *pad)
1003 {
1004         if (!action->conf)
1005                 goto empty;
1006         switch (action->type) {
1007                 union {
1008                         const struct rte_flow_action_rss *rss;
1009                 } conf;
1010
1011         case RTE_FLOW_ACTION_TYPE_RSS:
1012                 conf.rss = action->conf;
1013                 *size = offsetof(struct rte_flow_action_rss, queue) +
1014                         conf.rss->num * sizeof(*conf.rss->queue);
1015                 break;
1016         default:
1017 empty:
1018                 *size = 0;
1019                 break;
1020         }
1021         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
1022 }
1023
1024 /** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0; /* running offset of item/action headers in data[] */
	size_t off2 = 0; /* running offset of spec/last/mask/conf payloads */
	int err = ENOTSUP;

	/*
	 * Two-pass algorithm: the first pass (pf == NULL) only
	 * accumulates off1/off2 to size the allocation; the second pass
	 * (re-entered via "goto store" once pf is allocated) performs
	 * the actual copies.
	 */
store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		/* Reject item types testpmd does not know about. */
		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		flow_item_spec_size(item, &tmp, &pad);
		if (item->spec) {
			if (pf)
				dst->spec = memcpy(pf->data + off2,
						   item->spec, tmp);
			off2 += tmp + pad;
		}
		if (item->last) {
			if (pf)
				dst->last = memcpy(pf->data + off2,
						   item->last, tmp);
			off2 += tmp + pad;
		}
		if (item->mask) {
			if (pf)
				dst->mask = memcpy(pf->data + off2,
						   item->mask, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		/* Reject action types testpmd does not know about. */
		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		flow_action_conf_size(action, &tmp, &pad);
		if (action->conf) {
			if (pf)
				dst->conf = memcpy(pf->data + off2,
						   action->conf, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf; /* second pass done, entry fully populated */
	/* First pass complete: allocate the entry, then copy. */
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		/* Rebase offsets relative to data[] for the copy pass. */
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}
1116
1117 /** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	/* Human-readable name for each rte_flow_error type. */
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno; /* saved before printf can disturb it */

	/* Out-of-range or unnamed types fall back to a generic label. */
	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	/* The comma expression formats the cause pointer into buf first. */
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}
1151
1152 /** Validate flow rule. */
1153 int
1154 port_flow_validate(portid_t port_id,
1155                    const struct rte_flow_attr *attr,
1156                    const struct rte_flow_item *pattern,
1157                    const struct rte_flow_action *actions)
1158 {
1159         struct rte_flow_error error;
1160
1161         /* Poisoning to make sure PMDs update it in case of error. */
1162         memset(&error, 0x11, sizeof(error));
1163         if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1164                 return port_flow_complain(&error);
1165         printf("Flow rule validated\n");
1166         return 0;
1167 }
1168
1169 /** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	/*
	 * New rules are prepended, so the head of the list always holds
	 * the highest ID assigned so far.
	 */
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		/* Tear down the PMD rule: no local entry will track it. */
		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	/* Prepend the new entry to the per-port flow list. */
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}
1213
1214 /** Destroy a number of flow rules. */
1215 int
1216 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1217 {
1218         struct rte_port *port;
1219         struct port_flow **tmp;
1220         uint32_t c = 0;
1221         int ret = 0;
1222
1223         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1224             port_id == (portid_t)RTE_PORT_ALL)
1225                 return -EINVAL;
1226         port = &ports[port_id];
1227         tmp = &port->flow_list;
1228         while (*tmp) {
1229                 uint32_t i;
1230
1231                 for (i = 0; i != n; ++i) {
1232                         struct rte_flow_error error;
1233                         struct port_flow *pf = *tmp;
1234
1235                         if (rule[i] != pf->id)
1236                                 continue;
1237                         /*
1238                          * Poisoning to make sure PMDs update it in case
1239                          * of error.
1240                          */
1241                         memset(&error, 0x33, sizeof(error));
1242                         if (rte_flow_destroy(port_id, pf->flow, &error)) {
1243                                 ret = port_flow_complain(&error);
1244                                 continue;
1245                         }
1246                         printf("Flow rule #%u destroyed\n", pf->id);
1247                         *tmp = pf->next;
1248                         free(pf);
1249                         break;
1250                 }
1251                 if (i == n)
1252                         tmp = &(*tmp)->next;
1253                 ++c;
1254         }
1255         return ret;
1256 }
1257
1258 /** Remove all flow rules. */
1259 int
1260 port_flow_flush(portid_t port_id)
1261 {
1262         struct rte_flow_error error;
1263         struct rte_port *port;
1264         int ret = 0;
1265
1266         /* Poisoning to make sure PMDs update it in case of error. */
1267         memset(&error, 0x44, sizeof(error));
1268         if (rte_flow_flush(port_id, &error)) {
1269                 ret = port_flow_complain(&error);
1270                 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1271                     port_id == (portid_t)RTE_PORT_ALL)
1272                         return ret;
1273         }
1274         port = &ports[port_id];
1275         while (port->flow_list) {
1276                 struct port_flow *pf = port->flow_list->next;
1277
1278                 free(port->flow_list);
1279                 port->flow_list = pf;
1280         }
1281         return ret;
1282 }
1283
1284 /** Query a flow rule. */
1285 int
1286 port_flow_query(portid_t port_id, uint32_t rule,
1287                 enum rte_flow_action_type action)
1288 {
1289         struct rte_flow_error error;
1290         struct rte_port *port;
1291         struct port_flow *pf;
1292         const char *name;
1293         union {
1294                 struct rte_flow_query_count count;
1295         } query;
1296
1297         if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1298             port_id == (portid_t)RTE_PORT_ALL)
1299                 return -EINVAL;
1300         port = &ports[port_id];
1301         for (pf = port->flow_list; pf; pf = pf->next)
1302                 if (pf->id == rule)
1303                         break;
1304         if (!pf) {
1305                 printf("Flow rule #%u not found\n", rule);
1306                 return -ENOENT;
1307         }
1308         if ((unsigned int)action >= RTE_DIM(flow_action) ||
1309             !flow_action[action].name)
1310                 name = "unknown";
1311         else
1312                 name = flow_action[action].name;
1313         switch (action) {
1314         case RTE_FLOW_ACTION_TYPE_COUNT:
1315                 break;
1316         default:
1317                 printf("Cannot query action type %d (%s)\n", action, name);
1318                 return -ENOTSUP;
1319         }
1320         /* Poisoning to make sure PMDs update it in case of error. */
1321         memset(&error, 0x55, sizeof(error));
1322         memset(&query, 0, sizeof(query));
1323         if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1324                 return port_flow_complain(&error);
1325         switch (action) {
1326         case RTE_FLOW_ACTION_TYPE_COUNT:
1327                 printf("%s:\n"
1328                        " hits_set: %u\n"
1329                        " bytes_set: %u\n"
1330                        " hits: %" PRIu64 "\n"
1331                        " bytes: %" PRIu64 "\n",
1332                        name,
1333                        query.count.hits_set,
1334                        query.count.bytes_set,
1335                        query.count.hits,
1336                        query.count.bytes);
1337                 break;
1338         default:
1339                 printf("Cannot display result for action type %d (%s)\n",
1340                        action, name);
1341                 break;
1342         }
1343         return 0;
1344 }
1345
1346 /** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL; /* sorted view, linked via pf->tmp */
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		/*
		 * Insertion sort into "list" through the auxiliary tmp
		 * links; the primary flow_list chain stays untouched.
		 */
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		/* Attributes column: 'i'/'e' flags for ingress/egress. */
		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-');
		/* Pattern item names, VOID entries skipped. */
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		/* Action names, VOID entries skipped. */
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}
1410
1411 /*
1412  * RX/TX ring descriptors display functions.
1413  */
1414 int
1415 rx_queue_id_is_invalid(queueid_t rxq_id)
1416 {
1417         if (rxq_id < nb_rxq)
1418                 return 0;
1419         printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1420         return 1;
1421 }
1422
1423 int
1424 tx_queue_id_is_invalid(queueid_t txq_id)
1425 {
1426         if (txq_id < nb_txq)
1427                 return 0;
1428         printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
1429         return 1;
1430 }
1431
1432 static int
1433 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1434 {
1435         if (rxdesc_id < nb_rxd)
1436                 return 0;
1437         printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1438                rxdesc_id, nb_rxd);
1439         return 1;
1440 }
1441
1442 static int
1443 tx_desc_id_is_invalid(uint16_t txdesc_id)
1444 {
1445         if (txdesc_id < nb_txd)
1446                 return 0;
1447         printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1448                txdesc_id, nb_txd);
1449         return 1;
1450 }
1451
1452 static const struct rte_memzone *
1453 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
1454 {
1455         char mz_name[RTE_MEMZONE_NAMESIZE];
1456         const struct rte_memzone *mz;
1457
1458         snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
1459                  ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
1460         mz = rte_memzone_lookup(mz_name);
1461         if (mz == NULL)
1462                 printf("%s ring memory zoneof (port %d, queue %d) not"
1463                        "found (zone name = %s\n",
1464                        ring_name, port_id, q_id, mz_name);
1465         return mz;
1466 }
1467
/* 64-bit descriptor word, accessible whole or as two 32-bit halves for
 * display purposes.
 *
 * NOTE(review): the lo/hi member ordering looks inverted with respect
 * to the usual convention (low half first on little-endian) -- confirm
 * against how the display helpers consume these words before relying
 * on the member names.
 */
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

/* Layout of a 32-byte RX descriptor (used by i40e below). */
struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

/* Layout of a 16-byte RX/TX descriptor. */
struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};
1492
1493 static void
1494 ring_rxd_display_dword(union igb_ring_dword dword)
1495 {
1496         printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1497                                         (unsigned)dword.words.hi);
1498 }
1499
/* Display the words of one RX descriptor from a ring memzone.
 *
 * i40e ports use 32-byte RX descriptors (unless the 16-byte variant is
 * compiled in); everything else is displayed as a 16-byte descriptor.
 * port_id is only needed to identify i40e, hence unused when
 * RTE_LIBRTE_I40E_16BYTE_RX_DESC is set.
 */
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		/* Convert each word from little-endian in place, then
		 * display it. */
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}
1544
/* Display the two 64-bit words of one 16-byte TX descriptor. */
static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	/* Copy into a local with little-endian to CPU conversion so the
	 * ring contents are left untouched. */
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
			(unsigned)txd.lo_dword.words.lo,
			(unsigned)txd.lo_dword.words.hi,
			(unsigned)txd.hi_dword.words.lo,
			(unsigned)txd.hi_dword.words.hi);
}
1560
1561 void
1562 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1563 {
1564         const struct rte_memzone *rx_mz;
1565
1566         if (port_id_is_invalid(port_id, ENABLED_WARN))
1567                 return;
1568         if (rx_queue_id_is_invalid(rxq_id))
1569                 return;
1570         if (rx_desc_id_is_invalid(rxd_id))
1571                 return;
1572         rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1573         if (rx_mz == NULL)
1574                 return;
1575         ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1576 }
1577
1578 void
1579 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1580 {
1581         const struct rte_memzone *tx_mz;
1582
1583         if (port_id_is_invalid(port_id, ENABLED_WARN))
1584                 return;
1585         if (tx_queue_id_is_invalid(txq_id))
1586                 return;
1587         if (tx_desc_id_is_invalid(txd_id))
1588                 return;
1589         tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1590         if (tx_mz == NULL)
1591                 return;
1592         ring_tx_descriptor_display(tx_mz, txd_id);
1593 }
1594
1595 void
1596 fwd_lcores_config_display(void)
1597 {
1598         lcoreid_t lc_id;
1599
1600         printf("List of forwarding lcores:");
1601         for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1602                 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1603         printf("\n");
1604 }
1605 void
1606 rxtx_config_display(void)
1607 {
1608         printf("  %s packet forwarding%s - CRC stripping %s - "
1609                "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
1610                retry_enabled == 0 ? "" : " with retry",
1611                rx_mode.hw_strip_crc ? "enabled" : "disabled",
1612                nb_pkt_per_burst);
1613
1614         if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1615                 printf("  packet len=%u - nb packet segments=%d\n",
1616                                 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1617
1618         struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
1619         struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;
1620
1621         printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
1622                nb_fwd_lcores, nb_fwd_ports);
1623         printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
1624                nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
1625         printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
1626                rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
1627                rx_conf->rx_thresh.wthresh);
1628         printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
1629                nb_txq, nb_txd, tx_conf->tx_free_thresh);
1630         printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
1631                tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
1632                tx_conf->tx_thresh.wthresh);
1633         printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
1634                tx_conf->tx_rs_thresh, tx_conf->txq_flags);
1635 }
1636
1637 void
1638 port_rss_reta_info(portid_t port_id,
1639                    struct rte_eth_rss_reta_entry64 *reta_conf,
1640                    uint16_t nb_entries)
1641 {
1642         uint16_t i, idx, shift;
1643         int ret;
1644
1645         if (port_id_is_invalid(port_id, ENABLED_WARN))
1646                 return;
1647
1648         ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1649         if (ret != 0) {
1650                 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1651                 return;
1652         }
1653
1654         for (i = 0; i < nb_entries; i++) {
1655                 idx = i / RTE_RETA_GROUP_SIZE;
1656                 shift = i % RTE_RETA_GROUP_SIZE;
1657                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1658                         continue;
1659                 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1660                                         i, reta_conf[idx].reta[shift]);
1661         }
1662 }
1663
1664 /*
1665  * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
1666  * key of the port.
1667  */
/*
 * Query a port's RSS configuration and print the active hash functions;
 * when show_rss_key is non-zero, also dump the RSS hash key in hex.
 *
 * rss_info names a hash type looked up in rss_type_table; an unknown
 * name simply leaves the requested rss_hf mask at 0.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
        struct rte_eth_rss_conf rss_conf;
        uint8_t rss_key[RSS_HASH_KEY_LENGTH];
        uint64_t rss_hf;
        uint8_t i;
        int diag;
        struct rte_eth_dev_info dev_info;
        uint8_t hash_key_size;

        if (port_id_is_invalid(port_id, ENABLED_WARN))
                return;

        /*
         * Use the key size reported by the driver; refuse to continue if
         * it reports none or one larger than the local rss_key buffer.
         */
        memset(&dev_info, 0, sizeof(dev_info));
        rte_eth_dev_info_get(port_id, &dev_info);
        if (dev_info.hash_key_size > 0 &&
                        dev_info.hash_key_size <= sizeof(rss_key))
                hash_key_size = dev_info.hash_key_size;
        else {
                printf("dev_info did not provide a valid hash key size\n");
                return;
        }

        /* Translate the hash type name into its rss_hf bit, if known. */
        rss_conf.rss_hf = 0;
        for (i = 0; i < RTE_DIM(rss_type_table); i++) {
                if (!strcmp(rss_info, rss_type_table[i].str))
                        rss_conf.rss_hf = rss_type_table[i].rss_type;
        }

        /* Get RSS hash key if asked to display it */
        rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
        rss_conf.rss_key_len = hash_key_size;
        diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
        if (diag != 0) {
                switch (diag) {
                case -ENODEV:
                        printf("port index %d invalid\n", port_id);
                        break;
                case -ENOTSUP:
                        printf("operation not supported by device\n");
                        break;
                default:
                        printf("operation failed - diag=%d\n", diag);
                        break;
                }
                return;
        }
        rss_hf = rss_conf.rss_hf;
        if (rss_hf == 0) {
                printf("RSS disabled\n");
                return;
        }
        /* Print the name of every hash function set in the returned mask. */
        printf("RSS functions:\n ");
        for (i = 0; i < RTE_DIM(rss_type_table); i++) {
                if (rss_hf & rss_type_table[i].rss_type)
                        printf("%s ", rss_type_table[i].str);
        }
        printf("\n");
        if (!show_rss_key)
                return;
        printf("RSS key:\n");
        for (i = 0; i < hash_key_size; i++)
                printf("%02X", rss_key[i]);
        printf("\n");
}
1734
1735 void
1736 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1737                          uint hash_key_len)
1738 {
1739         struct rte_eth_rss_conf rss_conf;
1740         int diag;
1741         unsigned int i;
1742
1743         rss_conf.rss_key = NULL;
1744         rss_conf.rss_key_len = hash_key_len;
1745         rss_conf.rss_hf = 0;
1746         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1747                 if (!strcmp(rss_type_table[i].str, rss_type))
1748                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1749         }
1750         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1751         if (diag == 0) {
1752                 rss_conf.rss_key = hash_key;
1753                 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1754         }
1755         if (diag == 0)
1756                 return;
1757
1758         switch (diag) {
1759         case -ENODEV:
1760                 printf("port index %d invalid\n", port_id);
1761                 break;
1762         case -ENOTSUP:
1763                 printf("operation not supported by device\n");
1764                 break;
1765         default:
1766                 printf("operation failed - diag=%d\n", diag);
1767                 break;
1768         }
1769 }
1770
1771 /*
1772  * Setup forwarding configuration for each logical core.
1773  */
1774 static void
1775 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1776 {
1777         streamid_t nb_fs_per_lcore;
1778         streamid_t nb_fs;
1779         streamid_t sm_id;
1780         lcoreid_t  nb_extra;
1781         lcoreid_t  nb_fc;
1782         lcoreid_t  nb_lc;
1783         lcoreid_t  lc_id;
1784
1785         nb_fs = cfg->nb_fwd_streams;
1786         nb_fc = cfg->nb_fwd_lcores;
1787         if (nb_fs <= nb_fc) {
1788                 nb_fs_per_lcore = 1;
1789                 nb_extra = 0;
1790         } else {
1791                 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1792                 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1793         }
1794
1795         nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1796         sm_id = 0;
1797         for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1798                 fwd_lcores[lc_id]->stream_idx = sm_id;
1799                 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1800                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1801         }
1802
1803         /*
1804          * Assign extra remaining streams, if any.
1805          */
1806         nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1807         for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1808                 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1809                 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1810                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1811         }
1812 }
1813
/*
 * Set up one forwarding stream per port (single RX/TX queue each).
 * Paired topology: port i <-> port i+1; chained topology: port i ->
 * port (i+1) mod N; loop topology: each port forwards to itself.
 */
static void
simple_fwd_config_setup(void)
{
        portid_t i;
        portid_t j;
        portid_t inc = 2;       /* paired topology configures 2 ports/step */

        if (port_topology == PORT_TOPOLOGY_CHAINED ||
            port_topology == PORT_TOPOLOGY_LOOP) {
                inc = 1;
        } else if (nb_fwd_ports % 2) {
                printf("\nWarning! Cannot handle an odd number of ports "
                       "with the current port topology. Configuration "
                       "must be changed to have an even number of ports, "
                       "or relaunch application with "
                       "--port-topology=chained\n\n");
        }

        cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
        cur_fwd_config.nb_fwd_streams =
                (streamid_t) cur_fwd_config.nb_fwd_ports;

        /* reinitialize forwarding streams */
        init_fwd_streams();

        /*
         * In the simple forwarding test, the number of forwarding cores
         * must be lower or equal to the number of forwarding ports.
         */
        cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
        if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
                cur_fwd_config.nb_fwd_lcores =
                        (lcoreid_t) cur_fwd_config.nb_fwd_ports;
        setup_fwd_config_of_each_lcore(&cur_fwd_config);

        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
                /* j is the peer port; in loop topology it is i itself. */
                if (port_topology != PORT_TOPOLOGY_LOOP)
                        j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
                else
                        j = i;
                fwd_streams[i]->rx_port   = fwd_ports_ids[i];
                fwd_streams[i]->rx_queue  = 0;
                fwd_streams[i]->tx_port   = fwd_ports_ids[j];
                fwd_streams[i]->tx_queue  = 0;
                fwd_streams[i]->peer_addr = j;
                fwd_streams[i]->retry_enabled = retry_enabled;

                /* Paired topology also configures the reverse direction. */
                if (port_topology == PORT_TOPOLOGY_PAIRED) {
                        fwd_streams[j]->rx_port   = fwd_ports_ids[j];
                        fwd_streams[j]->rx_queue  = 0;
                        fwd_streams[j]->tx_port   = fwd_ports_ids[i];
                        fwd_streams[j]->tx_queue  = 0;
                        fwd_streams[j]->peer_addr = i;
                        fwd_streams[j]->retry_enabled = retry_enabled;
                }
        }
}
1871
1872 /**
1873  * For the RSS forwarding test all streams distributed over lcores. Each stream
1874  * being composed of a RX queue to poll on a RX port for input messages,
1875  * associated with a TX queue of a TX port where to send forwarded packets.
1876  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
1877  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
1878  * following rules:
1879  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1880  *    - TxQl = RxQj
1881  */
static void
rss_fwd_config_setup(void)
{
        portid_t   rxp;
        portid_t   txp;
        queueid_t  rxq;
        queueid_t  nb_q;        /* queues usable on both RX and TX side */
        streamid_t  sm_id;

        /* One stream per (port, queue) pair; use min(nb_rxq, nb_txq). */
        nb_q = nb_rxq;
        if (nb_q > nb_txq)
                nb_q = nb_txq;
        cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
        cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
        cur_fwd_config.nb_fwd_streams =
                (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

        /* Never use more lcores than there are streams to run. */
        if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
                cur_fwd_config.nb_fwd_lcores =
                        (lcoreid_t)cur_fwd_config.nb_fwd_streams;

        /* reinitialize forwarding streams */
        init_fwd_streams();

        setup_fwd_config_of_each_lcore(&cur_fwd_config);
        rxp = 0; rxq = 0;
        for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
                struct fwd_stream *fs;

                fs = fwd_streams[sm_id];

                /* Pair ports: even RX port -> next port, odd -> previous. */
                if ((rxp & 0x1) == 0)
                        txp = (portid_t) (rxp + 1);
                else
                        txp = (portid_t) (rxp - 1);
                /*
                 * if we are in loopback, simply send stuff out through the
                 * ingress port
                 */
                if (port_topology == PORT_TOPOLOGY_LOOP)
                        txp = rxp;

                fs->rx_port = fwd_ports_ids[rxp];
                fs->rx_queue = rxq;
                fs->tx_port = fwd_ports_ids[txp];
                fs->tx_queue = rxq;     /* TxQl = RxQj, see header comment */
                fs->peer_addr = fs->tx_port;
                fs->retry_enabled = retry_enabled;
                rxq = (queueid_t) (rxq + 1);
                if (rxq < nb_q)
                        continue;
                /*
                 * rxq == nb_q
                 * Restart from RX queue 0 on next RX port
                 */
                rxq = 0;
                /* With NUMA, skip ahead so ports stay socket-balanced. */
                if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
                        rxp = (portid_t)
                                (rxp + ((nb_ports >> 1) / nb_fwd_ports));
                else
                        rxp = (portid_t) (rxp + 1);
        }
}
1945
1946 /**
1947  * For the DCB forwarding test, each core is assigned on each traffic class.
1948  *
1949  * Each core is assigned a multi-stream, each stream being composed of
1950  * a RX queue to poll on a RX port for input messages, associated with
1951  * a TX queue of a TX port where to send forwarded packets. All RX and
1952  * TX queues are mapping to the same traffic class.
1953  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
1954  * the same core
1955  */
static void
dcb_fwd_config_setup(void)
{
        struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
        portid_t txp, rxp = 0;
        queueid_t txq, rxq = 0;
        lcoreid_t  lc_id;
        uint16_t nb_rx_queue, nb_tx_queue;
        uint16_t i, j, k, sm_id = 0;
        uint8_t tc = 0;         /* current traffic class */

        cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
        cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
        cur_fwd_config.nb_fwd_streams =
                (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

        /* reinitialize forwarding streams */
        init_fwd_streams();
        sm_id = 0;
        txp = 1;
        /* get the dcb info on the first RX and TX ports */
        (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
        (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

        /* One lcore per traffic class: lcore lc_id serves TC "tc". */
        for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
                fwd_lcores[lc_id]->stream_nb = 0;
                fwd_lcores[lc_id]->stream_idx = sm_id;
                for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
                        /* if the nb_queue is zero, means this tc is
                         * not enabled on the POOL
                         */
                        if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
                                break;
                        /* k: index of the first stream of this (pool, tc). */
                        k = fwd_lcores[lc_id]->stream_nb +
                                fwd_lcores[lc_id]->stream_idx;
                        rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
                        txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
                        /* NOTE(review): the RX queue count is read from
                         * txp_dcb_info while the RX base above comes from
                         * rxp_dcb_info — looks asymmetric; confirm this is
                         * intentional (both ports typically share the same
                         * DCB layout).
                         */
                        nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
                        nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
                        for (j = 0; j < nb_rx_queue; j++) {
                                struct fwd_stream *fs;

                                fs = fwd_streams[k + j];
                                fs->rx_port = fwd_ports_ids[rxp];
                                fs->rx_queue = rxq + j;
                                fs->tx_port = fwd_ports_ids[txp];
                                fs->tx_queue = txq + j % nb_tx_queue;
                                fs->peer_addr = fs->tx_port;
                                fs->retry_enabled = retry_enabled;
                        }
                        fwd_lcores[lc_id]->stream_nb +=
                                rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
                }
                sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

                tc++;
                if (tc < rxp_dcb_info.nb_tcs)
                        continue;
                /* Restart from TC 0 on next RX port */
                tc = 0;
                /* With NUMA, skip ahead so ports stay socket-balanced. */
                if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
                        rxp = (portid_t)
                                (rxp + ((nb_ports >> 1) / nb_fwd_ports));
                else
                        rxp++;
                if (rxp >= nb_fwd_ports)
                        return;
                /* get the dcb information on next RX and TX ports */
                if ((rxp & 0x1) == 0)
                        txp = (portid_t) (rxp + 1);
                else
                        txp = (portid_t) (rxp - 1);
                rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
                rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
        }
}
2032
2033 static void
2034 icmp_echo_config_setup(void)
2035 {
2036         portid_t  rxp;
2037         queueid_t rxq;
2038         lcoreid_t lc_id;
2039         uint16_t  sm_id;
2040
2041         if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2042                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2043                         (nb_txq * nb_fwd_ports);
2044         else
2045                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2046         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2047         cur_fwd_config.nb_fwd_streams =
2048                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2049         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2050                 cur_fwd_config.nb_fwd_lcores =
2051                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2052         if (verbose_level > 0) {
2053                 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2054                        __FUNCTION__,
2055                        cur_fwd_config.nb_fwd_lcores,
2056                        cur_fwd_config.nb_fwd_ports,
2057                        cur_fwd_config.nb_fwd_streams);
2058         }
2059
2060         /* reinitialize forwarding streams */
2061         init_fwd_streams();
2062         setup_fwd_config_of_each_lcore(&cur_fwd_config);
2063         rxp = 0; rxq = 0;
2064         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2065                 if (verbose_level > 0)
2066                         printf("  core=%d: \n", lc_id);
2067                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2068                         struct fwd_stream *fs;
2069                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2070                         fs->rx_port = fwd_ports_ids[rxp];
2071                         fs->rx_queue = rxq;
2072                         fs->tx_port = fs->rx_port;
2073                         fs->tx_queue = rxq;
2074                         fs->peer_addr = fs->tx_port;
2075                         fs->retry_enabled = retry_enabled;
2076                         if (verbose_level > 0)
2077                                 printf("  stream=%d port=%d rxq=%d txq=%d\n",
2078                                        sm_id, fs->rx_port, fs->rx_queue,
2079                                        fs->tx_queue);
2080                         rxq = (queueid_t) (rxq + 1);
2081                         if (rxq == nb_rxq) {
2082                                 rxq = 0;
2083                                 rxp = (portid_t) (rxp + 1);
2084                         }
2085                 }
2086         }
2087 }
2088
2089 void
2090 fwd_config_setup(void)
2091 {
2092         cur_fwd_config.fwd_eng = cur_fwd_eng;
2093         if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2094                 icmp_echo_config_setup();
2095                 return;
2096         }
2097         if ((nb_rxq > 1) && (nb_txq > 1)){
2098                 if (dcb_config)
2099                         dcb_fwd_config_setup();
2100                 else
2101                         rss_fwd_config_setup();
2102         }
2103         else
2104                 simple_fwd_config_setup();
2105 }
2106
2107 void
2108 pkt_fwd_config_display(struct fwd_config *cfg)
2109 {
2110         struct fwd_stream *fs;
2111         lcoreid_t  lc_id;
2112         streamid_t sm_id;
2113
2114         printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2115                 "NUMA support %s, MP over anonymous pages %s\n",
2116                 cfg->fwd_eng->fwd_mode_name,
2117                 retry_enabled == 0 ? "" : " with retry",
2118                 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2119                 numa_support == 1 ? "enabled" : "disabled",
2120                 mp_anon != 0 ? "enabled" : "disabled");
2121
2122         if (retry_enabled)
2123                 printf("TX retry num: %u, delay between TX retries: %uus\n",
2124                         burst_tx_retry_num, burst_tx_delay_time);
2125         for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2126                 printf("Logical Core %u (socket %u) forwards packets on "
2127                        "%d streams:",
2128                        fwd_lcores_cpuids[lc_id],
2129                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2130                        fwd_lcores[lc_id]->stream_nb);
2131                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2132                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2133                         printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
2134                                "P=%d/Q=%d (socket %u) ",
2135                                fs->rx_port, fs->rx_queue,
2136                                ports[fs->rx_port].socket_id,
2137                                fs->tx_port, fs->tx_queue,
2138                                ports[fs->tx_port].socket_id);
2139                         print_ethaddr("peer=",
2140                                       &peer_eth_addrs[fs->peer_addr]);
2141                 }
2142                 printf("\n");
2143         }
2144         printf("\n");
2145 }
2146
2147 int
2148 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2149 {
2150         unsigned int i;
2151         unsigned int lcore_cpuid;
2152         int record_now;
2153
2154         record_now = 0;
2155  again:
2156         for (i = 0; i < nb_lc; i++) {
2157                 lcore_cpuid = lcorelist[i];
2158                 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2159                         printf("lcore %u not enabled\n", lcore_cpuid);
2160                         return -1;
2161                 }
2162                 if (lcore_cpuid == rte_get_master_lcore()) {
2163                         printf("lcore %u cannot be masked on for running "
2164                                "packet forwarding, which is the master lcore "
2165                                "and reserved for command line parsing only\n",
2166                                lcore_cpuid);
2167                         return -1;
2168                 }
2169                 if (record_now)
2170                         fwd_lcores_cpuids[i] = lcore_cpuid;
2171         }
2172         if (record_now == 0) {
2173                 record_now = 1;
2174                 goto again;
2175         }
2176         nb_cfg_lcores = (lcoreid_t) nb_lc;
2177         if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2178                 printf("previous number of forwarding cores %u - changed to "
2179                        "number of configured cores %u\n",
2180                        (unsigned int) nb_fwd_lcores, nb_lc);
2181                 nb_fwd_lcores = (lcoreid_t) nb_lc;
2182         }
2183
2184         return 0;
2185 }
2186
/* Expand a 64-bit lcore mask into a list and install it. */
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
        unsigned int lcorelist[64];
        unsigned int count;
        unsigned int bit;

        if (lcoremask == 0) {
                printf("Invalid NULL mask of cores\n");
                return -1;
        }
        count = 0;
        for (bit = 0; bit < 64; bit++) {
                if ((lcoremask & ((uint64_t)1ULL << bit)) != 0)
                        lcorelist[count++] = bit;
        }
        return set_fwd_lcores_list(lcorelist, count);
}
2206
2207 void
2208 set_fwd_lcores_number(uint16_t nb_lc)
2209 {
2210         if (nb_lc > nb_cfg_lcores) {
2211                 printf("nb fwd cores %u > %u (max. number of configured "
2212                        "lcores) - ignored\n",
2213                        (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2214                 return;
2215         }
2216         nb_fwd_lcores = (lcoreid_t) nb_lc;
2217         printf("Number of forwarding cores set to %u\n",
2218                (unsigned int) nb_fwd_lcores);
2219 }
2220
2221 void
2222 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2223 {
2224         unsigned int i;
2225         portid_t port_id;
2226         int record_now;
2227
2228         record_now = 0;
2229  again:
2230         for (i = 0; i < nb_pt; i++) {
2231                 port_id = (portid_t) portlist[i];
2232                 if (port_id_is_invalid(port_id, ENABLED_WARN))
2233                         return;
2234                 if (record_now)
2235                         fwd_ports_ids[i] = port_id;
2236         }
2237         if (record_now == 0) {
2238                 record_now = 1;
2239                 goto again;
2240         }
2241         nb_cfg_ports = (portid_t) nb_pt;
2242         if (nb_fwd_ports != (portid_t) nb_pt) {
2243                 printf("previous number of forwarding ports %u - changed to "
2244                        "number of configured ports %u\n",
2245                        (unsigned int) nb_fwd_ports, nb_pt);
2246                 nb_fwd_ports = (portid_t) nb_pt;
2247         }
2248 }
2249
2250 void
2251 set_fwd_ports_mask(uint64_t portmask)
2252 {
2253         unsigned int portlist[64];
2254         unsigned int nb_pt;
2255         unsigned int i;
2256
2257         if (portmask == 0) {
2258                 printf("Invalid NULL mask of ports\n");
2259                 return;
2260         }
2261         nb_pt = 0;
2262         RTE_ETH_FOREACH_DEV(i) {
2263                 if (! ((uint64_t)(1ULL << i) & portmask))
2264                         continue;
2265                 portlist[nb_pt++] = i;
2266         }
2267         set_fwd_ports_list(portlist, nb_pt);
2268 }
2269
2270 void
2271 set_fwd_ports_number(uint16_t nb_pt)
2272 {
2273         if (nb_pt > nb_cfg_ports) {
2274                 printf("nb fwd ports %u > %u (number of configured "
2275                        "ports) - ignored\n",
2276                        (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2277                 return;
2278         }
2279         nb_fwd_ports = (portid_t) nb_pt;
2280         printf("Number of forwarding ports set to %u\n",
2281                (unsigned int) nb_fwd_ports);
2282 }
2283
2284 int
2285 port_is_forwarding(portid_t port_id)
2286 {
2287         unsigned int i;
2288
2289         if (port_id_is_invalid(port_id, ENABLED_WARN))
2290                 return -1;
2291
2292         for (i = 0; i < nb_fwd_ports; i++) {
2293                 if (fwd_ports_ids[i] == port_id)
2294                         return 1;
2295         }
2296
2297         return 0;
2298 }
2299
2300 void
2301 set_nb_pkt_per_burst(uint16_t nb)
2302 {
2303         if (nb > MAX_PKT_BURST) {
2304                 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2305                        " ignored\n",
2306                        (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2307                 return;
2308         }
2309         nb_pkt_per_burst = nb;
2310         printf("Number of packets per burst set to %u\n",
2311                (unsigned int) nb_pkt_per_burst);
2312 }
2313
2314 static const char *
2315 tx_split_get_name(enum tx_pkt_split split)
2316 {
2317         uint32_t i;
2318
2319         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2320                 if (tx_split_name[i].split == split)
2321                         return tx_split_name[i].name;
2322         }
2323         return NULL;
2324 }
2325
2326 void
2327 set_tx_pkt_split(const char *name)
2328 {
2329         uint32_t i;
2330
2331         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2332                 if (strcmp(tx_split_name[i].name, name) == 0) {
2333                         tx_pkt_split = tx_split_name[i].split;
2334                         return;
2335                 }
2336         }
2337         printf("unknown value: \"%s\"\n", name);
2338 }
2339
2340 void
2341 show_tx_pkt_segments(void)
2342 {
2343         uint32_t i, n;
2344         const char *split;
2345
2346         n = tx_pkt_nb_segs;
2347         split = tx_split_get_name(tx_pkt_split);
2348
2349         printf("Number of segments: %u\n", n);
2350         printf("Segment sizes: ");
2351         for (i = 0; i != n - 1; i++)
2352                 printf("%hu,", tx_pkt_seg_lengths[i]);
2353         printf("%hu\n", tx_pkt_seg_lengths[i]);
2354         printf("Split packet: %s\n", split);
2355 }
2356
/*
 * Configure TX packet segmentation: transmitted packets are built from
 * @nb_segs mbuf segments whose lengths are given in @seg_lengths.
 *
 * The configuration is rejected (with a message, leaving the previous
 * settings intact) when:
 * - nb_segs is not strictly smaller than the number of TX descriptors;
 * - any single segment length exceeds the mbuf data room size;
 * - the total length is smaller than an empty UDP/IPv4 packet
 *   (Ethernet header + 20-byte IPv4 header + 8-byte UDP header).
 */
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length fits within the mbuf data size.
	 * Check also that the total packet length is greater or equal than the
	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		/* Accumulate in uint16_t, matching tx_pkt_length's type. */
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
				(unsigned) tx_pkt_len,
				(int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	/* All lengths validated: commit the new configuration. */
	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
2397
2398 char*
2399 list_pkt_forwarding_modes(void)
2400 {
2401         static char fwd_modes[128] = "";
2402         const char *separator = "|";
2403         struct fwd_engine *fwd_eng;
2404         unsigned i = 0;
2405
2406         if (strlen (fwd_modes) == 0) {
2407                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2408                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2409                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2410                         strncat(fwd_modes, separator,
2411                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2412                 }
2413                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2414         }
2415
2416         return fwd_modes;
2417 }
2418
2419 char*
2420 list_pkt_forwarding_retry_modes(void)
2421 {
2422         static char fwd_modes[128] = "";
2423         const char *separator = "|";
2424         struct fwd_engine *fwd_eng;
2425         unsigned i = 0;
2426
2427         if (strlen(fwd_modes) == 0) {
2428                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2429                         if (fwd_eng == &rx_only_engine)
2430                                 continue;
2431                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2432                                         sizeof(fwd_modes) -
2433                                         strlen(fwd_modes) - 1);
2434                         strncat(fwd_modes, separator,
2435                                         sizeof(fwd_modes) -
2436                                         strlen(fwd_modes) - 1);
2437                 }
2438                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2439         }
2440
2441         return fwd_modes;
2442 }
2443
2444 void
2445 set_pkt_forwarding_mode(const char *fwd_mode_name)
2446 {
2447         struct fwd_engine *fwd_eng;
2448         unsigned i;
2449
2450         i = 0;
2451         while ((fwd_eng = fwd_engines[i]) != NULL) {
2452                 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2453                         printf("Set %s packet forwarding mode%s\n",
2454                                fwd_mode_name,
2455                                retry_enabled == 0 ? "" : " with retry");
2456                         cur_fwd_eng = fwd_eng;
2457                         return;
2458                 }
2459                 i++;
2460         }
2461         printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2462 }
2463
2464 void
2465 set_verbose_level(uint16_t vb_level)
2466 {
2467         printf("Change verbose level from %u to %u\n",
2468                (unsigned int) verbose_level, (unsigned int) vb_level);
2469         verbose_level = vb_level;
2470 }
2471
2472 void
2473 vlan_extend_set(portid_t port_id, int on)
2474 {
2475         int diag;
2476         int vlan_offload;
2477
2478         if (port_id_is_invalid(port_id, ENABLED_WARN))
2479                 return;
2480
2481         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2482
2483         if (on)
2484                 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2485         else
2486                 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2487
2488         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2489         if (diag < 0)
2490                 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
2491                "diag=%d\n", port_id, on, diag);
2492 }
2493
2494 void
2495 rx_vlan_strip_set(portid_t port_id, int on)
2496 {
2497         int diag;
2498         int vlan_offload;
2499
2500         if (port_id_is_invalid(port_id, ENABLED_WARN))
2501                 return;
2502
2503         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2504
2505         if (on)
2506                 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2507         else
2508                 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2509
2510         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2511         if (diag < 0)
2512                 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
2513                "diag=%d\n", port_id, on, diag);
2514 }
2515
2516 void
2517 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2518 {
2519         int diag;
2520
2521         if (port_id_is_invalid(port_id, ENABLED_WARN))
2522                 return;
2523
2524         diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2525         if (diag < 0)
2526                 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
2527                "diag=%d\n", port_id, queue_id, on, diag);
2528 }
2529
2530 void
2531 rx_vlan_filter_set(portid_t port_id, int on)
2532 {
2533         int diag;
2534         int vlan_offload;
2535
2536         if (port_id_is_invalid(port_id, ENABLED_WARN))
2537                 return;
2538
2539         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2540
2541         if (on)
2542                 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2543         else
2544                 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2545
2546         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2547         if (diag < 0)
2548                 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
2549                "diag=%d\n", port_id, on, diag);
2550 }
2551
2552 int
2553 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2554 {
2555         int diag;
2556
2557         if (port_id_is_invalid(port_id, ENABLED_WARN))
2558                 return 1;
2559         if (vlan_id_is_invalid(vlan_id))
2560                 return 1;
2561         diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2562         if (diag == 0)
2563                 return 0;
2564         printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
2565                "diag=%d\n",
2566                port_id, vlan_id, on, diag);
2567         return -1;
2568 }
2569
2570 void
2571 rx_vlan_all_filter_set(portid_t port_id, int on)
2572 {
2573         uint16_t vlan_id;
2574
2575         if (port_id_is_invalid(port_id, ENABLED_WARN))
2576                 return;
2577         for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2578                 if (rx_vft_set(port_id, vlan_id, on))
2579                         break;
2580         }
2581 }
2582
2583 void
2584 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2585 {
2586         int diag;
2587
2588         if (port_id_is_invalid(port_id, ENABLED_WARN))
2589                 return;
2590
2591         diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2592         if (diag == 0)
2593                 return;
2594
2595         printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
2596                "diag=%d\n",
2597                port_id, vlan_type, tp_id, diag);
2598 }
2599
2600 void
2601 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2602 {
2603         int vlan_offload;
2604         if (port_id_is_invalid(port_id, ENABLED_WARN))
2605                 return;
2606         if (vlan_id_is_invalid(vlan_id))
2607                 return;
2608
2609         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2610         if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2611                 printf("Error, as QinQ has been enabled.\n");
2612                 return;
2613         }
2614
2615         tx_vlan_reset(port_id);
2616         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
2617         ports[port_id].tx_vlan_id = vlan_id;
2618 }
2619
2620 void
2621 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2622 {
2623         int vlan_offload;
2624         if (port_id_is_invalid(port_id, ENABLED_WARN))
2625                 return;
2626         if (vlan_id_is_invalid(vlan_id))
2627                 return;
2628         if (vlan_id_is_invalid(vlan_id_outer))
2629                 return;
2630
2631         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2632         if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2633                 printf("Error, as QinQ hasn't been enabled.\n");
2634                 return;
2635         }
2636
2637         tx_vlan_reset(port_id);
2638         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
2639         ports[port_id].tx_vlan_id = vlan_id;
2640         ports[port_id].tx_vlan_id_outer = vlan_id_outer;
2641 }
2642
2643 void
2644 tx_vlan_reset(portid_t port_id)
2645 {
2646         if (port_id_is_invalid(port_id, ENABLED_WARN))
2647                 return;
2648         ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
2649                                 TESTPMD_TX_OFFLOAD_INSERT_QINQ);
2650         ports[port_id].tx_vlan_id = 0;
2651         ports[port_id].tx_vlan_id_outer = 0;
2652 }
2653
2654 void
2655 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
2656 {
2657         if (port_id_is_invalid(port_id, ENABLED_WARN))
2658                 return;
2659
2660         rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
2661 }
2662
/*
 * Map a RX (is_rx != 0) or TX queue of a port to a statistics counter.
 *
 * If an entry for (port_id, queue_id) already exists in the relevant
 * global mapping table it is updated in place; otherwise a new entry is
 * appended and the table's element count is incremented.
 */
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
		return;

	/* Counter ids are limited by the ethdev per-queue stats registers. */
	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /*then tx*/
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			/* NOTE(review): no bound check against the mapping
			 * array's capacity here - confirm callers cannot
			 * grow nb_tx_queue_stats_mappings past it. */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	}
	else { /*rx*/
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			/* NOTE(review): same missing capacity check as the
			 * TX branch above - confirm. */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}
2714
/*
 * Dump a flow-director mask configuration.
 * Which fields are shown depends on the global fdir_conf.mode:
 * - PERFECT_TUNNEL: MAC byte mask, tunnel type/id masks;
 * - PERFECT_MAC_VLAN: VLAN TCI mask only;
 * - all other modes: IPv4/IPv6 address masks and L4 port masks.
 * Multi-byte mask fields are stored big-endian and converted for display.
 */
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
			" tunnel_id: 0x%08x",
			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
			rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
			rte_be_to_cpu_16(mask->src_port_mask),
			rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}
2749
2750 static inline void
2751 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2752 {
2753         struct rte_eth_flex_payload_cfg *cfg;
2754         uint32_t i, j;
2755
2756         for (i = 0; i < flex_conf->nb_payloads; i++) {
2757                 cfg = &flex_conf->flex_set[i];
2758                 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
2759                         printf("\n    RAW:  ");
2760                 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
2761                         printf("\n    L2_PAYLOAD:  ");
2762                 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
2763                         printf("\n    L3_PAYLOAD:  ");
2764                 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
2765                         printf("\n    L4_PAYLOAD:  ");
2766                 else
2767                         printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
2768                 for (j = 0; j < num; j++)
2769                         printf("  %-5u", cfg->src_offset[j]);
2770         }
2771         printf("\n");
2772 }
2773
2774 static char *
2775 flowtype_to_str(uint16_t flow_type)
2776 {
2777         struct flow_type_info {
2778                 char str[32];
2779                 uint16_t ftype;
2780         };
2781
2782         uint8_t i;
2783         static struct flow_type_info flowtype_str_table[] = {
2784                 {"raw", RTE_ETH_FLOW_RAW},
2785                 {"ipv4", RTE_ETH_FLOW_IPV4},
2786                 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
2787                 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
2788                 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
2789                 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
2790                 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
2791                 {"ipv6", RTE_ETH_FLOW_IPV6},
2792                 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
2793                 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
2794                 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
2795                 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
2796                 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
2797                 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
2798                 {"port", RTE_ETH_FLOW_PORT},
2799                 {"vxlan", RTE_ETH_FLOW_VXLAN},
2800                 {"geneve", RTE_ETH_FLOW_GENEVE},
2801                 {"nvgre", RTE_ETH_FLOW_NVGRE},
2802         };
2803
2804         for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
2805                 if (flowtype_str_table[i].ftype == flow_type)
2806                         return flowtype_str_table[i].str;
2807         }
2808
2809         return NULL;
2810 }
2811
2812 static inline void
2813 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2814 {
2815         struct rte_eth_fdir_flex_mask *mask;
2816         uint32_t i, j;
2817         char *p;
2818
2819         for (i = 0; i < flex_conf->nb_flexmasks; i++) {
2820                 mask = &flex_conf->flex_mask[i];
2821                 p = flowtype_to_str(mask->flow_type);
2822                 printf("\n    %s:\t", p ? p : "unknown");
2823                 for (j = 0; j < num; j++)
2824                         printf(" %02x", mask->mask[j]);
2825         }
2826         printf("\n");
2827 }
2828
2829 static inline void
2830 print_fdir_flow_type(uint32_t flow_types_mask)
2831 {
2832         int i;
2833         char *p;
2834
2835         for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
2836                 if (!(flow_types_mask & (1 << i)))
2837                         continue;
2838                 p = flowtype_to_str(i);
2839                 if (p)
2840                         printf(" %s", p);
2841                 else
2842                         printf(" unknown");
2843         }
2844         printf("\n");
2845 }
2846
/*
 * Display the flow-director capabilities, configuration and statistics
 * of a port: mode, supported flow types, flex payload limits, masks,
 * flex configuration and filter add/remove counters.
 *
 * Prints a message and returns early when the port id is invalid or the
 * device does not support the FDIR filter type.
 */
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
			port_id);
		return;
	}

	/* Query current FDIR configuration and statistics from the PMD. */
	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d     %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("  PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("  PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	/* MAC-VLAN and tunnel modes do not use the flow-type bitmask. */
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf("  SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
		fdir_info.flex_payload_unit,
		fdir_info.max_flex_payload_segment_num,
		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
2922
2923 void
2924 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
2925 {
2926         struct rte_port *port;
2927         struct rte_eth_fdir_flex_conf *flex_conf;
2928         int i, idx = 0;
2929
2930         port = &ports[port_id];
2931         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2932         for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
2933                 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
2934                         idx = i;
2935                         break;
2936                 }
2937         }
2938         if (i >= RTE_ETH_FLOW_MAX) {
2939                 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
2940                         idx = flex_conf->nb_flexmasks;
2941                         flex_conf->nb_flexmasks++;
2942                 } else {
2943                         printf("The flex mask table is full. Can not set flex"
2944                                 " mask for flow_type(%u).", cfg->flow_type);
2945                         return;
2946                 }
2947         }
2948         (void)rte_memcpy(&flex_conf->flex_mask[idx],
2949                          cfg,
2950                          sizeof(struct rte_eth_fdir_flex_mask));
2951 }
2952
2953 void
2954 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
2955 {
2956         struct rte_port *port;
2957         struct rte_eth_fdir_flex_conf *flex_conf;
2958         int i, idx = 0;
2959
2960         port = &ports[port_id];
2961         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2962         for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
2963                 if (cfg->type == flex_conf->flex_set[i].type) {
2964                         idx = i;
2965                         break;
2966                 }
2967         }
2968         if (i >= RTE_ETH_PAYLOAD_MAX) {
2969                 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
2970                         idx = flex_conf->nb_payloads;
2971                         flex_conf->nb_payloads++;
2972                 } else {
2973                         printf("The flex payload table is full. Can not set"
2974                                 " flex payload for type(%u).", cfg->type);
2975                         return;
2976                 }
2977         }
2978         (void)rte_memcpy(&flex_conf->flex_set[idx],
2979                          cfg,
2980                          sizeof(struct rte_eth_flex_payload_cfg));
2981
2982 }
2983
#ifdef RTE_LIBRTE_IXGBE_PMD
/* Enable/disable RX or TX traffic for a VF on an ixgbe port. */
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
	int ret;

	ret = is_rx ? rte_pmd_ixgbe_set_vf_rx(port_id, vf, on)
		    : rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
	if (ret == 0)
		return;

	if (is_rx)
		printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed "
			"diag=%d\n", port_id, ret);
	else
		printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed "
			"diag=%d\n", port_id, ret);
}
#endif
3006
3007 int
3008 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3009 {
3010         int diag;
3011         struct rte_eth_link link;
3012
3013         if (port_id_is_invalid(port_id, ENABLED_WARN))
3014                 return 1;
3015         rte_eth_link_get_nowait(port_id, &link);
3016         if (rate > link.link_speed) {
3017                 printf("Invalid rate value:%u bigger than link speed: %u\n",
3018                         rate, link.link_speed);
3019                 return 1;
3020         }
3021         diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3022         if (diag == 0)
3023                 return diag;
3024         printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3025                 port_id, diag);
3026         return diag;
3027 }
3028
#ifdef RTE_LIBRTE_IXGBE_PMD
/*
 * Cap the TX rate of a VF on an ixgbe port for the queues selected
 * by q_msk. Returns the PMD status code (0 on success).
 */
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int ret;

	ret = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk);
	if (ret != 0)
		printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n",
			port_id, ret);
	return ret;
}
#endif
3043
3044 /*
3045  * Functions to manage the set of filtered Multicast MAC addresses.
3046  *
3047  * A pool of filtered multicast MAC addresses is associated with each port.
3048  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3049  * The address of the pool and the number of valid multicast MAC addresses
3050  * recorded in the pool are stored in the fields "mc_addr_pool" and
3051  * "mc_addr_nb" of the "rte_port" data structure.
3052  *
3053  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3054  * to be supplied a contiguous array of multicast MAC addresses.
3055  * To comply with this constraint, the set of multicast addresses recorded
3056  * into the pool are systematically compacted at the beginning of the pool.
3057  * Hence, when a multicast address is removed from the pool, all following
3058  * addresses, if any, are copied back to keep the set contiguous.
3059  */
3060 #define MCAST_POOL_INC 32
3061
3062 static int
3063 mcast_addr_pool_extend(struct rte_port *port)
3064 {
3065         struct ether_addr *mc_pool;
3066         size_t mc_pool_size;
3067
3068         /*
3069          * If a free entry is available at the end of the pool, just
3070          * increment the number of recorded multicast addresses.
3071          */
3072         if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3073                 port->mc_addr_nb++;
3074                 return 0;
3075         }
3076
3077         /*
3078          * [re]allocate a pool with MCAST_POOL_INC more entries.
3079          * The previous test guarantees that port->mc_addr_nb is a multiple
3080          * of MCAST_POOL_INC.
3081          */
3082         mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3083                                                     MCAST_POOL_INC);
3084         mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3085                                                 mc_pool_size);
3086         if (mc_pool == NULL) {
3087                 printf("allocation of pool of %u multicast addresses failed\n",
3088                        port->mc_addr_nb + MCAST_POOL_INC);
3089                 return -ENOMEM;
3090         }
3091
3092         port->mc_addr_pool = mc_pool;
3093         port->mc_addr_nb++;
3094         return 0;
3095
3096 }
3097
/*
 * Drop the address at @addr_idx from the pool of @port, keeping the
 * remaining addresses contiguous as required by
 * rte_eth_dev_set_mc_addr_list().
 */
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	/* Shift the following addresses down over the removed slot. */
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}
3115
3116 static void
3117 eth_port_multicast_addr_list_set(uint8_t port_id)
3118 {
3119         struct rte_port *port;
3120         int diag;
3121
3122         port = &ports[port_id];
3123         diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3124                                             port->mc_addr_nb);
3125         if (diag == 0)
3126                 return;
3127         printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3128                port->mc_addr_nb, port_id, -diag);
3129 }
3130
3131 void
3132 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
3133 {
3134         struct rte_port *port;
3135         uint32_t i;
3136
3137         if (port_id_is_invalid(port_id, ENABLED_WARN))
3138                 return;
3139
3140         port = &ports[port_id];
3141
3142         /*
3143          * Check that the added multicast MAC address is not already recorded
3144          * in the pool of multicast addresses.
3145          */
3146         for (i = 0; i < port->mc_addr_nb; i++) {
3147                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3148                         printf("multicast address already filtered by port\n");
3149                         return;
3150                 }
3151         }
3152
3153         if (mcast_addr_pool_extend(port) != 0)
3154                 return;
3155         ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3156         eth_port_multicast_addr_list_set(port_id);
3157 }
3158
3159 void
3160 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
3161 {
3162         struct rte_port *port;
3163         uint32_t i;
3164
3165         if (port_id_is_invalid(port_id, ENABLED_WARN))
3166                 return;
3167
3168         port = &ports[port_id];
3169
3170         /*
3171          * Search the pool of multicast MAC addresses for the removed address.
3172          */
3173         for (i = 0; i < port->mc_addr_nb; i++) {
3174                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3175                         break;
3176         }
3177         if (i == port->mc_addr_nb) {
3178                 printf("multicast address not filtered by port %d\n", port_id);
3179                 return;
3180         }
3181
3182         mcast_addr_pool_remove(port, i);
3183         eth_port_multicast_addr_list_set(port_id);
3184 }
3185
3186 void
3187 port_dcb_info_display(uint8_t port_id)
3188 {
3189         struct rte_eth_dcb_info dcb_info;
3190         uint16_t i;
3191         int ret;
3192         static const char *border = "================";
3193
3194         if (port_id_is_invalid(port_id, ENABLED_WARN))
3195                 return;
3196
3197         ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3198         if (ret) {
3199                 printf("\n Failed to get dcb infos on port %-2d\n",
3200                         port_id);
3201                 return;
3202         }
3203         printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3204         printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3205         printf("\n  TC :        ");
3206         for (i = 0; i < dcb_info.nb_tcs; i++)
3207                 printf("\t%4d", i);
3208         printf("\n  Priority :  ");
3209         for (i = 0; i < dcb_info.nb_tcs; i++)
3210                 printf("\t%4d", dcb_info.prio_tc[i]);
3211         printf("\n  BW percent :");
3212         for (i = 0; i < dcb_info.nb_tcs; i++)
3213                 printf("\t%4d%%", dcb_info.tc_bws[i]);
3214         printf("\n  RXQ base :  ");
3215         for (i = 0; i < dcb_info.nb_tcs; i++)
3216                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3217         printf("\n  RXQ number :");
3218         for (i = 0; i < dcb_info.nb_tcs; i++)
3219                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3220         printf("\n  TXQ base :  ");
3221         for (i = 0; i < dcb_info.nb_tcs; i++)
3222                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3223         printf("\n  TXQ number :");
3224         for (i = 0; i < dcb_info.nb_tcs; i++)
3225                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3226         printf("\n");
3227 }
3228
/*
 * Read the whole file at @file_path into a freshly malloc'ed buffer.
 *
 * On success returns the buffer (caller frees it, e.g. via
 * close_ddp_package_file()) and stores the byte count in *size when
 * @size is non-NULL. On any failure returns NULL with *size left at 0.
 *
 * Fixes over the previous version:
 *  - ftell() failure (-1) was not checked and was silently converted to a
 *    huge unsigned size;
 *  - fread() returns size_t and never goes negative, so the old "ret < 0"
 *    test could not detect a short or failed read; we now require the full
 *    byte count;
 *  - files whose length does not fit in uint32_t are rejected instead of
 *    being truncated.
 */
uint8_t *
open_ddp_package_file(const char *file_path, uint32_t *size)
{
	FILE *fh = fopen(file_path, "rb");
	long file_size;
	uint8_t *buf = NULL;

	if (size)
		*size = 0;

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return NULL;
	}

	if (fseek(fh, 0, SEEK_END) < 0) {
		printf("%s: File operations failed\n", __func__);
		goto err_close;
	}

	file_size = ftell(fh);
	if (file_size < 0 || (unsigned long)file_size > UINT32_MAX) {
		printf("%s: File operations failed\n", __func__);
		goto err_close;
	}

	buf = malloc(file_size);
	if (!buf) {
		printf("%s: Failed to malloc memory\n", __func__);
		goto err_close;
	}

	if (fseek(fh, 0, SEEK_SET) < 0) {
		printf("%s: File seek operation failed\n", __func__);
		goto err_free;
	}

	/* A short read (EOF or I/O error) is a failure, not just < 0. */
	if (fread(buf, 1, (size_t)file_size, fh) != (size_t)file_size) {
		printf("%s: File read operation failed\n", __func__);
		goto err_free;
	}

	if (size)
		*size = (uint32_t)file_size;

	fclose(fh);
	return buf;

err_free:
	free(buf);
err_close:
	fclose(fh);
	return NULL;
}
3284
/*
 * Release a buffer obtained from open_ddp_package_file().
 * Returns 0 when a buffer was freed, -1 when @buf is NULL.
 */
int
close_ddp_package_file(uint8_t *buf)
{
	if (buf == NULL)
		return -1;

	free(buf);
	return 0;
}