/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <stdarg.h>
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
- eth_addr->addr_bytes[0],
- eth_addr->addr_bytes[1],
- eth_addr->addr_bytes[2],
- eth_addr->addr_bytes[3],
- eth_addr->addr_bytes[4],
- eth_addr->addr_bytes[5]);
+ (unsigned int)eth_addr->addr_bytes[0],
+ (unsigned int)eth_addr->addr_bytes[1],
+ (unsigned int)eth_addr->addr_bytes[2],
+ (unsigned int)eth_addr->addr_bytes[3],
+ (unsigned int)eth_addr->addr_bytes[4],
+ (unsigned int)eth_addr->addr_bytes[5]);
}
void
nic_stats_display(portid_t port_id)
{
struct rte_eth_stats stats;
+ struct rte_port *port = &ports[port_id];
+ uint8_t i;
static const char *nic_stats_border = "########################";
rte_eth_stats_get(port_id, &stats);
printf("\n %s NIC statistics for port %-2d %s\n",
nic_stats_border, port_id, nic_stats_border);
- printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64"RX-bytes: "
- "%-"PRIu64"\n"
- " TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64"TX-bytes: "
- "%-"PRIu64"\n",
- stats.ipackets, stats.ierrors, stats.ibytes,
- stats.opackets, stats.oerrors, stats.obytes);
+
+ if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
+ printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64"RX-bytes: "
+ "%-"PRIu64"\n"
+ " TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64"TX-bytes: "
+ "%-"PRIu64"\n",
+ stats.ipackets, stats.ierrors, stats.ibytes,
+ stats.opackets, stats.oerrors, stats.obytes);
+ }
+ else {
+ printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
+ " RX-bytes: %10"PRIu64"\n"
+ " TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
+ " TX-bytes: %10"PRIu64"\n",
+ stats.ipackets, stats.ierrors, stats.ibytes,
+ stats.opackets, stats.oerrors, stats.obytes);
+ }
/* stats fdir */
if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
stats.fdirmiss,
stats.fdirmatch);
+ if (port->rx_queue_stats_mapping_enabled) {
+ printf("\n");
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ printf(" Stats reg %2d RX-packets: %10"PRIu64
+ " RX-errors: %10"PRIu64
+ " RX-bytes: %10"PRIu64"\n",
+ i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
+ }
+ }
+ if (port->tx_queue_stats_mapping_enabled) {
+ printf("\n");
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ printf(" Stats reg %2d TX-packets: %10"PRIu64
+ " TX-bytes: %10"PRIu64"\n",
+ i, stats.q_opackets[i], stats.q_obytes[i]);
+ }
+ }
+
+ /* Display statistics of XON/XOFF pause frames, if any. */
+ if ((stats.tx_pause_xon | stats.rx_pause_xon |
+ stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
+ printf(" RX-XOFF: %-10"PRIu64" RX-XON: %-10"PRIu64"\n",
+ stats.rx_pause_xoff, stats.rx_pause_xon);
+ printf(" TX-XOFF: %-10"PRIu64" TX-XON: %-10"PRIu64"\n",
+ stats.tx_pause_xoff, stats.tx_pause_xon);
+ }
printf(" %s############################%s\n",
nic_stats_border, nic_stats_border);
}
printf("\n NIC statistics for port %d cleared\n", port_id);
}
+
+/*
+ * Display which per-queue statistics registers are mapped to the RX and
+ * TX queues of a port, as recorded in the rx/tx_queue_stats_mappings[]
+ * tables.  Prints an explanatory message and returns if the port has no
+ * queue-stats mapping enabled in either direction.
+ */
+void
+nic_stats_mapping_display(portid_t port_id)
+{
+	struct rte_port *port = &ports[port_id];
+	uint16_t i;
+
+	static const char *nic_stats_mapping_border = "########################";
+
+	if (port_id >= nb_ports) {
+		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+		return;
+	}
+
+	/* Nothing to show unless at least one direction has a mapping. */
+	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
+		printf("Port id %d - either does not support queue statistic mapping or"
+		       " no queue statistic mapping set\n", port_id);
+		return;
+	}
+
+	printf("\n %s NIC statistics mapping for port %-2d %s\n",
+	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
+
+	/* Walk the global RX mapping table, printing entries for this port. */
+	if (port->rx_queue_stats_mapping_enabled) {
+		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
+			if (rx_queue_stats_mappings[i].port_id == port_id) {
+				printf(" RX-queue %2d mapped to Stats Reg %2d\n",
+				       rx_queue_stats_mappings[i].queue_id,
+				       rx_queue_stats_mappings[i].stats_counter_id);
+			}
+		}
+		printf("\n");
+	}
+
+	/* Same for the TX direction. */
+	if (port->tx_queue_stats_mapping_enabled) {
+		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
+			if (tx_queue_stats_mappings[i].port_id == port_id) {
+				printf(" TX-queue %2d mapped to Stats Reg %2d\n",
+				       tx_queue_stats_mappings[i].queue_id,
+				       tx_queue_stats_mappings[i].stats_counter_id);
+			}
+		}
+	}
+
+	printf(" %s####################################%s\n",
+	       nic_stats_mapping_border, nic_stats_mapping_border);
+}
+
void
port_infos_display(portid_t port_id)
{
struct rte_port *port;
struct rte_eth_link link;
+ int vlan_offload;
+ struct rte_mempool * mp;
static const char *info_border = "*********************";
if (port_id >= nb_ports) {
return;
}
port = &ports[port_id];
- rte_eth_link_get(port_id, &link);
+ rte_eth_link_get_nowait(port_id, &link);
printf("\n%s Infos for port %-2d %s\n",
info_border, port_id, info_border);
print_ethaddr("MAC address: ", &port->eth_addr);
+ printf("\nConnect to socket: %u", port->socket_id);
+
+ if (port_numa[port_id] != NUMA_NO_CONFIG) {
+ mp = mbuf_pool_find(port_numa[port_id]);
+ if (mp)
+ printf("\nmemory allocation on the socket: %d",
+ port_numa[port_id]);
+ } else
+ printf("\nmemory allocation on the socket: %u",port->socket_id);
+
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
printf("Maximum number of MAC addresses: %u\n",
(unsigned int)(port->dev_info.max_mac_addrs));
+ printf("Maximum number of MAC addresses of hash filtering: %u\n",
+ (unsigned int)(port->dev_info.max_hash_mac_addrs));
+
+ vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+ if (vlan_offload >= 0){
+ printf("VLAN offload: \n");
+ if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+ printf(" strip on \n");
+ else
+ printf(" strip off \n");
+
+ if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+ printf(" filter on \n");
+ else
+ printf(" filter off \n");
+
+ if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+ printf(" qinq(extend) on \n");
+ else
+ printf(" qinq(extend) off \n");
+ }
}
static int
(unsigned)reg_off);
return 1;
}
- pci_len = ports[port_id].dev_info.pci_dev->mem_resource.len;
+ pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
if (reg_off >= pci_len) {
printf("Port %d: register offset %u (0x%X) out of port PCI "
"resource (length=%"PRIu64")\n",
nb_txq, nb_txd, tx_free_thresh);
printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
- printf(" TX RS bit threshold=%d\n", tx_rs_thresh);
+ printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
+ tx_rs_thresh, txq_flags);
+}
+
+/*
+ * Query the RSS redirection table (RETA) of a port and print the
+ * entries selected by reta_conf->mask_lo / mask_hi.  Each mask covers
+ * one half of the ETH_RSS_RETA_NUM_ENTRIES table.
+ */
+void
+port_rss_reta_info(portid_t port_id,struct rte_eth_rss_reta *reta_conf)
+{
+	uint8_t i,j;
+	int ret;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf);
+	if (ret != 0) {
+		printf("Failed to get RSS RETA info, return code = %d\n", ret);
+		return;
+	}
+
+	/* Lower half of the table: bit i of mask_lo selects entry i. */
+	if (reta_conf->mask_lo != 0) {
+		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
+			if (reta_conf->mask_lo & (uint64_t)(1ULL << i))
+				printf("RSS RETA configuration: hash index=%d,"
+					"queue=%d\n",i,reta_conf->reta[i]);
+		}
+	}
+
+	/* Upper half: bit i of mask_hi selects entry i + NUM_ENTRIES/2. */
+	if (reta_conf->mask_hi != 0) {
+		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
+			if(reta_conf->mask_hi & (uint64_t)(1ULL << i)) {
+				j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
+				printf("RSS RETA configuration: hash index=%d,"
+					"queue=%d\n",j,reta_conf->reta[j]);
+			}
+		}
+	}
}
/*
nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
nb_extra = (lcoreid_t) (nb_fs % nb_fc);
}
- nb_extra = (lcoreid_t) (nb_fs % nb_fc);
nb_lc = (lcoreid_t) (nb_fc - nb_extra);
sm_id = 0;
portid_t j;
portid_t inc = 2;
- if (nb_fwd_ports % 2) {
- if (port_topology == PORT_TOPOLOGY_CHAINED) {
- inc = 1;
- }
- else {
- printf("\nWarning! Cannot handle an odd number of ports "
- "with the current port topology. Configuration "
- "must be changed to have an even number of ports, "
- "or relaunch application with "
- "--port-topology=chained\n\n");
- }
+ if (port_topology == PORT_TOPOLOGY_CHAINED ||
+ port_topology == PORT_TOPOLOGY_LOOP) {
+ inc = 1;
+ } else if (nb_fwd_ports % 2) {
+ printf("\nWarning! Cannot handle an odd number of ports "
+ "with the current port topology. Configuration "
+ "must be changed to have an even number of ports, "
+ "or relaunch application with "
+ "--port-topology=chained\n\n");
}
cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
cur_fwd_config.nb_fwd_streams =
(streamid_t) cur_fwd_config.nb_fwd_ports;
+ /* reinitialize forwarding streams */
+ init_fwd_streams();
+
/*
* In the simple forwarding test, the number of forwarding cores
* must be lower or equal to the number of forwarding ports.
setup_fwd_config_of_each_lcore(&cur_fwd_config);
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
- j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
+ if (port_topology != PORT_TOPOLOGY_LOOP)
+ j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
+ else
+ j = i;
fwd_streams[i]->rx_port = fwd_ports_ids[i];
fwd_streams[i]->rx_queue = 0;
fwd_streams[i]->tx_port = fwd_ports_ids[j];
else
cur_fwd_config.nb_fwd_lcores =
(lcoreid_t)cur_fwd_config.nb_fwd_streams;
+
+ /* reinitialize forwarding streams */
+ init_fwd_streams();
+
setup_fwd_config_of_each_lcore(&cur_fwd_config);
rxp = 0; rxq = 0;
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
struct fwd_stream *fs;
fs = fwd_streams[lc_id];
+
if ((rxp & 0x1) == 0)
txp = (portid_t) (rxp + 1);
else
txp = (portid_t) (rxp - 1);
+ /*
+ * if we are in loopback, simply send stuff out through the
+ * ingress port
+ */
+ if (port_topology == PORT_TOPOLOGY_LOOP)
+ txp = rxp;
+
fs->rx_port = fwd_ports_ids[rxp];
fs->rx_queue = rxq;
fs->tx_port = fwd_ports_ids[txp];
}
}
+/*
+ * With both DCB and VT enabled, map one of the 128 RX queues to its
+ * corresponding TX queue.  The layout depends on whether 4 or 8 traffic
+ * classes (TCs) are configured, as selected by dcb_q_mapping.
+ */
+static void
+dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
+{
+	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
+
+		if (rxq < 32)
+			/* tc0: 0-31 */
+			*txq = rxq;
+		else if (rxq < 64) {
+			/* tc1: 64-95 */
+			*txq = (uint16_t)(rxq + 32);
+		}
+		else {
+			/* tc2: 96-111;tc3:112-127 */
+			*txq = (uint16_t)(rxq/2 + 64);
+		}
+	}
+	else {
+		/* 8-TC layout */
+		if (rxq < 16)
+			/* tc0 mapping*/
+			*txq = rxq;
+		else if (rxq < 32) {
+			/* tc1 mapping*/
+			*txq = (uint16_t)(rxq + 16);
+		}
+		else if (rxq < 64) {
+			/*tc2,tc3 mapping */
+			*txq = (uint16_t)(rxq + 32);
+		}
+		else {
+			/* tc4,tc5,tc6 and tc7 mapping */
+			*txq = (uint16_t)(rxq/2 + 64);
+		}
+	}
+}
+
+/**
+ * Set up the forwarding configuration for the DCB forwarding test,
+ * where each core is assigned multiple transmit queues on every port.
+ *
+ * Each core is assigned several streams, each stream being composed of
+ * a RX queue to poll on a RX port for input messages, associated with
+ * a TX queue of a TX port where to send forwarded packets.
+ * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
+ * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
+ * following rules:
+ * In VT mode,
+ * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
+ * - TxQl = RxQj
+ * In non-VT mode,
+ * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
+ * - the RxQj -> TxQl mapping is computed by dcb_rxq_2_txq_mapping().
+ */
+static void
+dcb_fwd_config_setup(void)
+{
+	portid_t rxp;
+	portid_t txp;
+	queueid_t rxq;
+	queueid_t nb_q;
+	lcoreid_t lc_id;
+	uint16_t sm_id;
+
+	nb_q = nb_rxq;
+
+	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
+	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
+	cur_fwd_config.nb_fwd_streams =
+		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
+
+	/* reinitialize forwarding streams */
+	init_fwd_streams();
+
+	setup_fwd_config_of_each_lcore(&cur_fwd_config);
+	rxp = 0; rxq = 0;
+	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
+		/* a fwd core can run multi-streams */
+		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
+		{
+			struct fwd_stream *fs;
+			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
+			/* Pair ports: even RX port -> next port, odd -> previous. */
+			if ((rxp & 0x1) == 0)
+				txp = (portid_t) (rxp + 1);
+			else
+				txp = (portid_t) (rxp - 1);
+			fs->rx_port = fwd_ports_ids[rxp];
+			fs->rx_queue = rxq;
+			fs->tx_port = fwd_ports_ids[txp];
+			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
+				fs->tx_queue = rxq;
+			else
+				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
+			fs->peer_addr = fs->tx_port;
+			rxq = (queueid_t) (rxq + 1);
+			if (rxq < nb_q)
+				continue;
+			/* All queues of the current RX port done: advance port. */
+			rxq = 0;
+			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
+				rxp = (portid_t)
+					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
+			else
+				rxp = (portid_t) (rxp + 1);
+		}
+	}
+}
+
void
fwd_config_setup(void)
{
cur_fwd_config.fwd_eng = cur_fwd_eng;
- if ((nb_rxq > 1) && (nb_txq > 1))
- rss_fwd_config_setup();
+ if ((nb_rxq > 1) && (nb_txq > 1)){
+ if (dcb_config)
+ dcb_fwd_config_setup();
+ else
+ rss_fwd_config_setup();
+ }
else
simple_fwd_config_setup();
}
streamid_t sm_id;
printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
- "NUMA support %s\n",
- cfg->fwd_eng->fwd_mode_name,
- cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
- numa_support == 1 ? "enabled" : "disabled");
+ "NUMA support %s, MP over anonymous pages %s\n",
+ cfg->fwd_eng->fwd_mode_name,
+ cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
+ numa_support == 1 ? "enabled" : "disabled",
+ mp_anon != 0 ? "enabled" : "disabled");
+
+ if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
+ printf("TX retry num: %u, delay between TX retries: %uus\n",
+ burst_tx_retry_num, burst_tx_delay_time);
for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
printf("Logical Core %u (socket %u) forwards packets on "
"%d streams:",
void
fwd_config_display(void)
{
+	/* DCB forwarding requires more than one forwarding lcore; refuse
+	 * to set up and display a configuration that cannot run. */
+	if((dcb_config) && (nb_fwd_lcores == 1)) {
+		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
+		return;
+	}
	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
}
-void
+int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
unsigned int i;
for (i = 0; i < nb_lc; i++) {
lcore_cpuid = lcorelist[i];
if (! rte_lcore_is_enabled(lcore_cpuid)) {
- printf("Logical core %u not enabled\n", lcore_cpuid);
- return;
+ printf("lcore %u not enabled\n", lcore_cpuid);
+ return -1;
}
if (lcore_cpuid == rte_get_master_lcore()) {
- printf("Master core %u cannot forward packets\n",
+ printf("lcore %u cannot be masked on for running "
+ "packet forwarding, which is the master lcore "
+ "and reserved for command line parsing only\n",
lcore_cpuid);
- return;
+ return -1;
}
if (record_now)
fwd_lcores_cpuids[i] = lcore_cpuid;
(unsigned int) nb_fwd_lcores, nb_lc);
nb_fwd_lcores = (lcoreid_t) nb_lc;
}
+
+ return 0;
}
-void
+int
set_fwd_lcores_mask(uint64_t lcoremask)
{
unsigned int lcorelist[64];
if (lcoremask == 0) {
printf("Invalid NULL mask of cores\n");
- return;
+ return -1;
}
nb_lc = 0;
for (i = 0; i < 64; i++) {
continue;
lcorelist[nb_lc++] = i;
}
- set_fwd_lcores_list(lcorelist, nb_lc);
+ return set_fwd_lcores_list(lcorelist, nb_lc);
}
void
for (i = 0; i < nb_pt; i++) {
port_id = (portid_t) portlist[i];
if (port_id >= nb_ports) {
- printf("Invalid port id %u > %u\n",
+ printf("Invalid port id %u >= %u\n",
(unsigned int) port_id,
(unsigned int) nb_ports);
return;
tx_pkt_nb_segs = (uint8_t) nb_segs;
}
+/*
+ * Return a '|'-separated string of all registered forwarding engine
+ * names.  The list is built once from fwd_engines[] on the first call
+ * and cached in a static buffer for subsequent calls.
+ */
+char*
+list_pkt_forwarding_modes(void)
+{
+	static char fwd_modes[128] = "";
+	const char *separator = "|";
+	struct fwd_engine *fwd_eng;
+	unsigned i = 0;
+
+	if (strlen (fwd_modes) == 0) {
+		while ((fwd_eng = fwd_engines[i++]) != NULL) {
+			strcat(fwd_modes, fwd_eng->fwd_mode_name);
+			strcat(fwd_modes, separator);
+		}
+		/* Drop the trailing separator appended by the loop. */
+		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
+	}
+
+	return fwd_modes;
+}
+
void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
}
void
-rx_vlan_filter_set(portid_t port_id, uint16_t vlan_id, int on)
+vlan_extend_set(portid_t port_id, int on)
+{
+	int diag;
+	int vlan_offload;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	/* Read-modify-write: toggle only the QinQ (extend) offload bit,
+	 * preserving the port's other VLAN offload settings. */
+	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+
+	if (on)
+		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
+	else
+		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
+
+	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
+	if (diag < 0)
+		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, diag);
+}
+
+/* Enable/disable RX VLAN header stripping on a whole port. */
+void
+rx_vlan_strip_set(portid_t port_id, int on)
+{
+	int diag;
+	int vlan_offload;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	/* Read-modify-write: only the strip bit is changed. */
+	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+
+	if (on)
+		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+	else
+		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
+
+	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
+	if (diag < 0)
+		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, diag);
+}
+
+/* Enable/disable RX VLAN stripping for a single queue of a port. */
+void
+rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
+{
+	int diag;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
+	if (diag < 0)
+		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
+		       "diag=%d\n", port_id, queue_id, on, diag);
+}
+
+/* Enable/disable RX VLAN filtering on a whole port. */
+void
+rx_vlan_filter_set(portid_t port_id, int on)
+{
+	int diag;
+	int vlan_offload;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	/* Read-modify-write: only the filter bit is changed. */
+	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+
+	if (on)
+		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
+	else
+		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
+
+	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
+	if (diag < 0)
+		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, diag);
+}
+
+void
+rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
int diag;
if (port_id_is_invalid(port_id))
return;
for (vlan_id = 0; vlan_id < 4096; vlan_id++)
- rx_vlan_filter_set(port_id, vlan_id, on);
+ rx_vft_set(port_id, vlan_id, on);
+}
+
+/* Set the VLAN Tag Protocol Identifier (TPID / ether type) of a port. */
+void
+vlan_tpid_set(portid_t port_id, uint16_t tp_id)
+{
+	int diag;
+	if (port_id_is_invalid(port_id))
+		return;
+
+	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
+	if (diag == 0)
+		return;
+
+	printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed "
+	       "diag=%d\n",
+	       port_id, tp_id, diag);
+}
void
ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
}
+/*
+ * Record a queue -> stats-register mapping for a port in the global
+ * rx/tx_queue_stats_mappings[] tables.  An existing mapping for the
+ * same (port, queue) pair is updated in place; otherwise a new entry
+ * is appended.  is_rx selects the RX (non-zero) or TX (zero) table.
+ */
+void
+set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
+{
+	uint16_t i;
+	uint8_t existing_mapping_found = 0;
+
+	if (port_id_is_invalid(port_id))
+		return;
+
+	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
+		return;
+
+	/* Stats registers are a small fixed set; reject out-of-range ids. */
+	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+		printf("map_value not in required range 0..%d\n",
+		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+		return;
+	}
+
+	if (!is_rx) { /*then tx*/
+		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
+			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
+			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
+				tx_queue_stats_mappings[i].stats_counter_id = map_value;
+				existing_mapping_found = 1;
+				break;
+			}
+		}
+		if (!existing_mapping_found) { /* A new additional mapping... */
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
+			nb_tx_queue_stats_mappings++;
+		}
+	}
+	else { /*rx*/
+		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
+			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
+			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
+				rx_queue_stats_mappings[i].stats_counter_id = map_value;
+				existing_mapping_found = 1;
+				break;
+			}
+		}
+		if (!existing_mapping_found) { /* A new additional mapping... */
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
+			nb_rx_queue_stats_mappings++;
+		}
+	}
+}
+
void
tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
{
rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);
- printf("\n %s FDIR infos for port %-2d %s\n",
+ printf("\n %s FDIR infos for port %-2d %s\n",
fdir_stats_border, port_id, fdir_stats_border);
- printf(" collision: %-10"PRIu64" free: %-10"PRIu64"\n"
- " maxhash: %-10"PRIu64" maxlen: %-10"PRIu64"\n"
- " add : %-10"PRIu64" remove : %-10"PRIu64"\n"
- " f_add: %-10"PRIu64" f_remove: %-10"PRIu64"\n",
+ printf(" collision: %-10"PRIu64" free: %"PRIu64"\n"
+ " maxhash: %-10"PRIu64" maxlen: %"PRIu64"\n"
+ " add: %-10"PRIu64" remove: %"PRIu64"\n"
+ " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
(uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
(uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
fdir_infos.add, fdir_infos.remove,
printf("rte_eth_dev_set_masks_filter for port_id=%d failed "
"diag=%d\n", port_id, diag);
}
+
+/*
+ * Enable/disable traffic for a Virtual Function (VF) of a port in one
+ * direction: is_rx non-zero selects RX, zero selects TX.  Errors are
+ * reported on stdout.
+ */
+void
+set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
+{
+	int diag;
+
+	if (port_id_is_invalid(port_id))
+		return;
+	if (is_rx)
+		diag = rte_eth_dev_set_vf_rx(port_id,vf,on);
+	else
+		diag = rte_eth_dev_set_vf_tx(port_id,vf,on);
+	if (diag == 0)
+		return;
+	if(is_rx)
+		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
+		       "diag=%d\n", port_id, diag);
+	else
+		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
+		       "diag=%d\n", port_id, diag);
+
+}
+
+/*
+ * Add/remove a VLAN filter for the set of VFs selected by vf_mask on a
+ * port.  Both the port id and the VLAN id are validated first.
+ */
+void
+set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
+{
+	int diag;
+
+	if (port_id_is_invalid(port_id))
+		return;
+	if (vlan_id_is_invalid(vlan_id))
+		return;
+	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
+	if (diag == 0)
+		return;
+	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
+	       "diag=%d\n", port_id, diag);
+}
+